diff --git a/CloudronPackages/APISIX/CloudronManifest.json b/CloudronPackages/APISIX/CloudronManifest.json
new file mode 100644
index 0000000..c0a91fa
--- /dev/null
+++ b/CloudronPackages/APISIX/CloudronManifest.json
@@ -0,0 +1,33 @@
+{
+  "id": "apisix",
+  "title": "Apache APISIX",
+  "description": "Apache APISIX is a dynamic, real-time, high-performance API gateway.",
+  "tagline": "High-performance API Gateway",
+  "icon": "https://cdn.cloudron.io/icons/apisix.svg",
+  "main": {
+    "type": "docker",
+    "image": "cloudron/base:4.2.0",
+    "ports": {
+      "9080/tcp": "APISIX HTTP/HTTPS Port"
+    },
+    "healthCheck": {
+      "url": "/"
+    }
+  },
+  "manifestVersion": 2,
+  "addons": {
+    "etcd": {}
+  },
+  "environment": {
+    "APISIX_ETCD_HOST": {
+      "type": "string",
+      "description": "etcd host for APISIX",
+      "required": true
+    },
+    "APISIX_ETCD_PORT": {
+      "type": "string",
+      "description": "etcd port for APISIX",
+      "required": true
+    }
+  }
+}
\ No newline at end of file
diff --git a/CloudronPackages/APISIX/Dockerfile b/CloudronPackages/APISIX/Dockerfile
new file mode 100644
index 0000000..0776c2a
--- /dev/null
+++ b/CloudronPackages/APISIX/Dockerfile
@@ -0,0 +1,58 @@
+FROM cloudron/base:4.2.0 AS build
+
+ENV DEBIAN_FRONTEND=noninteractive
+ENV ENV_INST_LUADIR=/usr/local/apisix
+
+COPY apisix-source /apisix
+
+WORKDIR /apisix
+
+RUN set -x \
+    && apt-get -y update --fix-missing \
+    && apt-get install -y \
+    make \
+    git \
+    sudo \
+    libyaml-dev \
+    libldap2-dev \
+    && make deps \
+    && mkdir -p ${ENV_INST_LUADIR} \
+    && cp -r deps ${ENV_INST_LUADIR} \
+    && make install
+
+FROM cloudron/base:4.2.0
+
+# Install the runtime libyaml package
+RUN apt-get -y update --fix-missing \
+    && apt-get install -y libyaml-0-2 \
+    && apt-get remove --purge --auto-remove -y \
+    && mkdir -p /usr/local/apisix/ui
+
+COPY --from=build /usr/local/apisix /usr/local/apisix
+COPY --from=build /usr/local/openresty /usr/local/openresty
+COPY --from=build /usr/bin/apisix /usr/bin/apisix
+# Assuming UI files are in apisix-source/ui, adjust if needed
+COPY apisix-source/ui/ /usr/local/apisix/ui/
+
+# Install brotli (from upstream install-brotli.sh)
+RUN apt-get update && apt-get install -y \
+    libbrotli-dev \
+    --no-install-recommends && \
+    rm -rf /var/lib/apt/lists/*
+
+ENV PATH=$PATH:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin
+
+WORKDIR /usr/local/apisix
+
+RUN ln -sf /dev/stdout /usr/local/apisix/logs/access.log \
+    && ln -sf /dev/stderr /usr/local/apisix/logs/error.log
+
+EXPOSE 9080 9443
+
+# Copy our custom start.sh
+COPY start.sh /usr/local/bin/start.sh
+RUN chmod +x /usr/local/bin/start.sh
+
+ENTRYPOINT ["/usr/local/bin/start.sh"]
+
+STOPSIGNAL SIGQUIT
diff --git a/CloudronPackages/APISIX/apisix-source/.devcontainer/Dockerfile b/CloudronPackages/APISIX/apisix-source/.devcontainer/Dockerfile
new file mode 100644
index 0000000..4402db7
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/.devcontainer/Dockerfile
@@ -0,0 +1,38 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +FROM ubuntu:24.04 + +RUN apt update && export DEBIAN_FRONTEND=noninteractive \ + && apt install -y sudo git make gcc tini + +COPY Makefile .requirements apisix-master-0.rockspec ./ +COPY utils/install-dependencies.sh utils/linux-install-luarocks.sh utils/ + +RUN make install-runtime + +RUN cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1) + +ARG ETCD_VER=v3.5.17 +ARG BUILDARCH +RUN curl -L https://github.com/etcd-io/etcd/releases/download/${ETCD_VER}/etcd-${ETCD_VER}-linux-${BUILDARCH}.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-${BUILDARCH}.tar.gz \ + && mkdir -p /tmp/etcd-download-test \ + && tar xzvf /tmp/etcd-${ETCD_VER}-linux-${BUILDARCH}.tar.gz -C /tmp/etcd-download-test --strip-components=1 \ + && mv /tmp/etcd-download-test/etcdctl /usr/bin \ + && rm -rf /tmp/* + +ENTRYPOINT [ "tini", "--" ] diff --git a/CloudronPackages/APISIX/apisix-source/.devcontainer/devcontainer.json b/CloudronPackages/APISIX/apisix-source/.devcontainer/devcontainer.json new file mode 100644 index 0000000..7151076 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.devcontainer/devcontainer.json @@ -0,0 +1,14 @@ +{ + "name": "APISIX", + "dockerComposeFile": ["docker-compose.yml"], + "service": "apisix", + "workspaceFolder": "/workspace", + "privileged": true, + "postCreateCommand": "bash -c 'cd /workspace && rm -rf test-nginx && git config --global --add safe.directory /workspace && git submodule update --init --recursive && git clone https://github.com/openresty/test-nginx.git --depth 1 --single-branch -b master && make deps'", + "customizations": { + "vscode": { + "extensions": ["ms-vscode.makefile-tools", "ms-azuretools.vscode-docker", "sumneko.lua"] + } + }, + "forwardPorts": [9080, 9180, 2379] +} diff --git a/CloudronPackages/APISIX/apisix-source/.devcontainer/docker-compose.yml b/CloudronPackages/APISIX/apisix-source/.devcontainer/docker-compose.yml new file mode 100644 index 0000000..f238f23 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/.devcontainer/docker-compose.yml @@ -0,0 +1,37 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +services: + apisix: + build: + context: .. + dockerfile: .devcontainer/Dockerfile + command: sleep infinity + volumes: + - ..:/workspace:cached + network_mode: service:etcd + etcd: + image: bitnami/etcd:3.5 + volumes: + - etcd_data:/bitnami/etcd + environment: + ALLOW_NONE_AUTHENTICATION: "yes" + ETCD_ADVERTISE_CLIENT_URLS: "http://127.0.0.1:2379" + ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379" + +volumes: + etcd_data: diff --git a/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/bug_report.yml b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 0000000..de8c29d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,68 @@ +name: "Bug Report" +description: Report a bug to help improve the project. +title: "bug: " +body: + - type: markdown + attributes: + value: | + Thank you for taking the time to report this bug! 
+ + _The more information you share, the faster we can identify and fix the bug._ + + Prior to opening the issue, please make sure that you: + + - Use English to communicate. + - Search the [open issues](https://github.com/apache/apisix/issues) and [discussion forum](https://github.com/apache/apisix/discussions) to avoid duplicating the issue. + + - type: textarea + id: current-behavior + attributes: + label: Current Behavior + description: Describe the issue you are facing. + placeholder: | + What is the issue with the current behavior? + validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: Expected Behavior + description: Describe what you expected to happen. + placeholder: | + What did you expect to happen instead? + validations: + required: false + - type: textarea + id: error + attributes: + label: Error Logs + description: Paste the error logs if any. You can change the [log level](https://github.com/apache/apisix/blob/617c325628f33961be67f61f0fa8002afc370e42/docs/en/latest/FAQ.md#how-to-change-the-log-level) to get a verbose error log. + validations: + required: false + - type: textarea + id: steps + attributes: + label: Steps to Reproduce + description: Share the steps you took so that we can reproduce the issue. Reports without proper steps details will likely be closed. + placeholder: | + 1. Run APISIX via the Docker image. + 2. Create a Route with the Admin API. + 3. Try configuring ... + 4. ... + validations: + required: true + - type: textarea + id: environment + attributes: + label: Environment + description: Share your environment details. Reports without proper environment details will likely be closed. 
+ value: | + - APISIX version (run `apisix version`): + - Operating system (run `uname -a`): + - OpenResty / Nginx version (run `openresty -V` or `nginx -V`): + - etcd version, if relevant (run `curl http://127.0.0.1:9090/v1/server_info`): + - APISIX Dashboard version, if relevant: + - Plugin runner version, for issues related to plugin runners: + - LuaRocks version, for installation issues (run `luarocks --version`): + validations: + required: true diff --git a/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/config.yml b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..f07f3eb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: APISIX Discussion Forum + url: https://github.com/apache/apisix/discussions + about: Please ask and answer questions here. diff --git a/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/feature_request.yml b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 0000000..a2b5714 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,23 @@ +name: "Feature Request" +description: Suggest an enhancement to APISIX. +title: "feat: As a user, I want to ..., so that ..." +body: + - type: markdown + attributes: + value: | + _The more information you share, the faster we can help you._ + + Prior to opening the issue, please make sure that you: + + - Use English to communicate. + - Search the [open issues](https://github.com/apache/apisix/issues) and [discussion forum](https://github.com/apache/apisix/discussions) to avoid duplicating the issue. + + - type: textarea + id: description + attributes: + label: Description + description: Describe the feature you would like to see. + placeholder: | + As a user, I want to ..., so that... 
+ validations: + required: true diff --git a/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/improve_docs.yml b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/improve_docs.yml new file mode 100644 index 0000000..ef737b0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/improve_docs.yml @@ -0,0 +1,33 @@ +name: "Documentation Issue" +description: Issues related to documentation. +title: "docs: " +labels: [doc] +body: + - type: markdown + attributes: + value: | + _The more information you share, the faster we can help you._ + + Prior to opening the issue, please make sure that you: + + - Use English to communicate. + - Search the [open issues](https://github.com/apache/apisix/issues) and [discussion forum](https://github.com/apache/apisix/discussions) to avoid duplicating the issue. + + - type: textarea + id: current-state + attributes: + label: Current State + description: Describe the current state of the documentation. + placeholder: | + The documentation for the API in this page (url) is missing ... + validations: + required: true + - type: textarea + id: desired-state + attributes: + label: Desired State + description: Describe the desired state the documentation should be in. + placeholder: | + There should be line mentioning how the API behaves when ... + validations: + required: true diff --git a/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/request_help.yml b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/request_help.yml new file mode 100644 index 0000000..dc0d6b9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/ISSUE_TEMPLATE/request_help.yml @@ -0,0 +1,36 @@ +name: "Request Help" +description: Stuck? Ask for help! +title: "help request: " +body: + - type: markdown + attributes: + value: | + _The more information you share, the faster we can help you._ + + Prior to opening the issue, please make sure that you: + + - Use English to communicate. 
+ - Search the [open issues](https://github.com/apache/apisix/issues) and [discussion forum](https://github.com/apache/apisix/discussions) to avoid duplicating the issue. + + - type: textarea + id: description + attributes: + label: Description + description: Describe the issue you are facing and what you need help with. + validations: + required: true + - type: textarea + id: environment + attributes: + label: Environment + description: Share your environment details. Reports without proper environment details will likely be closed. + value: | + - APISIX version (run `apisix version`): + - Operating system (run `uname -a`): + - OpenResty / Nginx version (run `openresty -V` or `nginx -V`): + - etcd version, if relevant (run `curl http://127.0.0.1:9090/v1/server_info`): + - APISIX Dashboard version, if relevant: + - Plugin runner version, for issues related to plugin runners: + - LuaRocks version, for installation issues (run `luarocks --version`): + validations: + required: true diff --git a/CloudronPackages/APISIX/apisix-source/.github/PULL_REQUEST_TEMPLATE.md b/CloudronPackages/APISIX/apisix-source/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..5860ce4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,33 @@ +### Description + + + + +#### Which issue(s) this PR fixes: + +Fixes # + +### Checklist + +- [ ] I have explained the need for this PR and the problem it solves +- [ ] I have explained the changes or the new features added to this PR +- [ ] I have added tests corresponding to this change +- [ ] I have updated the documentation to reflect this change +- [ ] I have verified that this change is backward compatible (If not, please discuss on the [APISIX mailing list](https://github.com/apache/apisix/tree/master#community) first) + + diff --git a/CloudronPackages/APISIX/apisix-source/.github/dependabot.yml b/CloudronPackages/APISIX/apisix-source/.github/dependabot.yml new file mode 100644 
index 0000000..5737055 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/dependabot.yml @@ -0,0 +1,10 @@ +# Set update schedule for GitHub Actions + +version: 2 +updates: + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + # Check for updates to GitHub Actions every weekday + interval: "daily" diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/build.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/build.yml new file mode 100644 index 0000000..185d919 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/build.yml @@ -0,0 +1,177 @@ +name: CI + +on: + push: + branches: [master, 'release/**'] + paths-ignore: + - 'docs/**' + - '**/*.md' + pull_request: + branches: [master, 'release/**'] + paths-ignore: + - 'docs/**' + - '**/*.md' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: + - ubuntu-latest + os_name: + - linux_openresty + events_module: + - lua-resty-worker-events + - lua-resty-events + test_dir: + - t/plugin/[a-k]* + - t/plugin/[l-z]* + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/http3/admin t/misc + - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library t/xrpc + + runs-on: ${{ matrix.platform }} + timeout-minutes: 90 + env: + SERVER_NAME: ${{ matrix.os_name }} + OPENRESTY_VERSION: default + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: "1.17" + + - name: Cache deps + uses: actions/cache@v4 + env: + cache-name: cache-deps + with: + path: deps + key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-${{ hashFiles('apisix-master-0.rockspec') }} + + - name: Extract test 
type + shell: bash + id: test_env + run: | + test_dir="${{ matrix.test_dir }}" + if [[ $test_dir =~ 't/plugin' ]]; then + echo "type=plugin" >>$GITHUB_OUTPUT + fi + if [[ $test_dir =~ 't/admin ' ]]; then + echo "type=first" >>$GITHUB_OUTPUT + fi + if [[ $test_dir =~ ' t/xrpc' ]]; then + echo "type=last" >>$GITHUB_OUTPUT + fi + + - name: Free disk space + run: | + bash ./ci/free_disk_space.sh + + - name: Linux Before install + run: sudo ./ci/${{ matrix.os_name }}_runner.sh before_install + + - name: Linux Install + run: | + sudo --preserve-env=OPENRESTY_VERSION \ + ./ci/${{ matrix.os_name }}_runner.sh do_install + + - name: Linux launch common services + run: | + make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml + sudo ./ci/init-common-test-service.sh + + - name: Cache images + id: cache-images + uses: actions/cache@v4 + env: + cache-name: cache-apisix-docker-images + with: + path: docker-images-backup + key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }} + + - if: ${{ steps.cache-images.outputs.cache-hit == 'true' }} + name: Load saved docker images + run: | + if [[ -f docker-images-backup/apisix-images.tar ]]; then + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before + docker load --input docker-images-backup/apisix-images.tar + echo "loaded docker images" + + # preserve storage space + rm docker-images-backup/apisix-images.tar + + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + if [[ ${{ steps.test_env.outputs.type }} != first ]]; then + sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after + fi + fi + - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }} + name: Linux launch services + run: | + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ 
steps.test_env.outputs.type }}-test-service.sh before + [[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after + echo "Linux launch services, done." + - name: Start Dubbo Backend + if: matrix.os_name == 'linux_openresty' && (steps.test_env.outputs.type == 'plugin' || steps.test_env.outputs.type == 'last') + run: | + cur_dir=$(pwd) + sudo apt update + sudo apt install -y maven openjdk-8-jdk + sudo update-java-alternatives --set java-1.8.0-openjdk-amd64 + export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 + export PATH=$JAVA_HOME/bin:$PATH + cd t/lib/dubbo-backend + mvn package + cd dubbo-backend-provider/target + java \ + -Djava.net.preferIPv4Stack=true \ + -jar dubbo-demo-provider.one-jar.jar > /tmp/java.log & + cd $cur_dir/t/lib/dubbo-serialization-backend + mvn package + cd dubbo-serialization-backend-provider/target + java \ + -Djava.net.preferIPv4Stack=true \ + -jar dubbo-demo-provider.one-jar.jar > /tmp/java2.log & + + - name: Build xDS library + if: steps.test_env.outputs.type == 'last' + run: | + cd t/xds-library + go build -o libxds.so -buildmode=c-shared main.go export.go + + - name: Build wasm code + if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'last' + run: | + export TINYGO_VER=0.20.0 + wget https://github.com/tinygo-org/tinygo/releases/download/v${TINYGO_VER}/tinygo_${TINYGO_VER}_amd64.deb 2>/dev/null + sudo dpkg -i tinygo_${TINYGO_VER}_amd64.deb + cd t/wasm && find . 
-type f -name "*.go" | xargs -Ip tinygo build -o p.wasm -scheduler=none -target=wasi p + + - name: Linux Script + env: + TEST_FILE_SUB_DIR: ${{ matrix.test_dir }} + TEST_EVENTS_MODULE: ${{ matrix.events_module }} + run: sudo -E ./ci/${{ matrix.os_name }}_runner.sh script + + - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }} + name: Save docker images + run: | + echo "start backing up, $(date)" + bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }} + echo "backup done, $(date)" diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/check-changelog.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/check-changelog.yml new file mode 100644 index 0000000..0efef10 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/check-changelog.yml @@ -0,0 +1,27 @@ +name: Check Changelog + +on: + push: + paths: + - 'CHANGELOG.md' + - 'ci/check_changelog_prs.ts' + pull_request: + paths: + - 'CHANGELOG.md' + - 'ci/check_changelog_prs.ts' + +jobs: + check-changelog: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + + - name: Run check_changelog_prs script + working-directory: ci + run: | + curl -fsSL https://bun.sh/install | bash + export PATH="$HOME/.bun/bin:$PATH" + bun run check_changelog_prs.ts diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/cli.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/cli.yml new file mode 100644 index 0000000..1840f9a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/cli.yml @@ -0,0 +1,68 @@ +name: CLI Test + +on: + push: + branches: [master, 'release/**'] + paths-ignore: + - 'docs/**' + - '**/*.md' + pull_request: + branches: [master, 'release/**'] + paths-ignore: + - 'docs/**' + - '**/*.md' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: 
+ build: + strategy: + fail-fast: false + matrix: + platform: + - ubuntu-latest + job_name: + - linux_apisix_current_luarocks + - linux_apisix_current_luarocks_in_customed_nginx + + runs-on: ${{ matrix.platform }} + timeout-minutes: 30 + env: + SERVER_NAME: ${{ matrix.job_name }} + OPENRESTY_VERSION: default + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Cache deps + uses: actions/cache@v4 + env: + cache-name: cache-deps + with: + path: deps + key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.job_name }}-${{ hashFiles('apisix-master-0.rockspec') }} + + - name: Linux launch common services + run: | + project_compose_ci=ci/pod/docker-compose.common.yml make ci-env-up + + - name: Linux Before install + run: sudo ./ci/${{ matrix.job_name }}_runner.sh before_install + + - name: Linux Install + run: | + sudo --preserve-env=OPENRESTY_VERSION \ + ./ci/${{ matrix.job_name }}_runner.sh do_install + + - name: Linux Script + run: | + sudo chmod +x /home/runner + sudo ./ci/${{ matrix.job_name }}_runner.sh script diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/close-unresponded.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/close-unresponded.yml new file mode 100644 index 0000000..9508af7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/close-unresponded.yml @@ -0,0 +1,39 @@ +name: Check Issues + +on: + workflow_dispatch: + schedule: + - cron: '0 10 * * *' + +permissions: + contents: read + +jobs: + prune_stale: + permissions: + issues: write # for actions/stale to close stale issues + name: Prune Unresponded + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Prune Stale + uses: actions/stale@v8 + with: + days-before-issue-stale: 60 + days-before-issue-close: 3 + stale-issue-message: > + Due to lack of the reporter's response this issue has been labeled with "no response". + It will be close in 3 days if no further activity occurs. 
If this issue is still + relevant, please simply write any comment. Even if closed, you can still revive the + issue at any time or discuss it on the dev@apisix.apache.org list. + Thank you for your contributions. + close-issue-message: > + This issue has been closed due to lack of activity. If you think that + is incorrect, or the issue requires additional review, you can revive the issue at + any time. + # Issues with these labels will never be considered stale. + only-labels: 'wait for update' + stale-issue-label: 'no response' + exempt-issue-labels: "don't close" + ascending: true diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/code-lint.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/code-lint.yml new file mode 100644 index 0000000..0fdbf28 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/code-lint.yml @@ -0,0 +1,48 @@ +name: Code Lint + +on: + pull_request: + branches: [master, 'release/**'] + paths-ignore: + - 'docs/**' + - '**/*.md' + +permissions: + contents: read + +jobs: + lint: + runs-on: ubuntu-latest + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - name: Install + run: | + . ./ci/common.sh + export_or_prefix + export OPENRESTY_VERSION=default + + sudo -E ./ci/linux-install-openresty.sh + ./utils/linux-install-luarocks.sh + sudo -E luarocks install luacheck + + - name: Script + run: | + . 
./ci/common.sh + export_or_prefix + make lint + + sc-lint: + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Shellcheck code + run: | + scversion="latest" + wget -qO- "https://github.com/koalaman/shellcheck/releases/download/${scversion?}/shellcheck-${scversion?}.linux.x86_64.tar.xz" | tar -xJv + cp -av "shellcheck-${scversion}/shellcheck" /usr/local/bin/ + shellcheck --version + git ls-files -- "*.sh" | xargs -t shellcheck diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/doc-lint.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/doc-lint.yml new file mode 100644 index 0000000..962671d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/doc-lint.yml @@ -0,0 +1,58 @@ +name: Doc Lint + +on: + push: + paths: + - "docs/**" + - "**/*.md" + - ".github/workflows/doc-lint.yml" + pull_request: + branches: [master, "release/**"] + paths: + - "docs/**" + - "**/*.md" + - ".github/workflows/doc-lint.yml" + +permissions: + contents: read + +jobs: + markdownlint: + name: 🍇 Markdown + runs-on: ubuntu-latest + timeout-minutes: 1 + steps: + - uses: actions/checkout@v4 + - name: 🚀 Use Node.js + uses: actions/setup-node@v4.4.0 + with: + node-version: "12.x" + - run: npm install -g markdownlint-cli@0.25.0 + - run: markdownlint '**/*.md' + - name: check category + run: | + ./utils/check-category.py + - name: check Chinese doc + run: | + sudo pip3 install zhon + ./utils/fix-zh-doc-segment.py > \ + /tmp/check.log 2>&1 || (cat /tmp/check.log && exit 1) + if grep "find broken newline in file: " /tmp/check.log; then + cat /tmp/check.log + echo "Newline can't appear in the middle of Chinese sentences." + echo "You need to run ./utils/fix-zh-doc-segment.py to fix them." 
+ exit 1 + fi + + Chinse-Copywriting-lint: + name: Chinese Copywriting + runs-on: ubuntu-latest + timeout-minutes: 1 + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - name: Check Chinese copywriting + uses: ./.github/actions/autocorrect + with: + args: autocorrect --lint --no-diff-bg-color ./docs/zh/latest/ diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/gm-cron.yaml.disabled b/CloudronPackages/APISIX/apisix-source/.github/workflows/gm-cron.yaml.disabled new file mode 100644 index 0000000..46447e4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/gm-cron.yaml.disabled @@ -0,0 +1,182 @@ +name: CI GM (cron) + +on: + schedule: + # UTC 7:30 every Friday + - cron: "30 7 * * 5" + +permissions: + contents: read + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: + - ubuntu-latest + os_name: + - linux_openresty_tongsuo + test_dir: + - t/plugin/[a-k]* + - t/plugin/[l-z]* + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc + - t/node t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library t/xrpc + + runs-on: ${{ matrix.platform }} + timeout-minutes: 90 + env: + SERVER_NAME: ${{ matrix.os_name }} + OPENRESTY_VERSION: default + + # TODO: refactor the workflows to reduce duplicate parts. Maybe we can write them in shell + # scripts or a separate action? 
+ steps: + - name: Check out code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: "1.17" + + - name: Cache deps + uses: actions/cache@v4 + env: + cache-name: cache-deps + with: + path: deps + key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-${{ hashFiles('apisix-master-0.rockspec') }} + + - name: Cache Tongsuo compilation + id: cache-tongsuo + uses: actions/cache@v4 + env: + cache-name: cache-tongsuo + with: + path: ./tongsuo + key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-tongsuo-ver + + - name: Test SSL Env + id: test_ssl_env + shell: bash + if: steps.cache-tongsuo.outputs.cache-hit != 'true' + run: | + echo "compile_tongsuo=true" >>$GITHUB_OUTPUT + + - name: Extract test type + shell: bash + id: test_env + run: | + test_dir="${{ matrix.test_dir }}" + if [[ $test_dir =~ 't/plugin' ]]; then + echo "type=plugin" >>$GITHUB_OUTPUT + fi + if [[ $test_dir =~ 't/admin ' ]]; then + echo "type=first" >>$GITHUB_OUTPUT + fi + if [[ $test_dir =~ ' t/xrpc' ]]; then + echo "type=last" >>$GITHUB_OUTPUT + fi + + - name: Free disk space + run: | + bash ./ci/free_disk_space.sh + + - name: Linux launch common services + run: | + make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml + sudo ./ci/init-common-test-service.sh + + - name: Cache images + id: cache-images + uses: actions/cache@v4 + env: + cache-name: cache-apisix-docker-images + with: + path: docker-images-backup + key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }} + + - if: ${{ steps.cache-images.outputs.cache-hit == 'true' }} + name: Load saved docker images + run: | + if [[ -f docker-images-backup/apisix-images.tar ]]; then + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before + docker load --input 
docker-images-backup/apisix-images.tar + rm docker-images-backup/apisix-images.tar + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + echo "loaded docker images" + if [[ ${{ steps.test_env.outputs.type }} != first ]]; then + sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after + fi + fi + - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }} + name: Linux launch services + run: | + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before + [[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after + echo "Linux launch services, done." + + - name: Start Dubbo Backend + if: steps.test_env.outputs.type == 'plugin' + run: | + cur_dir=$(pwd) + sudo apt update + sudo apt install -y maven openjdk-8-jdk + sudo update-java-alternatives --set java-1.8.0-openjdk-amd64 + export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 + export PATH=$JAVA_HOME/bin:$PATH + cd t/lib/dubbo-backend + mvn package + cd dubbo-backend-provider/target + java \ + -Djava.net.preferIPv4Stack=true \ + -jar dubbo-demo-provider.one-jar.jar > /tmp/java.log & + cd $cur_dir/t/lib/dubbo-serialization-backend + mvn package + cd dubbo-serialization-backend-provider/target + java \ + -Djava.net.preferIPv4Stack=true \ + -jar dubbo-demo-provider.one-jar.jar > /tmp/java2.log & + + - name: Build xDS library + if: steps.test_env.outputs.type == 'last' + run: | + cd t/xds-library + go build -o libxds.so -buildmode=c-shared main.go export.go + + - name: Build wasm code + if: steps.test_env.outputs.type == 'last' + run: | + export TINYGO_VER=0.20.0 + wget 
https://github.com/tinygo-org/tinygo/releases/download/v${TINYGO_VER}/tinygo_${TINYGO_VER}_amd64.deb 2>/dev/null + sudo dpkg -i tinygo_${TINYGO_VER}_amd64.deb + cd t/wasm && find . -type f -name "*.go" | xargs -Ip tinygo build -o p.wasm -scheduler=none -target=wasi p + + - name: Linux Before install + env: + COMPILE_TONGSUO: ${{ steps.test_ssl_env.outputs.compile_tongsuo }} + run: | + sudo --preserve-env=COMPILE_TONGSUO \ + ./ci/${{ matrix.os_name }}_runner.sh before_install + + - name: Linux Install + run: | + sudo --preserve-env=OPENRESTY_VERSION \ + ./ci/${{ matrix.os_name }}_runner.sh do_install + + - name: Linux Script + env: + TEST_FILE_SUB_DIR: ${{ matrix.test_dir }} + run: sudo -E ./ci/${{ matrix.os_name }}_runner.sh script + + - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }} + name: Save docker images + run: | + echo "start backing up, $(date)" + bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }} + echo "backup done, $(date)" diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/gm.yml.disabled b/CloudronPackages/APISIX/apisix-source/.github/workflows/gm.yml.disabled new file mode 100644 index 0000000..44260e0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/gm.yml.disabled @@ -0,0 +1,93 @@ +name: CI GM + +on: + push: + branches: [master] + paths-ignore: + - 'docs/**' + - '**/*.md' + pull_request: + branches: [master] + paths-ignore: + - 'docs/**' + - '**/*.md' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: + - ubuntu-latest + os_name: + - linux_openresty_tongsuo + test_dir: + - t/gm + + runs-on: ${{ matrix.platform }} + timeout-minutes: 90 + env: + SERVER_NAME: ${{ matrix.os_name }} + OPENRESTY_VERSION: default + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: 
+ submodules: recursive + + - name: Cache deps + uses: actions/cache@v4 + env: + cache-name: cache-deps + with: + path: deps + key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-${{ hashFiles('apisix-master-0.rockspec') }} + + - name: Cache Tongsuo compilation + id: cache-tongsuo + uses: actions/cache@v4 + env: + cache-name: cache-tongsuo + with: + path: ./tongsuo + # TODO: use a fixed release once they have created one. + # See https://github.com/Tongsuo-Project/Tongsuo/issues/318 + key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}-tongsuo-ver + + - name: Test SSL Env + id: test_ssl_env + shell: bash + if: steps.cache-tongsuo.outputs.cache-hit != 'true' + run: | + echo "compile_tongsuo=true" >>$GITHUB_OUTPUT + + - name: Linux launch common services + run: | + make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml + sudo ./ci/init-common-test-service.sh + + - name: Linux Before install + env: + COMPILE_TONGSUO: ${{ steps.test_ssl_env.outputs.compile_tongsuo }} + run: | + sudo --preserve-env=COMPILE_TONGSUO \ + ./ci/${{ matrix.os_name }}_runner.sh before_install + + - name: Linux Do install + run: | + sudo --preserve-env=OPENRESTY_VERSION \ + ./ci/${{ matrix.os_name }}_runner.sh do_install + + - name: Linux Script + env: + TEST_FILE_SUB_DIR: ${{ matrix.test_dir }} + run: | + sudo -E ./ci/${{ matrix.os_name }}_runner.sh script diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/kubernetes-ci.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/kubernetes-ci.yml new file mode 100644 index 0000000..16f3343 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/kubernetes-ci.yml @@ -0,0 +1,79 @@ +name: CI Kubernetes + +on: + push: + branches: [ master, 'release/**' ] + paths-ignore: + - 'docs/**' + - '**/*.md' + pull_request: + branches: [ master, 'release/**' ] + paths-ignore: + - 'docs/**' + - '**/*.md' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref == 
'refs/heads/master' && github.run_number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + kubernetes-discovery: + strategy: + fail-fast: false + matrix: + platform: + - ubuntu-latest + os_name: + - linux_openresty + + runs-on: ${{ matrix.platform }} + timeout-minutes: 15 + env: + SERVER_NAME: ${{ matrix.os_name }} + OPENRESTY_VERSION: default + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Setup kubernetes cluster + run: | + KUBERNETES_VERSION="v1.22.7" + + kind create cluster --name apisix-test --config ./t/kubernetes/configs/kind.yaml --image kindest/node:${KUBERNETES_VERSION} + + kubectl wait --for=condition=Ready nodes --all --timeout=180s + + kubectl apply -f ./t/kubernetes/configs/account.yaml + + kubectl apply -f ./t/kubernetes/configs/endpoint.yaml + + KUBERNETES_CLIENT_TOKEN_CONTENT=$(kubectl get secrets | grep apisix-test | awk '{system("kubectl get secret -o jsonpath={.data.token} "$1" | base64 --decode")}') + + KUBERNETES_CLIENT_TOKEN_DIR="/tmp/var/run/secrets/kubernetes.io/serviceaccount" + + KUBERNETES_CLIENT_TOKEN_FILE=${KUBERNETES_CLIENT_TOKEN_DIR}/token + + mkdir -p ${KUBERNETES_CLIENT_TOKEN_DIR} + echo -n "$KUBERNETES_CLIENT_TOKEN_CONTENT" > ${KUBERNETES_CLIENT_TOKEN_FILE} + + echo 'KUBERNETES_SERVICE_HOST=127.0.0.1' + echo 'KUBERNETES_SERVICE_PORT=6443' + echo 'KUBERNETES_CLIENT_TOKEN='"${KUBERNETES_CLIENT_TOKEN_CONTENT}" + echo 'KUBERNETES_CLIENT_TOKEN_FILE='${KUBERNETES_CLIENT_TOKEN_FILE} + + kubectl proxy -p 6445 & + + - name: Linux Install + run: | + sudo ./ci/${{ matrix.os_name }}_runner.sh before_install + sudo --preserve-env=OPENRESTY_VERSION ./ci/${{ matrix.os_name }}_runner.sh do_install + + - name: Run test cases + run: | + ./ci/kubernetes-ci.sh run_case diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/license-checker.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/license-checker.yml new file mode 100644 
index 0000000..bae5d6c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/license-checker.yml @@ -0,0 +1,37 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +name: License checker + +on: + push: + branches: [master, 'release/**'] + pull_request: + branches: [master, 'release/**'] + +jobs: + check-license: + runs-on: ubuntu-latest + timeout-minutes: 3 + + steps: + - uses: actions/checkout@v4 + - name: Check License Header + uses: apache/skywalking-eyes@v0.6.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/link-check.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/link-check.yml new file mode 100644 index 0000000..75758a9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/link-check.yml @@ -0,0 +1,49 @@ +name: 'Link Checker' + +# **What it does**: Renders the content of every page and check all internal links. +# **Why we have it**: To make sure all links connect correctly. +# **Who does it impact**: Docs content. 
+ +on: + workflow_dispatch: + push: + # branches: [master, 'release/**'] + paths: + - '**/*.md' + - '**/link-check.yml' + pull_request: + branches: [master, "release/**"] + paths: + - '**/*.md' + - '**/link-check.yml' + +permissions: + contents: read + # Needed for the 'trilom/file-changes-action' action + pull-requests: read + +# This allows a subsequently queued workflow run to interrupt previous runs +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + check-links: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Get script + run: | + wget https://raw.githubusercontent.com/xuruidong/markdown-link-checker/main/link_checker.py + + - name: Setup python + uses: actions/setup-python@v5 + with: + python-version: '3.9' + + - name: Link check (critical, all files) + run: | + # python link_checker.py ./ --enable-external --ignore "http://apisix.iresty.com" "https://www.upyun.com" "https://github.com/apache/apisix/actions/workflows/build.yml/badge.svg" "https://httpbin.org/" "https://en.wikipedia.org/wiki/Cache" + python link_checker.py ./ diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/lint.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/lint.yml new file mode 100644 index 0000000..a3244ca --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/lint.yml @@ -0,0 +1,46 @@ +name: ❄️ Lint + +on: [push, pull_request] + +permissions: + contents: read + +jobs: + misc: + name: misc checker + runs-on: ubuntu-latest + steps: + - name: Check out code. 
+ uses: actions/checkout@v4 + - name: spell check + run: | + pip install codespell==2.1.0 + # codespell considers some repo name in go.sum are misspelled + git grep --cached -l '' | grep -v go.sum | grep -v pnpm-lock.yaml |xargs codespell --ignore-words=.ignore_words --skip="*.ts,*.mts" + - name: Merge conflict + run: | + bash ./utils/check-merge-conflict.sh + - name: Plugin Code + run: | + bash ./utils/check-plugins-code.sh + + ci-eclint: + runs-on: ubuntu-latest + timeout-minutes: 5 + + steps: + - name: Check out code + uses: actions/checkout@v4 + + - name: Setup Nodejs env + uses: actions/setup-node@v4.4.0 + with: + node-version: '12' + + - name: Install eclint + run: | + sudo npm install -g eclint + + - name: Run eclint + run: | + eclint check diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/push-dev-image-on-commit.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/push-dev-image-on-commit.yml new file mode 100644 index 0000000..793347b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/push-dev-image-on-commit.yml @@ -0,0 +1,109 @@ +name: Build and Push `apisix:dev` to DockerHub on Commit + +on: + pull_request: + paths-ignore: + - "docs/**" + - "**/*.md" + push: + paths-ignore: + - "docs/**" + - "**/*.md" + workflow_dispatch: + +jobs: + build-test-push: + strategy: + matrix: + include: + - runner: ubuntu-24.04 + arch: amd64 + - runner: ubuntu-24.04-arm + arch: arm64 + + runs-on: ${{ matrix.runner }} + + env: + APISIX_DOCKER_TAG: master-debian-dev + ENV_OS_ARCH: ${{ matrix.arch }} + DOCKER_BUILDKIT: 1 + + steps: + - name: Check out the repo + uses: actions/checkout@v4 + + - name: Build APISIX Dashboard + run: | + # install node.js and pnpm + sudo n lts + corepack enable pnpm + + # prepare apisix-dashboard source code + source .requirements + git clone --revision=${APISIX_DASHBOARD_COMMIT} --depth 1 https://github.com/apache/apisix-dashboard.git + pushd apisix-dashboard + + # compile + pnpm install 
--frozen-lockfile + pnpm run build + popd + + # copy the dist files to the ui directory + mkdir ui + cp -r apisix-dashboard/dist/* ui/ + rm -r apisix-dashboard + + - name: Build and run + run: | + make build-on-debian-dev + docker compose -f ./docker/compose/docker-compose-master.yaml up -d + sleep 30 + docker logs compose-apisix-1 + + - name: Test APISIX + run: | + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + { + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { "httpbin.org:80": 1 } + } + }' + + result_code=$(curl -I -m 10 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/get) + if [[ $result_code -ne 200 ]]; then + printf "result_code: %s\n" "$result_code" + exit 125 + fi + + - name: Login to Docker Hub + if: github.ref == 'refs/heads/master' + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Push apisix image to Docker Hub + if: github.ref == 'refs/heads/master' + run: | + make push-on-debian-dev + + merge-tags: + needs: build-test-push + if: github.ref == 'refs/heads/master' + runs-on: ubuntu-latest + steps: + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Check out the repo + uses: actions/checkout@v4 + + - name: Merge architecture-specific tags + run: | + make merge-dev-tags diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/redhat-ci.yaml b/CloudronPackages/APISIX/apisix-source/.github/workflows/redhat-ci.yaml new file mode 100644 index 0000000..190919c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/redhat-ci.yaml @@ -0,0 +1,179 @@ +name: CI Redhat UBI - Daily + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + branches: [master] + paths-ignore: + - 'docs/**' + - '**/*.md' +concurrency: + group: ${{ github.workflow 
}}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + test_apisix: + name: run ci on redhat ubi + runs-on: ubuntu-latest + timeout-minutes: 90 + strategy: + fail-fast: false + matrix: + events_module: + - lua-resty-worker-events + - lua-resty-events + test_dir: + - t/plugin/[a-k]* + - t/plugin/[l-z]* + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc + - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/xds-library + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Cache deps + uses: actions/cache@v4 + env: + cache-name: cache-deps + with: + path: deps + key: ${{ runner.os }}-${{ env.cache-name }}-ubi8.6-${{ hashFiles('apisix-master-0.rockspec') }} + + - name: Extract branch name + if: ${{ startsWith(github.ref, 'refs/heads/release/') }} + id: branch_env + shell: bash + run: | + echo "version=${GITHUB_REF##*/}" >>$GITHUB_OUTPUT + + - name: Extract test type + shell: bash + id: test_env + run: | + test_dir="${{ matrix.test_dir }}" + if [[ $test_dir =~ 't/plugin' ]]; then + echo "type=plugin" >>$GITHUB_OUTPUT + fi + if [[ $test_dir =~ 't/admin ' ]]; then + echo "type=first" >>$GITHUB_OUTPUT + fi + if [[ $test_dir =~ ' t/xds-library' ]]; then + echo "type=last" >>$GITHUB_OUTPUT + fi + + - name: Free disk space + run: | + bash ./ci/free_disk_space.sh + + - name: Linux launch common services + run: | + make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml + sudo ./ci/init-common-test-service.sh + + - name: Build rpm package + if: ${{ startsWith(github.ref, 'refs/heads/release/') }} + run: | + export VERSION=${{ steps.branch_env.outputs.version }} + sudo gem install --no-document fpm + git clone --depth 1 https://github.com/api7/apisix-build-tools.git + + # move codes under build tool + mkdir ./apisix-build-tools/apisix + for dir in `ls|grep -v 
"^apisix-build-tools$"`;do cp -r $dir ./apisix-build-tools/apisix/;done + + cd apisix-build-tools + make package type=rpm app=apisix version=${VERSION} checkout=release/${VERSION} image_base=ubi image_tag=8.6 local_code_path=./apisix + cd .. + rm -rf $(ls -1 -I apisix-build-tools -I t -I utils -I ci --ignore=Makefile -I "*.rockspec") + + - name: Start Dubbo Backend + run: | + cur_dir=$(pwd) + sudo apt update + sudo apt install -y maven + cd t/lib/dubbo-backend + mvn package + cd dubbo-backend-provider/target + java \ + -Djava.net.preferIPv4Stack=true \ + -jar dubbo-demo-provider.one-jar.jar > /tmp/java.log & + cd $cur_dir/t/lib/dubbo-serialization-backend + mvn package + cd dubbo-serialization-backend-provider/target + java \ + -Djava.net.preferIPv4Stack=true \ + -jar dubbo-demo-provider.one-jar.jar > /tmp/java2.log & + + - name: Build xDS library + if: steps.test_env.outputs.type == 'last' + run: | + cd t/xds-library + go build -o libxds.so -buildmode=c-shared main.go export.go + + - name: Run redhat docker and mapping apisix into container + env: + TEST_FILE_SUB_DIR: ${{ matrix.test_dir }} + TEST_EVENTS_MODULE: ${{ matrix.events_module }} + run: | + docker run -itd -v ${{ github.workspace }}:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --env TEST_EVENTS_MODULE="$TEST_EVENTS_MODULE" --name ubiInstance --net="host" --dns 8.8.8.8 --dns-search apache.org registry.access.redhat.com/ubi8/ubi:8.6 /bin/bash + + - name: Cache images + id: cache-images + uses: actions/cache@v4 + env: + cache-name: cache-apisix-docker-images + with: + path: docker-images-backup + key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }} + + - if: ${{ steps.cache-images.outputs.cache-hit == 'true' }} + name: Load saved docker images + run: | + if [[ -f docker-images-backup/apisix-images.tar ]]; then + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ 
steps.test_env.outputs.type }}-test-service.sh before + docker load --input docker-images-backup/apisix-images.tar + rm docker-images-backup/apisix-images.tar + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + echo "loaded docker images" + if [[ ${{ steps.test_env.outputs.type }} != first ]]; then + sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after + fi + fi + - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }} + name: Linux launch services + run: | + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before + [[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after + echo "Linux launch services, done." + + - name: Install dependencies + run: | + docker exec ubiInstance bash -c "cd apisix && chmod +x ./ci/redhat-ci.sh && ./ci/redhat-ci.sh install_dependencies" + + - name: Install rpm package + if: ${{ startsWith(github.ref, 'refs/heads/release/') }} + run: | + docker exec ubiInstance bash -c "cd apisix && rpm -iv --prefix=/apisix ./apisix-build-tools/output/apisix-${{ steps.branch_env.outputs.version }}-0.ubi8.6.x86_64.rpm" + # Dependencies are attached with rpm, so revert `make deps` + docker exec ubiInstance bash -c "cd apisix && rm -rf deps" + docker exec ubiInstance bash -c "cd apisix && mv usr/bin . && mv usr/local/apisix/* ." 
+ + - name: Run test cases + run: | + docker exec ubiInstance bash -c "cd apisix && chmod +x ./ci/redhat-ci.sh && ./ci/redhat-ci.sh run_case" + + - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }} + name: Save docker images + run: | + echo "start backing up, $(date)" + bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }} + echo "backup done, $(date)" diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/semantic.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/semantic.yml new file mode 100644 index 0000000..85df2c0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/semantic.yml @@ -0,0 +1,35 @@ +name: "PR Lint" + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +jobs: + main: + name: Validate PR title + runs-on: ubuntu-latest + steps: + - name: Check out repository code + uses: actions/checkout@v4 + with: + submodules: recursive + - uses: ./.github/actions/action-semantic-pull-request + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + types: | + feat + fix + docs + style + refactor + perf + test + build + ci + chore + revert + change diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/source-install.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/source-install.yml new file mode 100644 index 0000000..c21435f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/source-install.yml @@ -0,0 +1,124 @@ +name: Source Code Install + +on: + push: + branches: [master, 'release/**'] + paths-ignore: + - 'docs/**' + - '**/*.md' + pull_request: + branches: [master, 'release/**'] + paths-ignore: + - 'docs/**' + - '**/*.md' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + install-on-multi-platform: + strategy: + fail-fast: false + matrix: + platform: + - ubuntu-latest + 
os_platform: + - ubuntu + - redhat + services: + etcd: + image: bitnami/etcd:3.5.4 + ports: + - 2379:2379 + - 2380:2380 + env: + ALLOW_NONE_AUTHENTICATION: yes + ETCD_ADVERTISE_CLIENT_URLS: http://0.0.0.0:2379 + httpbin: + image: kennethreitz/httpbin + ports: + - 8088:80 + + runs-on: ${{ matrix.platform }} + timeout-minutes: 30 + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Cache deps + uses: actions/cache@v4 + env: + cache-name: cache-deps + with: + path: deps + key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_platform }}-${{ hashFiles('apisix-master-0.rockspec') }} + + - name: Install and start apisix on ${{ matrix.os_platform }} + env: + INSTALL_PLATFORM: ${{ matrix.os_platform }} + run: | + if [[ $INSTALL_PLATFORM == "ubuntu" ]]; then + sudo apt-get update + sudo apt-get install -y git sudo make + make deps + sudo make install + apisix start + elif [[ $INSTALL_PLATFORM == "redhat" ]]; then + docker run -itd -v ${{ github.workspace }}:/apisix --name ubi8 --net="host" --dns 8.8.8.8 --dns-search apache.org registry.access.redhat.com/ubi8/ubi:8.6 /bin/bash + docker exec ubi8 bash -c "yum install -y git sudo make" + docker exec ubi8 bash -c "cd apisix && make deps" + docker exec ubi8 bash -c "cd apisix && make install" + docker exec ubi8 bash -c "cd apisix && apisix start" + elif [[ $INSTALL_PLATFORM == "centos7" ]]; then + docker run -itd -v ${{ github.workspace }}:/apisix --name centos7Instance --net="host" --dns 8.8.8.8 --dns-search apache.org docker.io/centos:7 /bin/bash + docker exec centos7Instance bash -c "yum install -y git sudo make" + docker exec centos7Instance bash -c "cd apisix && make deps" + docker exec centos7Instance bash -c "cd apisix && make install" + docker exec centos7Instance bash -c "cd apisix && apisix start" + fi + sleep 6 + + - name: Test apisix + run: | + wget https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x 
/usr/bin/yq + get_admin_key() { + local admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml) + echo "$admin_key" + } + export admin_key=$(get_admin_key); echo $admin_key + cat conf/config.yaml + curl -v http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8088": 1 + } + } + }' + result_code=`curl -I -m 10 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/get` + if [[ $result_code -ne 200 ]]; then + printf "result_code: %s\n" "$result_code" + echo "===============access.log===============" + cat logs/access.log + echo "===============error.log===============" + cat logs/error.log + exit 125 + fi + + - name: Check error log + run: | + if grep -q '\[error\]' logs/error.log; then + echo "=====found error log=====" + cat /usr/local/apisix/logs/error.log + exit 125 + fi diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/stale.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/stale.yml new file mode 100644 index 0000000..3bd686e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/stale.yml @@ -0,0 +1,52 @@ +name: Stable Test + +on: + workflow_dispatch: + schedule: + - cron: '0 10 * * *' + +permissions: + contents: read + +jobs: + prune_stale: + permissions: + issues: write # for actions/stale to close stale issues + pull-requests: write # for actions/stale to close stale PRs + name: Prune Stale + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Prune Stale + uses: actions/stale@v8 + with: + days-before-issue-stale: 350 + days-before-issue-close: 14 + stale-issue-message: > + This issue has been marked as stale due to 350 days of inactivity. + It will be closed in 2 weeks if no further activity occurs. If this issue is still + relevant, please simply write any comment. 
Even if closed, you can still revive the + issue at any time or discuss it on the dev@apisix.apache.org list. + Thank you for your contributions. + close-issue-message: > + This issue has been closed due to lack of activity. If you think that + is incorrect, or the issue requires additional review, you can revive the issue at + any time. + days-before-pr-stale: 60 + days-before-pr-close: 28 + stale-pr-message: > + This pull request has been marked as stale due to 60 days of inactivity. + It will be closed in 4 weeks if no further activity occurs. If you think + that's incorrect or this pull request should instead be reviewed, please simply + write any comment. Even if closed, you can still revive the PR at any time or + discuss it on the dev@apisix.apache.org list. + Thank you for your contributions. + close-pr-message: > + This pull request/issue has been closed due to lack of activity. If you think that + is incorrect, or the pull request requires review, you can revive the PR at any time. + # Issues with these labels will never be considered stale. 
+ exempt-issue-labels: 'bug,enhancement,good first issue' + stale-issue-label: 'stale' + stale-pr-label: 'stale' + ascending: true diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/tars-ci.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/tars-ci.yml new file mode 100644 index 0000000..8e2dba4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/tars-ci.yml @@ -0,0 +1,55 @@ +name: CI Tars + +on: + push: + branches: [ master, 'release/**' ] + paths-ignore: + - 'docs/**' + - '**/*.md' + pull_request: + branches: [ master, 'release/**' ] + paths-ignore: + - 'docs/**' + - '**/*.md' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + tars: + strategy: + fail-fast: false + matrix: + platform: + - ubuntu-latest + os_name: + - linux_openresty + + runs-on: ${{ matrix.platform }} + timeout-minutes: 15 + env: + SERVER_NAME: ${{ matrix.os_name }} + OPENRESTY_VERSION: default + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Setup Tars MySql + run: | + docker run -d -p 3306:3306 -v $PWD/t/tars/conf/tars.sql:/docker-entrypoint-initdb.d/tars.sql -e MYSQL_ROOT_PASSWORD=tars2022 mysql:5.7 + + - name: Linux Install + run: | + sudo ./ci/${{ matrix.os_name }}_runner.sh before_install + sudo --preserve-env=OPENRESTY_VERSION ./ci/${{ matrix.os_name }}_runner.sh do_install + + - name: Run test cases + run: | + ./ci/tars-ci.sh run_case diff --git a/CloudronPackages/APISIX/apisix-source/.github/workflows/update-labels.yml b/CloudronPackages/APISIX/apisix-source/.github/workflows/update-labels.yml new file mode 100644 index 0000000..bc974d9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.github/workflows/update-labels.yml @@ -0,0 +1,62 @@ +name: Update labels when user responds in issue and pr +permissions: + issues: write + 
pull-requests: write + +on: + issue_comment: + types: [created] + pull_request_review_comment: + types: [created] + +jobs: + issue_commented: + if: github.event.issue && !github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && contains(github.event.issue.labels.*.name, 'wait for update') && !contains(github.event.issue.labels.*.name, 'user responded') + runs-on: ubuntu-latest + steps: + - name: update labels when user responds + uses: actions/github-script@v7 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["user responded"] + }) + github.rest.issues.removeLabel({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + name: "wait for update" + }) + + pr_commented: + if: github.event.issue && github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && (contains(github.event.issue.labels.*.name, 'wait for update') || contains(github.event.issue.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && !contains(github.event.issue.labels.*.name, 'user responded') + runs-on: ubuntu-latest + steps: + - name: update label when user responds + uses: actions/github-script@v7 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["user responded"] + }) + + pr_review_commented: + if: github.event.pull_request && github.event.comment.user.login == github.event.pull_request.user.login && (contains(github.event.pull_request.labels.*.name, 'wait for update') || contains(github.event.pull_request.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && !contains(github.event.pull_request.labels.*.name, 'user responded') + runs-on: ubuntu-latest + steps: + - name: update 
label when user responds + uses: actions/github-script@v7 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["user responded"] + }) diff --git a/CloudronPackages/APISIX/apisix-source/.gitmodules b/CloudronPackages/APISIX/apisix-source/.gitmodules new file mode 100644 index 0000000..9819855 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.gitmodules @@ -0,0 +1,9 @@ +[submodule "t/toolkit"] + path = t/toolkit + url = https://github.com/api7/test-toolkit.git +[submodule ".github/actions/action-semantic-pull-request"] + path = .github/actions/action-semantic-pull-request + url = https://github.com/amannn/action-semantic-pull-request.git +[submodule ".github/actions/autocorrect"] + path = .github/actions/autocorrect + url = https://github.com/huacnlee/autocorrect.git diff --git a/CloudronPackages/APISIX/apisix-source/.ignore_words b/CloudronPackages/APISIX/apisix-source/.ignore_words new file mode 100644 index 0000000..86683d3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.ignore_words @@ -0,0 +1,11 @@ +iam +te +ba +ue +shttp +nd +hel +nulll +smove +aks +nin diff --git a/CloudronPackages/APISIX/apisix-source/.licenserc.yaml b/CloudronPackages/APISIX/apisix-source/.licenserc.yaml new file mode 100644 index 0000000..86edebf --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.licenserc.yaml @@ -0,0 +1,60 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +header: + license: + spdx-id: Apache-2.0 + copyright-owner: Apache Software Foundation + + license-location-threshold: 360 + + paths-ignore: + - '.gitignore' + - '.gitattributes' + - '.gitmodules' + - 'LICENSE' + - 'NOTICE' + - '**/*.json' + - '**/*.key' + - '**/*.crt' + - '**/*.pem' + - '**/*.pb.go' + - '**/pnpm-lock.yaml' + - '.github/' + - 'conf/mime.types' + - '**/*.svg' + # Exclude CI env_file + - 'ci/pod/**/*.env' + # eyes has some limitation to handle git pattern + - '**/*.log' + # Exclude test toolkit files + - 't/toolkit' + - 'go.mod' + - 'go.sum' + # Exclude non-Apache licensed files + - 'apisix/balancer/ewma.lua' + # Exclude plugin-specific configuration files + - 't/plugin/authz-casbin' + - 't/coredns' + - 't/fuzzing/requirements.txt' + - 'autodocs/' + - 'docs/**/*.md' + - '.ignore_words' + - '.luacheckrc' + # Exclude file contains certificate revocation information + - 't/certs/ocsp/index.txt' + + comment: on-failure diff --git a/CloudronPackages/APISIX/apisix-source/.markdownlint.yml b/CloudronPackages/APISIX/apisix-source/.markdownlint.yml new file mode 100644 index 0000000..36d2485 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/.markdownlint.yml @@ -0,0 +1,34 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +MD001: false +MD004: false +MD005: false +MD006: false +MD007: false +MD010: false +MD013: false +MD014: false +MD024: false +MD026: false +MD029: false +MD033: false +MD034: false +MD036: false +MD040: false +MD041: false +MD046: false diff --git a/CloudronPackages/APISIX/apisix-source/CHANGELOG.md b/CloudronPackages/APISIX/apisix-source/CHANGELOG.md new file mode 100644 index 0000000..41c430a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/CHANGELOG.md @@ -0,0 +1,1936 @@ +--- +title: Changelog +--- + + + +## Table of Contents + +- [3.13.0](#3130) +- [3.12.0](#3120) +- [3.11.0](#3110) +- [3.10.0](#3100) +- [3.9.0](#390) +- [3.8.0](#380) +- [3.7.0](#370) +- [3.6.0](#360) +- [3.5.0](#350) +- [3.4.0](#340) +- [3.3.0](#330) +- [3.2.1](#321) +- [3.2.0](#320) +- [3.1.0](#310) +- [3.0.0](#300) +- [3.0.0-beta](#300-beta) +- [2.15.3](#2153) +- [2.15.2](#2152) +- [2.15.1](#2151) +- [2.15.0](#2150) +- [2.14.1](#2141) +- [2.14.0](#2140) +- [2.13.3](#2133) +- [2.13.2](#2132) +- [2.13.1](#2131) +- [2.13.0](#2130) +- [2.12.1](#2121) +- [2.12.0](#2120) +- [2.11.0](#2110) +- [2.10.5](#2105) +- [2.10.4](#2104) +- [2.10.3](#2103) +- [2.10.2](#2102) +- [2.10.1](#2101) +- [2.10.0](#2100) +- [2.9.0](#290) +- [2.8.0](#280) +- [2.7.0](#270) +- [2.6.0](#260) +- [2.5.0](#250) +- [2.4.0](#240) +- [2.3.0](#230) +- [2.2.0](#220) +- [2.1.0](#210) +- [2.0.0](#200) +- [1.5.0](#150) +- [1.4.1](#141) +- [1.4.0](#140) 
+- [1.3.0](#130) +- [1.2.0](#120) +- [1.1.0](#110) +- [1.0.0](#100) +- [0.9.0](#090) +- [0.8.0](#080) +- [0.7.0](#070) +- [0.6.0](#060) + +## 3.13.0 + +**The changes marked with :warning: are not backward compatible.** + +### Change + +- :warning: mark server-info plugin as deprecated [#12244](https://github.com/apache/apisix/pull/12244) +- :warning: fill in the metadata of resource schema [#12224](https://github.com/apache/apisix/pull/12224). +This PR sets additionalProperties to false for consumer credentials. + +### Bugfixes + +- fix: running stale healthchecker when new node count <= 1 [#12118](https://github.com/apache/apisix/pull/12118) +- fix: release healthchecker on 0 nodes [#12126](https://github.com/apache/apisix/pull/12126) +- fix: only parse and validate apisix.yaml in cli when startup [#12216](https://github.com/apache/apisix/pull/12216) +- fix(standalone): API-driven mode does not properly handle consumer schema [#12256](https://github.com/apache/apisix/pull/12256) +- fix: added restriction for TLSv1.3 cross-SNI session resumption [#12366](https://github.com/apache/apisix/pull/12366) +- fix: flaky t/admin/filter.t due to url encoding for query params [#12370](https://github.com/apache/apisix/pull/12370) +- fix(workflow/push-dev-image-on-commit): remove already defined uses [#12365](https://github.com/apache/apisix/pull/12365) +- fix(workflow): use runners with different architectures instead of QEMU [#12322](https://github.com/apache/apisix/pull/12322) +- fix: kubernetes service discovery single mode data dump [#12284](https://github.com/apache/apisix/pull/12284) +- fix: handle consul nil port cases by defaulting to port 80 [#12304](https://github.com/apache/apisix/pull/12304) +- fix: check if config contains duplicate resources in API-driven standalone mode [#12317](https://github.com/apache/apisix/pull/12317) +- fix: original key being modified causing cache inconsistency [#12299](https://github.com/apache/apisix/pull/12299) +- fix: access to the 
apisix dashboard in dev returns 404 [#12376](https://github.com/apache/apisix/pull/12376) + +### Core + +- feat(consumer): consumer username allows - in it [#12296](https://github.com/apache/apisix/pull/12296) +- chore: change log level to debug to avoid unnecessary logs [#12361](https://github.com/apache/apisix/pull/12361) +- chore: change log level from warn to info for stale batch processor removal [#12297](https://github.com/apache/apisix/pull/12297) +- feat(standalone): allow more characters in credential_id for API-driven mode [#12295](https://github.com/apache/apisix/pull/12295) +- feat: add standalone admin api [#12179](https://github.com/apache/apisix/pull/12179) +- feat: support health checker for stream subsystem [#12180](https://github.com/apache/apisix/pull/12180) +- feat(standalone): support revision in API-driven standalone mode like etcd [#12214](https://github.com/apache/apisix/pull/12214) +- feat: add healthcheck for sync configuration [#12200](https://github.com/apache/apisix/pull/12200) +- perf: compare service discovery nodes by address [#12258](https://github.com/apache/apisix/pull/12258) +- feat: fill in the metadata of resource schema [#12224](https://github.com/apache/apisix/pull/12224) +- feat: add embedded apisix dashboard ui [#12276](https://github.com/apache/apisix/pull/12276) +- feat: add apisix dashboard to dev image [#12369](https://github.com/apache/apisix/pull/12369) +- feat: add max pending entries option to batch-processor [#12338](https://github.com/apache/apisix/pull/12338) +- feat(standalone): support JSON format [#12333](https://github.com/apache/apisix/pull/12333) +- feat: enhance admin api filter [#12291](https://github.com/apache/apisix/pull/12291) +- feat: add warning for data plane writing to etcd [#12241](https://github.com/apache/apisix/pull/12241) +- chore: upgrade openresty version to v1.27.1.2 [#12307](https://github.com/apache/apisix/pull/12307) +- chore: upgrade luarocks version to 3.12.0 
[#12305](https://github.com/apache/apisix/pull/12305) + +### Plugins + +- refactor(ai-proxy): move read_response into ai_driver.request function [#12101](https://github.com/apache/apisix/pull/12101) +- refactor: mcp server framework implementation [#12168](https://github.com/apache/apisix/pull/12168) +- feat: add mcp-bridge plugin [#12151](https://github.com/apache/apisix/pull/12151) +- feat: add lago plugin [#12196](https://github.com/apache/apisix/pull/12196) +- feat: add headers attribute for loki-logger [#12243](https://github.com/apache/apisix/pull/12243) +- feat: expose apisix version in prometheus node info metric [#12367](https://github.com/apache/apisix/pull/12367) + +## Doc improvements + +- docs: update stream proxy doc for proxy_mode and some formatting [#12108](https://github.com/apache/apisix/pull/12108) +- docs: improve loki-logger plugin docs [#11921](https://github.com/apache/apisix/pull/11921) +- docs: improve ua-restriction plugin docs [#11956](https://github.com/apache/apisix/pull/11956) +- docs: improve elasticsearch-logger plugin docs [#11922](https://github.com/apache/apisix/pull/11922) +- fix file logger example wrong data structure [#12125](https://github.com/apache/apisix/pull/12125) +- docs: improve limit-req plugin docs [#11873](https://github.com/apache/apisix/pull/11873) +- docs: improve body-transformer plugin docs [#11856](https://github.com/apache/apisix/pull/11856) +- docs: update ai-rate-limiting and ai-rag docs [#12107](https://github.com/apache/apisix/pull/12107) +- docs: improve basic-auth docs and update docs for anonymous consumer [#11859](https://github.com/apache/apisix/pull/11859) +- docs: improve key-auth docs and update docs for anonymous consumer [#11860](https://github.com/apache/apisix/pull/11860) +- docs: improve hmac-auth plugin docs and update docs for anonymous consumer [#11867](https://github.com/apache/apisix/pull/11867) +- docs: improve jwt-auth plugin docs and update docs for anonymous consumer 
[#11865](https://github.com/apache/apisix/pull/11865) +- docs: improve request-validation plugin docs [#11853](https://github.com/apache/apisix/pull/11853) +- docs: update variable in building apisix from source [#11640](https://github.com/apache/apisix/pull/11640) +- docs: update readme with APISIX AI Gateway product link and MCP feature [#12166](https://github.com/apache/apisix/pull/12166) +- docs: improve plugin-develop docs [#12242](https://github.com/apache/apisix/pull/12242) +- docs: fix typo in real-ip.md [#12236](https://github.com/apache/apisix/pull/12236) +- docs: the configuration type of the WASM plugin can be an object. [#12251](https://github.com/apache/apisix/pull/12251) + +## Developer productivity + +- feat: support devcontainer for containerized development of APISIX [#11765](https://github.com/apache/apisix/pull/11765) + +## 3.12.0 + +**The changes marked with :warning: are not backward compatible.** + +### Change + +- :warning: replace plugin attribute with plugin metadata in `opentelemetry` plugin [#11940](https://github.com/apache/apisix/pull/11940) +- :warning: refactor: ai-content-moderation to ai-aws-content-moderation [#12010](https://github.com/apache/apisix/pull/12010) +- add expiration time for all Prometheus metrics [#11838](https://github.com/apache/apisix/pull/11838) +- allow workflow config without case [#11787](https://github.com/apache/apisix/pull/11787) +- unify google-cloud-oauth.lua file [#11596](https://github.com/apache/apisix/pull/11596) +- :warning: ai-proxy remove passthrough [#12014](https://github.com/apache/apisix/pull/12014) +- :warning: remove model options' `stream` default value [#12013](https://github.com/apache/apisix/pull/12013) +- :warning: grpc-web response contains two trailer chunks [#11988](https://github.com/apache/apisix/pull/11988). +This PR returns `405 Method not allowed` instead of `400 Bad Request` when request HTTP method errors. 
+- :warning: disallow empty key configuration attributes [#11852](https://github.com/apache/apisix/pull/11852) +- :warning: set default value of ssl_trusted_certificate to system [#11993](https://github.com/apache/apisix/pull/11993) + +### Bugfixes + +- Fix: timeout risk in usages of lua-resty-aws [#12070](https://github.com/apache/apisix/pull/12070) +- Fix: ai-rate-limiting not allowed to limit to a single instance [#12061](https://github.com/apache/apisix/pull/12061) +- Fix: update watch_ctx.revision to avoid multiple resyncs [#12021](https://github.com/apache/apisix/pull/12021) +- Fix: ai-proxy remove passthrough [#12014](https://github.com/apache/apisix/pull/12014) +- Fix: ai-proxy dead loop when retrying [#12012](https://github.com/apache/apisix/pull/12012) +- Fix: error while trying to log table in ai-content-moderation plugin [#11994](https://github.com/apache/apisix/pull/11994) +- Fix: resync etcd when a lower revision is found [#12015](https://github.com/apache/apisix/pull/12015) +- Fix: remove model options' `stream` default value [#12013](https://github.com/apache/apisix/pull/12013) +- Fix: grpc-web response contains two trailer chunks [#11988](https://github.com/apache/apisix/pull/11988) +- Fix: event_id is nil in chaitin-waf [#11651](https://github.com/apache/apisix/pull/11651) +- Fix: race condition problem while update upstream.nodes [#11916](https://github.com/apache/apisix/pull/11916) +- Fix: `upstream_obj.upstream` should not be a string [#11932](https://github.com/apache/apisix/pull/11932) +- Fix: query params in override.endpoint not being sent to LLMs [#11863](https://github.com/apache/apisix/pull/11863) +- Fix: add support for ignoring "load" global variable [#11862](https://github.com/apache/apisix/pull/11862) +- Fix: corrupt data in routes() response due to healthchecker data [#11844](https://github.com/apache/apisix/pull/11844) +- Fix: deepcopy should copy same table exactly only once [#11861](https://github.com/apache/apisix/pull/11861) +- 
Fix: disallow empty key configuration attributes [#11852](https://github.com/apache/apisix/pull/11852) +- Fix: etcd watch restart when receive invalid revision [#11833](https://github.com/apache/apisix/pull/11833) +- Fix: missing parsed_url nil check [#11637](https://github.com/apache/apisix/pull/11637) +- Fix: use `plugin.get` to fetch plugin configured in multi-auth plugin [#11794](https://github.com/apache/apisix/pull/11794) +- Fix: allow special characters in uri params [#11788](https://github.com/apache/apisix/pull/11788) +- Fix: add nil check to conf in body-transformer [#11768](https://github.com/apache/apisix/pull/11768) +- Fix: use max_req_body_bytes field in custom_format [#11771](https://github.com/apache/apisix/pull/11771) +- Fix: health checker can't be released due to health parent being released early [#11760](https://github.com/apache/apisix/pull/11760) +- Fix: use right modifiedIndex for consumer when use credential [#11649](https://github.com/apache/apisix/pull/11649) + +### Core + +- set default value of ssl_trusted_certificate to system [#11993](https://github.com/apache/apisix/pull/11993) +- upgrade openresty version to v1.27.11 [#11936](https://github.com/apache/apisix/pull/11936) +- Support the use of system-provided CA certs in `ssl_trusted_certificate` [#11809](https://github.com/apache/apisix/pull/11809) +- support _meta.pre_function to execute custom logic before execution of each phase [#11793](https://github.com/apache/apisix/pull/11793) +- support anonymous consumer [#11917](https://github.com/apache/apisix/pull/11917) +- accelerate the creation of the consumer cache [#11840](https://github.com/apache/apisix/pull/11840) +- replace 'string.find' with 'core.string.find' [#11886](https://github.com/apache/apisix/pull/11886) +- workflow plugin registration [#11832](https://github.com/apache/apisix/pull/11832) + +### Plugins + +- refactor ai-proxy and ai-proxy-multi [#12030](https://github.com/apache/apisix/pull/12030) +- support embeddings 
API [#12062](https://github.com/apache/apisix/pull/12062)
+- implement rate limiting based fallback strategy [#12047](https://github.com/apache/apisix/pull/12047)
+- ai-rate-limiting plugin [#12037](https://github.com/apache/apisix/pull/12037)
+- add `valid_issuers` field in `openid-connect` plugin [#12002](https://github.com/apache/apisix/pull/12002)
+- add ai-prompt-guard plugin [#12008](https://github.com/apache/apisix/pull/12008)
+- add jwt audience validator [#11987](https://github.com/apache/apisix/pull/11987)
+- store JWT in the request context [#11675](https://github.com/apache/apisix/pull/11675)
+- support proxying openai compatible LLMs [#12004](https://github.com/apache/apisix/pull/12004)
+- add `ai-proxy-multi` plugin [#11986](https://github.com/apache/apisix/pull/11986) [#12030](https://github.com/apache/apisix/pull/12030)
+- make rate limiting response header names configurable [#11831](https://github.com/apache/apisix/pull/11831)
+- support multipart content-type in `body-transformer` [#11767](https://github.com/apache/apisix/pull/11767)
+- plugins in multi-auth returns error instead of logging it [#11775](https://github.com/apache/apisix/pull/11775)
+- support configuring `key_claim_name` [#11772](https://github.com/apache/apisix/pull/11772)
+- add Total request per second panel in grafana dashboard [#11692](https://github.com/apache/apisix/pull/11692)
+- add ai-rag plugin [#11568](https://github.com/apache/apisix/pull/11568)
+- add ai-content-moderation plugin [#11541](https://github.com/apache/apisix/pull/11541)
+- use setmetatable to set hidden variables without affecting serialisation [#11770](https://github.com/apache/apisix/pull/11770)
+
+## 3.11.0
+
+**The changes marked with :warning: are not backward compatible.**
+
+### Change
+
+- :warning: remove JWT signing endpoint and no longer require a private key to be uploaded in the jwt-auth plugin. 
[#11597](https://github.com/apache/apisix/pull/11597)
+- :warning: rewrite hmac-auth plugin for usability [#11581](https://github.com/apache/apisix/pull/11581)
+
+### Plugins
+
+- allow configuring keepalive_timeout in splunk-logger [#11611](https://github.com/apache/apisix/pull/11611)
+- add plugin attach-consumer-label [#11604](https://github.com/apache/apisix/pull/11604)
+- ai-proxy plugin [#11499](https://github.com/apache/apisix/pull/11499)
+- ai-prompt-decorator plugin [#11515](https://github.com/apache/apisix/pull/11515)
+- ai-prompt-template plugin [#11517](https://github.com/apache/apisix/pull/11517)
+
+### Bugfixes
+
+- Fix: adjust the position of enums in pb_option_def [#11448](https://github.com/apache/apisix/pull/11448)
+- Fix: encryption/decryption for non-auth plugins in consumer [#11600](https://github.com/apache/apisix/pull/11600)
+- Fix: confusion when substituting ENV in config file [#11545](https://github.com/apache/apisix/pull/11545)
+
+### Core
+
+- support gcp secret manager [#11436](https://github.com/apache/apisix/pull/11436)
+- support aws secret manager [#11417](https://github.com/apache/apisix/pull/11417)
+- add credential resource and include `X-Consumer-Username`, `X-Credential-Identifier`, and `X-Consumer-Custom-ID` headers in requests to upstream services [#11601](https://github.com/apache/apisix/pull/11601)
+
+## 3.10.0
+
+**The changes marked with :warning: are not backward compatible.**
+
+### Change
+
+- :warning: remove `core.grpc` module [#11427](https://github.com/apache/apisix/pull/11427)
+- add max req/resp body size attributes [#11133](https://github.com/apache/apisix/pull/11133)
+- :warning: autogenerate admin api key if not passed [#11080](https://github.com/apache/apisix/pull/11080)
+- :warning: enable sensitive fields encryption by default [#11076](https://github.com/apache/apisix/pull/11076)
+- support more sensitive fields for encryption [#11095](https://github.com/apache/apisix/pull/11095)
+- :warning: avoid 
overwriting `Access-Control-Expose-Headers` response header [#11136](https://github.com/apache/apisix/pull/11136) +This change removes the default `*` value for `expose_headers` and only sets the header when explicitly configured. +- :warning: add a default limit of 100 for `get_headers()` [#11140](https://github.com/apache/apisix/pull/11140) +- :warning: core.request.header return strings instead of table [#11127](https://github.com/apache/apisix/pull/11127) +This function now always returns strings, previously it returned tables when duplicate headers existed. + +### Plugins + +- allow set headers in introspection request [#11090](https://github.com/apache/apisix/pull/11090) + +### Bugfixes + +- Fix: add libyaml-dev dependency for apt [#11291](https://github.com/apache/apisix/pull/11291) +- Fix: etcd sync data checker should work [#11457](https://github.com/apache/apisix/pull/11457) +- Fix: plugin metadata add id value for etcd checker [#11452](https://github.com/apache/apisix/pull/11452) +- Fix: allow trailing period in SNI and CN for SSL [#11414](https://github.com/apache/apisix/pull/11414) +- Fix: filter out illegal INT(string) formats [#11367](https://github.com/apache/apisix/pull/11367) +- Fix: make the message clearer when API key is missing [#11370](https://github.com/apache/apisix/pull/11370) +- Fix: report consumer username tag in datadog [#11354](https://github.com/apache/apisix/pull/11354) +- Fix: after updating the header, get the old value from the ctx.var [#11329](https://github.com/apache/apisix/pull/11329) +- Fix: ssl key rotation caused request failure [#11305](https://github.com/apache/apisix/pull/11305) +- Fix: validation fails causing etcd events not to be handled correctly [#11268](https://github.com/apache/apisix/pull/11268) +- Fix: stream route matcher is nil after first match [#11269](https://github.com/apache/apisix/pull/11269) +- Fix: rectify the way to fetch secret resource by id [#11164](https://github.com/apache/apisix/pull/11164) +- 
Fix: multi-auth raise 500 error when use default conf [#11145](https://github.com/apache/apisix/pull/11145)
+- Fix: avoid overwriting `Access-Control-Expose-Headers` response header [#11136](https://github.com/apache/apisix/pull/11136)
+- Fix: close session in case of error to avoid blocked session [#11089](https://github.com/apache/apisix/pull/11089)
+- Fix: restore `pb.state` appropriately [#11135](https://github.com/apache/apisix/pull/11135)
+- Fix: add a default limit of 100 for `get_headers()` [#11140](https://github.com/apache/apisix/pull/11140)
+- Fix: disable features when prometheus plugin is turned off [#11117](https://github.com/apache/apisix/pull/11117)
+- Fix: add post request headers only if auth request method is POST [#11021](https://github.com/apache/apisix/pull/11021)
+- Fix: core.request.header return strings instead of table [#11127](https://github.com/apache/apisix/pull/11127)
+- Fix: brotli partial response [#11087](https://github.com/apache/apisix/pull/11087)
+- Fix: the port value greater than 65535 should not be allowed [#11043](https://github.com/apache/apisix/pull/11043)
+
+### Core
+
+- upgrade openresty version to 1.25.3.2 [#11419](https://github.com/apache/apisix/pull/11419)
+- move config-default.yaml to hardcoded lua file [#11343](https://github.com/apache/apisix/pull/11343)
+- warn log when sending requests to external services insecurely [#11403](https://github.com/apache/apisix/pull/11403)
+- update casbin to 1.41.9 [#11400](https://github.com/apache/apisix/pull/11400)
+- update lua-resty-t1k to 1.1.5 [#11391](https://github.com/apache/apisix/pull/11391)
+- support store ssl.keys ssl.certs in secrets manager [#11339](https://github.com/apache/apisix/pull/11339)
+- move tinyyaml to lyaml [#11312](https://github.com/apache/apisix/pull/11312)
+- support hcv namespace [#11277](https://github.com/apache/apisix/pull/11277)
+- add discovery k8s dump data interface [#11111](https://github.com/apache/apisix/pull/11111)
+- make 
fetch_secrets use cache for performance [#11201](https://github.com/apache/apisix/pull/11201) +- replace 'string.len' with '#' [#11078](https://github.com/apache/apisix/pull/11078) + +## 3.9.0 + +**The changes marked with :warning: are not backward compatible.** + +### Change + +- :warning: use apisix.enable_http2 to enable HTTP/2 in APISIX [#11032](https://github.com/apache/apisix/pull/11032) +- :warning: unify the keyring and key_encrypt_salt fields [#10771](https://github.com/apache/apisix/pull/10771) + +### Core + +- :sunrise: Support HTTP3/QUIC + - [#10989](https://github.com/apache/apisix/pull/10989) + - [#11010](https://github.com/apache/apisix/pull/11010) + - [#11027](https://github.com/apache/apisix/pull/11027) +- :sunrise: add plugins/reload to control api [#10905](https://github.com/apache/apisix/pull/10905) +- :sunrise: consul deduplicate and sort [#10941](https://github.com/apache/apisix/pull/10941) +- :sunrise: support uri_arg_ when use radixtree_uri_with_parameter [#10645](https://github.com/apache/apisix/pull/10645) + +### Plugins + +- :sunrise: add session.cookie configuration [#10919](https://github.com/apache/apisix/pull/10919) +- :sunrise: support endpointslices in kubernetes discovery [#10916](https://github.com/apache/apisix/pull/10916) +- :sunrise: add redis and redis-cluster in limit-req [#10874](https://github.com/apache/apisix/pull/10874) +- :sunrise: support expire prometheus metrics [#10869](https://github.com/apache/apisix/pull/10869) +- :sunrise: add redis and redis-cluster in limit-conn [#10866](https://github.com/apache/apisix/pull/10866) +- :sunrise: allow configuring allow-headers in grpc-web plugin [#10904](https://github.com/apache/apisix/pull/10904) +- :sunrise: Add forward-auth plugin exception configuration status_on_error [#10898](https://github.com/apache/apisix/pull/10898) +- :sunrise: add option to include request body and response body in log util [#10888](https://github.com/apache/apisix/pull/10888) +- :sunrise: support 
compressed responses in loggers [#10884](https://github.com/apache/apisix/pull/10884) +- :sunrise: add http-dubbo plugin [#10703](https://github.com/apache/apisix/pull/10703) +- :sunrise: support built-in variables in response_headers in mocking plugin [#10872](https://github.com/apache/apisix/pull/10872) +- :sunrise: support other data formats without warnings [#10862](https://github.com/apache/apisix/pull/10862) +- :sunrise: add ocsp-stapling plugin [#10817](https://github.com/apache/apisix/pull/10817) + +### Bug Fixes + +- Fix: keep different strategy response header consistency [#11048](https://github.com/apache/apisix/pull/11048) +- Fix: add apisix/plugin/limit-req to makefile [#10955](https://github.com/apache/apisix/pull/10959) +- Fix: wrong namespace related endpoint in k8s [#10917](https://github.com/apache/apisix/pull/10917) +- Fix: when delete the secret cause 500 error [#10902](https://github.com/apache/apisix/pull/10902) +- Fix: jwe-decrypt secret length restriction [#10928](https://github.com/apache/apisix/pull/10928) +- Fix: unnecessary YAML Config reloads [#9065](https://github.com/apache/apisix/pull/9065) +- Fix: real_payload was overridden by malicious payload [#10982](https://github.com/apache/apisix/pull/10982) +- Fix: all origins could pass when allow_origins_by_metadata is set [#10948](https://github.com/apache/apisix/pull/10948) +- Fix: add compatibility headers [#10828](https://github.com/apache/apisix/pull/10828) +- Fix: missing trailers issue [#10851](https://github.com/apache/apisix/pull/10851) +- Fix: decryption failure [#10843](https://github.com/apache/apisix/pull/10843) +- Fix: linux-install-luarocks are not compatible with the openresty environment [#10813](https://github.com/apache/apisix/pull/10813) +- Fix: server-side sessions locked by not calling explicit session:close() [#10788](https://github.com/apache/apisix/pull/10788) +- Fix: skip brotli compression for upstream compressed response 
[#10740](https://github.com/apache/apisix/pull/10740) +- Fix: use_jwks breaking authentication header [#10670](https://github.com/apache/apisix/pull/10670) +- Fix: authz_keycloak plugin giving 500 error [#10763](https://github.com/apache/apisix/pull/10763) + +## 3.8.0 + +### Core + +- :sunrise: Support the use of lua-resty-events module for better performance: + - [#10550](https://github.com/apache/apisix/pull/10550) + - [#10558](https://github.com/apache/apisix/pull/10558) +- :sunrise: Upgrade OpenSSL 1.1.1 to OpenSSL 3: [#10724](https://github.com/apache/apisix/pull/10724) + +### Plugins + +- :sunrise: Add jwe-decrypt plugin: [#10252](https://github.com/apache/apisix/pull/10252) +- :sunrise: Support brotli when use filters.regex option (response-rewrite): [#10733](https://github.com/apache/apisix/pull/10733) +- :sunrise: Add multi-auth plugin: [#10482](https://github.com/apache/apisix/pull/10482) +- :sunrise: Add `required scopes` configuration property to `openid-connect` plugin: [#10493](https://github.com/apache/apisix/pull/10493) +- :sunrise: Support for the Timing-Allow-Origin header (cors): [#9365](https://github.com/apache/apisix/pull/9365) +- :sunrise: Add brotli plugin: [#10515](https://github.com/apache/apisix/pull/10515) +- :sunrise: Body-transformer plugin enhancement(#10472): [#10496](https://github.com/apache/apisix/pull/10496) +- :sunrise: Set minLength of redis_cluster_nodes to 1 for limit-count plugin: [#10612](https://github.com/apache/apisix/pull/10612) +- :sunrise: Allow to use environment variables for limit-count plugin settings: [#10607](https://github.com/apache/apisix/pull/10607) + +### Bugfixes + +- Fix: When the upstream nodes are of array type, the port should be an optional field: [#10477](https://github.com/apache/apisix/pull/10477) +- Fix: Incorrect variable extraction in fault-injection plugin: [#10485](https://github.com/apache/apisix/pull/10485) +- Fix: All consumers should share the same counter (limit-count): 
[#10541](https://github.com/apache/apisix/pull/10541) +- Fix: Safely remove upstream when sending route to opa plugin: [#10552](https://github.com/apache/apisix/pull/10552) +- Fix: Missing etcd init_dir and unable to list resource: [#10569](https://github.com/apache/apisix/pull/10569) +- Fix: Forward-auth request body is too large: [#10589](https://github.com/apache/apisix/pull/10589) +- Fix: Memory leak caused by timer that never quit: [#10614](https://github.com/apache/apisix/pull/10614) +- Fix: Do not invoke add_header if value resolved as nil in proxy-rewrite plugin: [#10619](https://github.com/apache/apisix/pull/10619) +- Fix: Frequent traversal of all keys in etcd leads to high CPU usage: [#10671](https://github.com/apache/apisix/pull/10671) +- Fix: For prometheus upstream_status metrics, mostly_healthy is healthy: [#10639](https://github.com/apache/apisix/pull/10639) +- Fix: Avoid getting a nil value in log phase in zipkin: [#10666](https://github.com/apache/apisix/pull/10666) +- Fix: Enable openid-connect plugin without redirect_uri got 500 error: [#7690](https://github.com/apache/apisix/pull/7690) +- Fix: Add redirect_after_logout_uri for ODIC that do not have an end_session_endpoint: [#10653](https://github.com/apache/apisix/pull/10653) +- Fix: Response-rewrite filters.regex does not apply when content-encoding is gzip: [#10637](https://github.com/apache/apisix/pull/10637) +- Fix: The leak of prometheus metrics: [#10655](https://github.com/apache/apisix/pull/10655) +- Fix: Authz-keycloak add return detail err: [#10691](https://github.com/apache/apisix/pull/10691) +- Fix: upstream nodes was not updated correctly by service discover: [#10722](https://github.com/apache/apisix/pull/10722) +- Fix: apisix restart failed: [#10696](https://github.com/apache/apisix/pull/10696) + +## 3.7.0 + +**The changes marked with :warning: are not backward compatible.** + +### Change + +- :warning: Creating core resources does not allow passing in `create_time` and 
`update_time`: [#10232](https://github.com/apache/apisix/pull/10232) +- :warning: Remove self-contained info fields `exptime` and `validity_start` and `validity_end` from ssl schema: [10323](https://github.com/apache/apisix/pull/10323) +- :warning: Replace `route` with `apisix.route_name`, `service` with `apisix.service_name` in the attributes of opentelemetry plugin to follow the standards for span name and attributes: [#10393](https://github.com/apache/apisix/pull/10393) + +### Core + +- :sunrise: Added token to support access control for consul discovery: [#10278](https://github.com/apache/apisix/pull/10278) +- :sunrise: Support configuring `service_id` in stream_route to reference service resources: [#10298](https://github.com/apache/apisix/pull/10298) +- :sunrise: Using `apisix-runtime` as the apisix runtime: + - [#10415](https://github.com/apache/apisix/pull/10415) + - [#10427](https://github.com/apache/apisix/pull/10427) + +### Plugins + +- :sunrise: Add tests for authz-keycloak with apisix secrets: [#10353](https://github.com/apache/apisix/pull/10353) +- :sunrise: Add authorization params to openid-connect plugin: [#10058](https://github.com/apache/apisix/pull/10058) +- :sunrise: Support set variable in zipkin plugin: [#10361](https://github.com/apache/apisix/pull/10361) +- :sunrise: Support Nacos ak/sk authentication: [#10445](https://github.com/apache/apisix/pull/10445) + +### Bugfixes + +- Fix: Use warn log for get healthcheck target status failure: + - [#10156](https://github.com/apache/apisix/pull/10156) +- Fix: Keep healthcheck target state when upstream changes: + - [#10312](https://github.com/apache/apisix/pull/10312) + - [#10307](https://github.com/apache/apisix/pull/10307) +- Fix: Add name field in plugin_config schema for consistency: [#10315](https://github.com/apache/apisix/pull/10315) +- Fix: Optimize tls in upstream_schema and wrong variable: [#10269](https://github.com/apache/apisix/pull/10269) +- Fix(consul): Failed to exit normally: 
[#10342](https://github.com/apache/apisix/pull/10342)
+- Fix: The request header with `Content-Type: application/x-www-form-urlencoded;charset=utf-8` will cause vars condition `post_arg_xxx` matching to fail: [#10372](https://github.com/apache/apisix/pull/10372)
+- Fix: Make install failed on mac: [#10403](https://github.com/apache/apisix/pull/10403)
+- Fix(log-rotate): Log compression timeout caused data loss: [#8620](https://github.com/apache/apisix/pull/8620)
+- Fix(kafka-logger): Remove 0 from enum of required_acks: [#10469](https://github.com/apache/apisix/pull/10469)
+
+## 3.6.0
+
+**The changes marked with :warning: are not backward compatible.**
+
+### Change
+
+- :warning: Remove gRPC support between APISIX and etcd and remove `etcd.use_grpc` configuration option: [#10015](https://github.com/apache/apisix/pull/10015)
+- :warning: Remove conf server. The data plane no longer supports direct communication with the control plane, and the configuration should be adjusted from `config_provider: control_plane` to `config_provider: etcd`: [#10012](https://github.com/apache/apisix/pull/10012)
+- :warning: Enforce strict schema validation on the properties of the core APISIX resources: [#10233](https://github.com/apache/apisix/pull/10233)
+
+### Core
+
+- :sunrise: Support configuring the buffer size of the access log: [#10225](https://github.com/apache/apisix/pull/10225)
+- :sunrise: Support the use of local DNS resolvers in service discovery by configuring `resolv_conf`: [#9770](https://github.com/apache/apisix/pull/9770)
+- :sunrise: Remove Rust dependency for installation: [#10121](https://github.com/apache/apisix/pull/10121)
+- :sunrise: Support Dubbo protocol in xRPC [#9660](https://github.com/apache/apisix/pull/9660)
+
+### Plugins
+
+- :sunrise: Support https in traffic-split plugin: [#9115](https://github.com/apache/apisix/pull/9115)
+- :sunrise: Support rewrite request body in external plugin: [#9990](https://github.com/apache/apisix/pull/9990)
+- 
:sunrise: Support set nginx variables in opentelemetry plugin: [#8871](https://github.com/apache/apisix/pull/8871) +- :sunrise: Support unix sock host pattern in the chaitin-waf plugin: [#10161](https://github.com/apache/apisix/pull/10161) + +### Bugfixes + +- Fix GraphQL POST request route matching exception: [#10198](https://github.com/apache/apisix/pull/10198) +- Fix error on array of multiline string in `apisix.yaml`: [#10193](https://github.com/apache/apisix/pull/10193) +- Add error handlers for invalid `cache_zone` configuration in the `proxy-cache` plugin: [#10138](https://github.com/apache/apisix/pull/10138) + +## 3.5.0 + +**The changes marked with :warning: are not backward compatible.** + +### Change + +- :warning: remove snowflake algorithm in the request-id plugin: [#9715](https://github.com/apache/apisix/pull/9715) +- :warning: No longer compatible with OpenResty 1.19, it needs to be upgraded to 1.21+: [#9913](https://github.com/apache/apisix/pull/9913) +- :warning: Remove the configuration item `apisix.stream_proxy.only`, the L4/L7 proxy needs to be enabled through the configuration item `apisix.proxy_mode`: [#9607](https://github.com/apache/apisix/pull/9607) +- :warning: The admin-api `/apisix/admin/plugins?all=true` marked as deprecated: [#9580](https://github.com/apache/apisix/pull/9580) +- :warning: allowlist and denylist can't be enabled at the same time in ua-restriction plugin: [#9841](https://github.com/apache/apisix/pull/9841) + +### Core + +- :sunrise: Support host level dynamic setting of tls protocol version: [#9903](https://github.com/apache/apisix/pull/9903) +- :sunrise: Support force delete resource: [#9810](https://github.com/apache/apisix/pull/9810) +- :sunrise: Support pulling env vars from yaml keys: [#9855](https://github.com/apache/apisix/pull/9855) +- :sunrise: Add schema validate API in admin-api: [#10065](https://github.com/apache/apisix/pull/10065) + +### Plugins + +- :sunrise: Add chaitin-waf plugin: 
[#9838](https://github.com/apache/apisix/pull/9838) +- :sunrise: Support vars for file-logger plugin: [#9712](https://github.com/apache/apisix/pull/9712) +- :sunrise: Support adding response headers for mock plugin: [#9720](https://github.com/apache/apisix/pull/9720) +- :sunrise: Support regex_uri with unsafe_uri for proxy-rewrite plugin: [#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: Support set client_email field for google-cloud-logging plugin: [#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: Support sending headers upstream returned by OPA server for opa plugin: [#9710](https://github.com/apache/apisix/pull/9710) +- :sunrise: Support configuring proxy server for openid-connect plugin: [#9948](https://github.com/apache/apisix/pull/9948) + +### Bugfixes + +- Fix(log-rotate): the max_kept configuration doesn't work when using custom name: [#9749](https://github.com/apache/apisix/pull/9749) +- Fix(limit_conn): do not use the http variable in stream mode: [#9816](https://github.com/apache/apisix/pull/9816) +- Fix(loki-logger): getting an error with log_labels: [#9850](https://github.com/apache/apisix/pull/9850) +- Fix(limit-count): X-RateLimit-Reset shouldn't be set to 0 after request be rejected: [#9978](https://github.com/apache/apisix/pull/9978) +- Fix(nacos): attempt to index upvalue 'applications' (a nil value): [#9960](https://github.com/apache/apisix/pull/9960) +- Fix(etcd): can't sync etcd data if key has special character: [#9967](https://github.com/apache/apisix/pull/9967) +- Fix(tencent-cloud-cls): dns parsing failure: [#9843](https://github.com/apache/apisix/pull/9843) +- Fix(reload): worker not exited when executing quit or reload command [#9909](https://github.com/apache/apisix/pull/9909) +- Fix(traffic-split): upstream_id validity verification [#10008](https://github.com/apache/apisix/pull/10008) + +## 3.4.0 + +### Core + +- :sunrise: Support route-level MTLS [#9322](https://github.com/apache/apisix/pull/9322) +- 
:sunrise: Support id schema for global_rules [#9517](https://github.com/apache/apisix/pull/9517) +- :sunrise: Support use a single long http connection to watch all resources for etcd [#9456](https://github.com/apache/apisix/pull/9456) +- :sunrise: Support max len 256 for ssl label [#9301](https://github.com/apache/apisix/pull/9301) + +### Plugins + +- :sunrise: Support multiple regex pattern matching for proxy_rewrite plugin [#9194](https://github.com/apache/apisix/pull/9194) +- :sunrise: Add loki-logger plugin [#9399](https://github.com/apache/apisix/pull/9399) +- :sunrise: Allow user configure DEFAULT_BUCKETS for prometheus plugin [#9673](https://github.com/apache/apisix/pull/9673) + +### Bugfixes + +- Fix(body-transformer): xml2lua: replace empty table with empty string [#9669](https://github.com/apache/apisix/pull/9669) +- Fix: opentelemetry and grpc-transcode plugins cannot work together [#9606](https://github.com/apache/apisix/pull/9606) +- Fix(skywalking-logger, error-log-logger): support $hostname in skywalking service_instance_name [#9401](https://github.com/apache/apisix/pull/9401) +- Fix(admin): fix secrets do not support to update attributes by PATCH [#9510](https://github.com/apache/apisix/pull/9510) +- Fix(http-logger): default request path should be '/' [#9472](https://github.com/apache/apisix/pull/9472) +- Fix: syslog plugin doesn't work [#9425](https://github.com/apache/apisix/pull/9425) +- Fix: wrong log format for splunk-hec-logging [#9478](https://github.com/apache/apisix/pull/9478) +- Fix(etcd): reuse cli and enable keepalive [#9420](https://github.com/apache/apisix/pull/9420) +- Fix: upstream key config add mqtt_client_id support [#9450](https://github.com/apache/apisix/pull/9450) +- Fix: body-transformer plugin return raw body anytime [#9446](https://github.com/apache/apisix/pull/9446) +- Fix(wolf-rbac): other plugin in consumer not effective when consumer used wolf-rbac plugin [#9298](https://github.com/apache/apisix/pull/9298) +- Fix: 
always parse domain when host is domain name [#9332](https://github.com/apache/apisix/pull/9332) +- Fix: response-rewrite plugin can't add only one character [#9372](https://github.com/apache/apisix/pull/9372) +- Fix(consul): support to fetch only health endpoint [#9204](https://github.com/apache/apisix/pull/9204) + +## 3.3.0 + +**The changes marked with :warning: are not backward compatible.** + +### Change + +- :warning: Change the default router from `radixtree_uri` to `radixtree_host_uri`: [#9047](https://github.com/apache/apisix/pull/9047) +- :warning: CORS plugin will add `Vary: Origin` header when `allow_origin` is not `*`: [#9010](https://github.com/apache/apisix/pull/9010) + +### Core + +- :sunrise: Support store route's cert in secrets manager: [#9247](https://github.com/apache/apisix/pull/9247) +- :sunrise: Support bypassing Admin API Auth by configuration: [#9147](https://github.com/apache/apisix/pull/9147) + +### Plugins + +- :sunrise: Support header injection for `fault-injection` plugin: [#9039](https://github.com/apache/apisix/pull/9039) +- :sunrise: Support variable when rewrite header in `proxy-rewrite` plugin: [#9112](https://github.com/apache/apisix/pull/9112) +- :sunrise: `limit-count` plugin supports `username` and `ssl` for redis policy: [#9185](https://github.com/apache/apisix/pull/9185) + +### Bugfixes + +- Fix etcd data sync exception: [#8493](https://github.com/apache/apisix/pull/8493) +- Fix invalidate cache in `core.request.add_header` and fix some calls: [#8824](https://github.com/apache/apisix/pull/8824) +- Fix the high CPU and memory usage cause by healthcheck impl: [#9015](https://github.com/apache/apisix/pull/9015) +- Consider using `allow_origins_by_regex` only when it is not `nil`: [#9028](https://github.com/apache/apisix/pull/9028) +- Check upstream reference in `traffic-split` plugin when delete upstream: [#9044](https://github.com/apache/apisix/pull/9044) +- Fix failing to connect to etcd at startup: 
[#9077](https://github.com/apache/apisix/pull/9077) +- Fix health checker leak for domain nodes: [#9090](https://github.com/apache/apisix/pull/9090) +- Prevent non `127.0.0.0/24` to access admin api with empty admin_key: [#9146](https://github.com/apache/apisix/pull/9146) +- Ensure `hold_body_chunk` should use separate buffer for each plugin in case of pollution: [#9266](https://github.com/apache/apisix/pull/9266) +- Ensure `batch-requests` plugin read trailer headers if existed: [#9289](https://github.com/apache/apisix/pull/9289) +- Ensure `proxy-rewrite` should set `ngx.var.uri`: [#9309](https://github.com/apache/apisix/pull/9309) + +## 3.2.1 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/3.2` branch.** + +[https://github.com/apache/apisix/blob/release/3.2/CHANGELOG.md#321](https://github.com/apache/apisix/blob/release/3.2/CHANGELOG.md#321) + +## 3.2.0 + +### Change + +- Deprecated separate Vault configuration in jwt-auth. Users can use secret to achieve the same function: [#8660](https://github.com/apache/apisix/pull/8660) + +### Core + +- :sunrise: Support Vault token to configure secret through environment variables: [#8866](https://github.com/apache/apisix/pull/8866) +- :sunrise: Supports service discovery on stream subsystem: + - [#8583](https://github.com/apache/apisix/pull/8583) + - [#8593](https://github.com/apache/apisix/pull/8593) + - [#8584](https://github.com/apache/apisix/pull/8584) + - [#8640](https://github.com/apache/apisix/pull/8640) + - [#8633](https://github.com/apache/apisix/pull/8633) + - [#8696](https://github.com/apache/apisix/pull/8696) + - [#8826](https://github.com/apache/apisix/pull/8826) + +### Plugins + +- :sunrise: Add RESTful to graphQL conversion plugin: [#8959](https://github.com/apache/apisix/pull/8959) +- :sunrise: Supports setting the log format on each log plugin: + - [#8806](https://github.com/apache/apisix/pull/8806) + - [#8643](https://github.com/apache/apisix/pull/8643) +- :sunrise: Add 
request body/response body conversion plugin: [#8766](https://github.com/apache/apisix/pull/8766) +- :sunrise: Support sending error logs to Kafka: [#8693](https://github.com/apache/apisix/pull/8693) +- :sunrise: limit-count plugin supports X-RateLimit-Reset: [#8578](https://github.com/apache/apisix/pull/8578) +- :sunrise: limit-count plugin supports setting TLS to access Redis cluster: [#8558](https://github.com/apache/apisix/pull/8558) +- :sunrise: consumer-restriction plugin supports permission control via consumer_group_id: [#8567](https://github.com/apache/apisix/pull/8567) + +### Bugfixes + +- Fix mTLS protection when the host and SNI mismatch: [#8967](https://github.com/apache/apisix/pull/8967) +- The proxy-rewrite plugin should escape URI parameter parts if they do not come from user config: [#8888](https://github.com/apache/apisix/pull/8888) +- Admin API PATCH operation should return 200 status code after success: [#8855](https://github.com/apache/apisix/pull/8855) +- Under certain conditions, the reload after etcd synchronization failure does not take effect: [#8736](https://github.com/apache/apisix/pull/8736) +- Fix the problem that the nodes found by the Consul service discovery are incomplete: [#8651](https://github.com/apache/apisix/pull/8651) +- Fix grpc-transcode plugin's conversion of Map data: [#8731](https://github.com/apache/apisix/pull/8731) +- External plugins should be able to set the content-type response header: [#8588](https://github.com/apache/apisix/pull/8588) +- When hotloading plugins, redundant timers may be left behind if the request-id plugin initializes the snowflake generator incorrectly: [#8556](https://github.com/apache/apisix/pull/8556) +- Close previous proto synchronizer for grpc-transcode when hotloading plugins: [#8557](https://github.com/apache/apisix/pull/8557) + +## 3.1.0 + +### Core + +- :sunrise: Support for etcd configuration synchronization via gRPC: + - [#8485](https://github.com/apache/apisix/pull/8485) + - 
[#8450](https://github.com/apache/apisix/pull/8450) + - [#8411](https://github.com/apache/apisix/pull/8411) +- :sunrise: Support for configuring encrypted fields in plugins: + - [#8487](https://github.com/apache/apisix/pull/8487) + - [#8403](https://github.com/apache/apisix/pull/8403) +- :sunrise: Support for placing partial fields in Vault or environment variable using secret resources: + - [#8448](https://github.com/apache/apisix/pull/8448) + - [#8421](https://github.com/apache/apisix/pull/8421) + - [#8412](https://github.com/apache/apisix/pull/8412) + - [#8394](https://github.com/apache/apisix/pull/8394) + - [#8390](https://github.com/apache/apisix/pull/8390) +- :sunrise: Allows upstream configuration in the stream subsystem as a domain name: [#8500](https://github.com/apache/apisix/pull/8500) +- :sunrise: Support Consul service discovery: [#8380](https://github.com/apache/apisix/pull/8380) + +### Plugin + +- :sunrise: Optimize resource usage for prometheus collection: [#8434](https://github.com/apache/apisix/pull/8434) +- :sunrise: Add inspect plugin for easy debugging: [#8400](https://github.com/apache/apisix/pull/8400) +- :sunrise: jwt-auth plugin supports parameters to hide authentication token from upstream : [#8206](https://github.com/apache/apisix/pull/8206) +- :sunrise: proxy-rewrite plugin supports adding new request headers without overwriting existing request headers with the same name: [#8336](https://github.com/apache/apisix/pull/8336) +- :sunrise: grpc-transcode plugin supports setting the grpc-status-details-bin response header into the response body: [#7639](https://github.com/apache/apisix/pull/7639) +- :sunrise: proxy-mirror plugin supports setting the prefix: [#8261](https://github.com/apache/apisix/pull/8261) + +### Bugfix + +- Fix the problem that the plug-in configured under service object cannot take effect in time under some circumstances: [#8482](https://github.com/apache/apisix/pull/8482) +- Fix an occasional 502 problem when http and 
grpc share the same upstream connection due to connection pool reuse: [#8364](https://github.com/apache/apisix/pull/8364) +- file-logger should avoid buffer-induced log truncation when writing logs: [#7884](https://github.com/apache/apisix/pull/7884) +- max_kept parameter of log-rotate plugin should take effect on compressed files: [#8366](https://github.com/apache/apisix/pull/8366) +- Fix userinfo not being set when use_jwks is true in the openid-connect plugin: [#8347](https://github.com/apache/apisix/pull/8347) +- Fix an issue where x-forwarded-host cannot be changed in the proxy-rewrite plugin: [#8200](https://github.com/apache/apisix/pull/8200) +- Fix a bug where disabling the v3 admin API resulted in missing response bodies under certain circumstances: [#8349](https://github.com/apache/apisix/pull/8349) +- In zipkin plugin, pass trace ID even if there is a rejected sampling decision: [#8099](https://github.com/apache/apisix/pull/8099) +- Fix `_meta.filter` in plugin configuration not working with variables assigned after upstream response and custom variables in APISIX. 
+ - [#8162](https://github.com/apache/apisix/pull/8162) + - [#8256](https://github.com/apache/apisix/pull/8256) + +## 3.0.0 + +### Change + +- `enable_cpu_affinity` is disabled by default to avoid this configuration affecting the behavior of APISIX deployed in the container: [#8074](https://github.com/apache/apisix/pull/8074) + +### Core + +- :sunrise: Added Consumer Group entity to manage multiple consumers: [#7980](https://github.com/apache/apisix/pull/7980) +- :sunrise: Supports configuring the order in which DNS resolves domain name types: [#7935](https://github.com/apache/apisix/pull/7935) +- :sunrise: Support configuring multiple `key_encrypt_salt` for rotation: [#7925](https://github.com/apache/apisix/pull/7925) + +### Plugin + +- :sunrise: Added ai plugin to dynamically optimize the execution path of APISIX according to the scene: + - [#8102](https://github.com/apache/apisix/pull/8102) + - [#8113](https://github.com/apache/apisix/pull/8113) + - [#8120](https://github.com/apache/apisix/pull/8120) + - [#8128](https://github.com/apache/apisix/pull/8128) + - [#8130](https://github.com/apache/apisix/pull/8130) + - [#8149](https://github.com/apache/apisix/pull/8149) + - [#8157](https://github.com/apache/apisix/pull/8157) +- :sunrise: Support `session_secret` in openid-connect plugin to resolve the inconsistency of `session_secret` among multiple workers: [#8068](https://github.com/apache/apisix/pull/8068) +- :sunrise: Support sasl config in kafka-logger plugin: [#8050](https://github.com/apache/apisix/pull/8050) +- :sunrise: Support set resolve domain in proxy-mirror plugin: [#7861](https://github.com/apache/apisix/pull/7861) +- :sunrise: Support `brokers` property in kafka-logger plugin, which supports different broker to set the same host: [#7999](https://github.com/apache/apisix/pull/7999) +- :sunrise: Support get response body in ext-plugin-post-resp: [#7947](https://github.com/apache/apisix/pull/7947) +- :sunrise: Added cas-auth plugin to support CAS
authentication: [#7932](https://github.com/apache/apisix/pull/7932) + +### Bugfix + +- Conditional expressions of workflow plugin should support operators: [#8121](https://github.com/apache/apisix/pull/8121) +- Fix loading problem of batch processor plugin when prometheus plugin is disabled: [#8079](https://github.com/apache/apisix/pull/8079) +- When APISIX starts, delete the old conf server sock file if it exists: [#8022](https://github.com/apache/apisix/pull/8022) +- Disable core.grpc when gRPC-client-nginx-module module is not compiled: [#8007](https://github.com/apache/apisix/pull/8007) + +## 3.0.0-beta + +Here we use 2.99.0 as the version number in the source code instead of the code name +`3.0.0-beta` for two reasons: + +1. avoid unexpected errors when some programs try to compare the +version, as `3.0.0-beta` contains `3.0.0` and is longer than it. +2. some package system might not allow package which has a suffix +after the version number. + +### Change + +#### Moves the config_center, etcd and Admin API configuration to the deployment + +We've adjusted the configuration in the static configuration file, so you need to update the configuration in `config.yaml` as well: + +- The `config_center` function is now implemented by `config_provider` under `deployment`: [#7901](https://github.com/apache/apisix/pull/7901) +- The `etcd` field is moved to `deployment`: [#7860](https://github.com/apache/apisix/pull/7860) +- The following Admin API configuration is moved to the `admin` field under `deployment`: [#7823](https://github.com/apache/apisix/pull/7823) + - admin_key + - enable_admin_cors + - allow_admin + - admin_listen + - https_admin + - admin_api_mtls + - admin_api_version + +You can refer to the latest `config-default.yaml` for details. + +#### Removing multiple deprecated configurations + +With the new 3.0 release, we took the opportunity to clean out many configurations that were previously marked as deprecated. 
+ +In the static configuration, we removed several fields as follows: + +- Removed `enable_http2` and `listen_port` from `apisix.ssl`: [#7717](https://github.com/apache/apisix/pull/7717) +- Removed `apisix.port_admin`: [#7716](https://github.com/apache/apisix/pull/7716) +- Removed `etcd.health_check_retry`: [#7676](https://github.com/apache/apisix/pull/7676) +- Removed `nginx_config.http.lua_shared_dicts`: [#7677](https://github.com/apache/apisix/pull/7677) +- Removed `apisix.real_ip_header`: [#7696](https://github.com/apache/apisix/pull/7696) + +In the dynamic configuration, we made the following adjustments: + +- Moved `disable` of the plugin configuration under `_meta`: [#7707](https://github.com/apache/apisix/pull/7707) +- Removed `service_protocol` from the Route: [#7701](https://github.com/apache/apisix/pull/7701) + +There are also specific plugin level changes: + +- Removed `audience` field from authz-keycloak: [#7683](https://github.com/apache/apisix/pull/7683) +- Removed `upstream` field from mqtt-proxy: [#7694](https://github.com/apache/apisix/pull/7694) +- tcp-related configuration placed under the `tcp` field in error-log-logger: [#7700](https://github.com/apache/apisix/pull/7700) +- Removed `max_retry_times` and `retry_interval` fields from syslog: [#7699](https://github.com/apache/apisix/pull/7699) +- The `scheme` field has been removed from proxy-rewrite: [#7695](https://github.com/apache/apisix/pull/7695) + +#### New Admin API response format + +We have adjusted the response format of the Admin API in several PRs as follows: + +- [#7630](https://github.com/apache/apisix/pull/7630) +- [#7622](https://github.com/apache/apisix/pull/7622) + +The new response format is shown below: + +Returns a single configuration: + +```json +{ + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... 
+ }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 +} +``` + +Returns multiple configurations: + +```json +{ + "list": [ + { + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 + }, + { + "modifiedIndex": 2685163, + "value": { + "id": "2", + ... + }, + "key": "/apisix/routes/2", + "createdIndex": 2685163 + } + ], + "total": 2 +} +``` + +#### Other + +- Port of Admin API changed to 9180: [#7806](https://github.com/apache/apisix/pull/7806) +- We only support OpenResty 1.19.3.2 and above: [#7625](https://github.com/apache/apisix/pull/7625) +- Adjusted the priority of the Plugin Config object so that the priority of a plugin configuration with the same name changes from Consumer > Plugin Config > Route > Service to Consumer > Route > Plugin Config > Service: [#7614](https://github.com/apache/apisix/pull/7614) + +### Core + +- Integrating grpc-client-nginx-module to APISIX: [#7917](https://github.com/apache/apisix/pull/7917) +- k8s service discovery support for configuring multiple clusters: [#7895](https://github.com/apache/apisix/pull/7895) + +### Plugin + +- Support for injecting header with specified prefix in opentelemetry plugin: [#7822](https://github.com/apache/apisix/pull/7822) +- Added openfunction plugin: [#7634](https://github.com/apache/apisix/pull/7634) +- Added elasticsearch-logger plugin: [#7643](https://github.com/apache/apisix/pull/7643) +- response-rewrite plugin supports adding response bodies: [#7794](https://github.com/apache/apisix/pull/7794) +- log-rorate supports specifying the maximum size to cut logs: [#7749](https://github.com/apache/apisix/pull/7749) +- Added workflow plug-in. 
+ - [#7760](https://github.com/apache/apisix/pull/7760) + - [#7771](https://github.com/apache/apisix/pull/7771) +- Added Tencent Cloud Log Service plugin: [#7593](https://github.com/apache/apisix/pull/7593) +- jwt-auth supports ES256 algorithm: [#7627](https://github.com/apache/apisix/pull/7627) +- ldap-auth internal implementation, switching from lualdap to lua-resty-ldap: [#7590](https://github.com/apache/apisix/pull/7590) +- http request metrics within the prometheus plugin supports setting additional labels via variables: [#7549](https://github.com/apache/apisix/pull/7549) +- The clickhouse-logger plugin supports specifying multiple clickhouse endpoints: [#7517](https://github.com/apache/apisix/pull/7517) + +### Bugfix + +- gRPC proxy sets :authority request header to configured upstream Host: [#7939](https://github.com/apache/apisix/pull/7939) +- response-rewrite writing to an empty body may cause APISIX to fail to respond to the request: [#7836](https://github.com/apache/apisix/pull/7836) +- Fix the problem that when using Plugin Config and Consumer at the same time, there is a certain probability that the plugin configuration is not updated: [#7965](https://github.com/apache/apisix/pull/7965) +- Only reopen log files once when log cutting: [#7869](https://github.com/apache/apisix/pull/7869) +- Passive health checks should not be enabled by default: [#7850](https://github.com/apache/apisix/pull/7850) +- The zipkin plugin should pass trace IDs upstream even if it does not sample: [#7833](https://github.com/apache/apisix/pull/7833) +- Correction of opentelemetry span kind to server: [#7830](https://github.com/apache/apisix/pull/7830) +- in limit-count plugin, different routes with the same configuration should not share the same counter: [#7750](https://github.com/apache/apisix/pull/7750) +- Fix occasional exceptions thrown when removing clean_handler: [#7648](https://github.com/apache/apisix/pull/7648) +- Allow direct use of IPv6 literals when configuring
upstream nodes: [#7594](https://github.com/apache/apisix/pull/7594) +- The wolf-rbac plugin adjusts the way it responds to errors: + - [#7561](https://github.com/apache/apisix/pull/7561) + - [#7497](https://github.com/apache/apisix/pull/7497) +- the phases after proxy didn't run when 500 error happens before proxy: [#7703](https://github.com/apache/apisix/pull/7703) +- avoid error when multiple plugins associated with consumer and have rewrite phase: [#7531](https://github.com/apache/apisix/pull/7531) +- upgrade lua-resty-etcd to 1.8.3 which fixes various issues: [#7565](https://github.com/apache/apisix/pull/7565) + +## 2.15.3 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.15` branch.** + +[https://github.com/apache/apisix/blob/release/2.15/CHANGELOG.md#2153](https://github.com/apache/apisix/blob/release/2.15/CHANGELOG.md#2153) + +## 2.15.2 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.15` branch.** + +[https://github.com/apache/apisix/blob/release/2.15/CHANGELOG.md#2152](https://github.com/apache/apisix/blob/release/2.15/CHANGELOG.md#2152) + +## 2.15.1 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.15` branch.** + +[https://github.com/apache/apisix/blob/release/2.15/CHANGELOG.md#2151](https://github.com/apache/apisix/blob/release/2.15/CHANGELOG.md#2151) + +## 2.15.0 + +### Change + +- We now map the grpc error code OUT_OF_RANGE to http code 400 in grpc-transcode plugin: [#7419](https://github.com/apache/apisix/pull/7419) +- Rename health_check_retry configuration in etcd section of `config-default.yaml` to startup_retry: [#7304](https://github.com/apache/apisix/pull/7304) +- Remove `upstream.enable_websocket` which is deprecated since 2020: [#7222](https://github.com/apache/apisix/pull/7222) + +### Core + +- Support running plugins conditionally: [#7453](https://github.com/apache/apisix/pull/7453) +- Allow users to specify plugin execution priority: 
[#7273](https://github.com/apache/apisix/pull/7273) +- Support getting upstream certificate from ssl object: [#7221](https://github.com/apache/apisix/pull/7221) +- Allow customizing error response in the plugin: [#7128](https://github.com/apache/apisix/pull/7128) +- Add metrics to xRPC Redis proxy: [#7183](https://github.com/apache/apisix/pull/7183) +- Introduce deployment role to simplify the deployment of APISIX: + - [#7405](https://github.com/apache/apisix/pull/7405) + - [#7417](https://github.com/apache/apisix/pull/7417) + - [#7392](https://github.com/apache/apisix/pull/7392) + - [#7365](https://github.com/apache/apisix/pull/7365) + - [#7249](https://github.com/apache/apisix/pull/7249) + +### Plugin + +- Add ngx.shared.dict statistic in prometheus plugin: [#7412](https://github.com/apache/apisix/pull/7412) +- Allow using unescaped raw URL in proxy-rewrite plugin: [#7401](https://github.com/apache/apisix/pull/7401) +- Add PKCE support to the openid-connect plugin: [#7370](https://github.com/apache/apisix/pull/7370) +- Support custom log format in sls-logger plugin: [#7328](https://github.com/apache/apisix/pull/7328) +- Export some params for kafka-client in kafka-logger plugin: [#7266](https://github.com/apache/apisix/pull/7266) +- Add support for capturing OIDC refresh tokens in openid-connect plugin: [#7220](https://github.com/apache/apisix/pull/7220) +- Add prometheus plugin in stream subsystem: [#7174](https://github.com/apache/apisix/pull/7174) + +### Bugfix + +- clear remain state from the latest try before retrying in Kubernetes discovery: [#7506](https://github.com/apache/apisix/pull/7506) +- the query string was repeated twice when enabling both http_to_https and append_query_string in the redirect plugin: [#7433](https://github.com/apache/apisix/pull/7433) +- don't send empty Authorization header by default in http-logger: [#7444](https://github.com/apache/apisix/pull/7444) +- ensure both `group` and `disable` configurations can be used in limit-count:
[#7384](https://github.com/apache/apisix/pull/7384) +- adjust the execution priority of request-id so the tracing plugins can use the request id: [#7281](https://github.com/apache/apisix/pull/7281) +- correct the transcode of repeated Message in grpc-transcode: [#7231](https://github.com/apache/apisix/pull/7231) +- var missing in proxy-cache cache key should be ignored: [#7168](https://github.com/apache/apisix/pull/7168) +- reduce memory usage when abnormal weights are given in chash: [#7103](https://github.com/apache/apisix/pull/7103) +- cache should be bypassed when the method mismatch in proxy-cache: [#7111](https://github.com/apache/apisix/pull/7111) +- Upstream keepalive should consider TLS param: +    - [#7054](https://github.com/apache/apisix/pull/7054) +    - [#7466](https://github.com/apache/apisix/pull/7466) +- The redirect plugin sets a correct port during redirecting HTTP to HTTPS: +    - [#7065](https://github.com/apache/apisix/pull/7065) + +## 2.14.1 + +### Bugfix + +- The "unix:" in the `real_ip_from` configuration should not break the batch-requests plugin: [#7106](https://github.com/apache/apisix/pull/7106) + +## 2.14.0 + +### Change + +- To adapt the change of OpenTelemetry spec, the default port of OTLP/HTTP is changed to 4318: [#7007](https://github.com/apache/apisix/pull/7007) + +### Core + +- Introduce an experimental feature to allow subscribing Kafka message via APISIX. 
This feature is based on the pubsub framework running above websocket: + - [#7028](https://github.com/apache/apisix/pull/7028) + - [#7032](https://github.com/apache/apisix/pull/7032) +- Introduce an experimental framework called xRPC to manage non-HTTP L7 traffic: + - [#6885](https://github.com/apache/apisix/pull/6885) + - [#6901](https://github.com/apache/apisix/pull/6901) + - [#6919](https://github.com/apache/apisix/pull/6919) + - [#6960](https://github.com/apache/apisix/pull/6960) + - [#6965](https://github.com/apache/apisix/pull/6965) + - [#7040](https://github.com/apache/apisix/pull/7040) +- Now we support adding delay according to the command & key during proxying Redis traffic, which is built above xRPC: + - [#6999](https://github.com/apache/apisix/pull/6999) +- Introduce an experimental support to configure APISIX via xDS: + - [#6614](https://github.com/apache/apisix/pull/6614) + - [#6759](https://github.com/apache/apisix/pull/6759) +- Add `normalize_uri_like_servlet` option to normalize uri like servlet: [#6984](https://github.com/apache/apisix/pull/6984) +- Zookeeper service discovery via apisix-seed: [#6751](https://github.com/apache/apisix/pull/6751) + +### Plugin + +- The real-ip plugin supports recursive IP search like `real_ip_recursive`: [#6988](https://github.com/apache/apisix/pull/6988) +- The api-breaker plugin allows configuring response: [#6949](https://github.com/apache/apisix/pull/6949) +- The response-rewrite plugin supports body filters: [#6750](https://github.com/apache/apisix/pull/6750) +- The request-id plugin adds nanoid algorithm to generate ID: [#6779](https://github.com/apache/apisix/pull/6779) +- The file-logger plugin can cache & reopen file handler: [#6721](https://github.com/apache/apisix/pull/6721) +- Add casdoor plugin: [#6382](https://github.com/apache/apisix/pull/6382) +- The authz-keycloak plugin supports password grant: [#6586](https://github.com/apache/apisix/pull/6586) + +### Bugfix + +- Upstream keepalive should consider 
TLS param: [#7054](https://github.com/apache/apisix/pull/7054) +- Do not expose internal error message to the client: + - [#6982](https://github.com/apache/apisix/pull/6982) + - [#6859](https://github.com/apache/apisix/pull/6859) + - [#6854](https://github.com/apache/apisix/pull/6854) + - [#6853](https://github.com/apache/apisix/pull/6853) + - [#6846](https://github.com/apache/apisix/pull/6846) +- DNS supports SRV record with port 0: [#6739](https://github.com/apache/apisix/pull/6739) +- client mTLS was ignored sometimes in TLS session reuse: [#6906](https://github.com/apache/apisix/pull/6906) +- The grpc-web plugin doesn't override Access-Control-Allow-Origin header in response: [#6842](https://github.com/apache/apisix/pull/6842) +- The syslog plugin's default timeout is corrected: [#6807](https://github.com/apache/apisix/pull/6807) +- The authz-keycloak plugin's `access_denied_redirect_uri` was bypassed sometimes: [#6794](https://github.com/apache/apisix/pull/6794) +- Handle `USR2` signal properly: [#6758](https://github.com/apache/apisix/pull/6758) +- The redirect plugin set a correct port during redirecting HTTP to HTTPS: + - [#7065](https://github.com/apache/apisix/pull/7065) + - [#6686](https://github.com/apache/apisix/pull/6686) +- Admin API rejects unknown stream plugin: [#6813](https://github.com/apache/apisix/pull/6813) + +## 2.13.3 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.13` branch.** + +[https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2133](https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2133) + +## 2.13.2 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.13` branch.** + +[https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2132](https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2132) + +## 2.13.1 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.13` branch.** + 
+[https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2131](https://github.com/apache/apisix/blob/release/2.13/CHANGELOG.md#2131) + +## 2.13.0 + +### Change + +- change(syslog): correct the configuration [#6551](https://github.com/apache/apisix/pull/6551) +- change(server-info): use a new approach(keepalive) to report DP info [#6202](https://github.com/apache/apisix/pull/6202) +- change(admin): empty nodes should be encoded as array [#6384](https://github.com/apache/apisix/pull/6384) +- change(prometheus): replace wrong apisix_nginx_http_current_connections{state="total"} label [#6327](https://github.com/apache/apisix/pull/6327) +- change: don't expose public API by default & remove plugin interceptor [#6196](https://github.com/apache/apisix/pull/6196) + +### Core + +- :sunrise: feat: add delayed_body_filter phase [#6605](https://github.com/apache/apisix/pull/6605) +- :sunrise: feat: support for reading environment variables from yaml configuration files [#6505](https://github.com/apache/apisix/pull/6505) +- :sunrise: feat: rerun rewrite phase for newly added plugins in consumer [#6502](https://github.com/apache/apisix/pull/6502) +- :sunrise: feat: add config to control write all status to x-upstream-apisix-status [#6392](https://github.com/apache/apisix/pull/6392) +- :sunrise: feat: add kubernetes discovery module [#4880](https://github.com/apache/apisix/pull/4880) +- :sunrise: feat(graphql): support http get and post json request [#6343](https://github.com/apache/apisix/pull/6343) + +### Plugin + +- :sunrise: feat: jwt-auth support custom parameters [#6561](https://github.com/apache/apisix/pull/6561) +- :sunrise: feat: set cors allow origins by plugin metadata [#6546](https://github.com/apache/apisix/pull/6546) +- :sunrise: feat: support post_logout_redirect_uri config in openid-connect plugin [#6455](https://github.com/apache/apisix/pull/6455) +- :sunrise: feat: mocking plugin [#5940](https://github.com/apache/apisix/pull/5940) +- :sunrise: 
feat(error-log-logger): add clickhouse for error-log-logger [#6256](https://github.com/apache/apisix/pull/6256) +- :sunrise: feat: clickhouse logger [#6215](https://github.com/apache/apisix/pull/6215) +- :sunrise: feat(grpc-transcode): support .pb file [#6264](https://github.com/apache/apisix/pull/6264) +- :sunrise: feat: development of Loggly logging plugin [#6113](https://github.com/apache/apisix/pull/6113) +- :sunrise: feat: add opentelemetry plugin [#6119](https://github.com/apache/apisix/pull/6119) +- :sunrise: feat: add public api plugin [#6145](https://github.com/apache/apisix/pull/6145) +- :sunrise: feat: add CSRF plugin [#5727](https://github.com/apache/apisix/pull/5727) + +### Bugfix + +- fix(skywalking,opentelemetry): trace request rejected by auth [#6617](https://github.com/apache/apisix/pull/6617) +- fix(log-rotate): should rotate logs strictly hourly(or minutely) [#6521](https://github.com/apache/apisix/pull/6521) +- fix: deepcopy doesn't copy the metatable [#6623](https://github.com/apache/apisix/pull/6623) +- fix(request-validate): handle duplicate key in JSON [#6625](https://github.com/apache/apisix/pull/6625) +- fix(prometheus): conflict between global rule and route configure [#6579](https://github.com/apache/apisix/pull/6579) +- fix(proxy-rewrite): when conf.headers are missing, conf.method can take effect [#6300](https://github.com/apache/apisix/pull/6300) +- fix(traffic-split): failed to match rule when the first rule failed [#6292](https://github.com/apache/apisix/pull/6292) +- fix(config_etcd): skip resync_delay while etcd watch timeout [#6259](https://github.com/apache/apisix/pull/6259) +- fix(proto): avoid sharing state [#6199](https://github.com/apache/apisix/pull/6199) +- fix(limit-count): keep the counter if the plugin conf is the same [#6151](https://github.com/apache/apisix/pull/6151) +- fix(admin): correct the count field of plugin-metadata/global-rule [#6155](https://github.com/apache/apisix/pull/6155) +- fix: add missing labels 
after merging route and service [#6177](https://github.com/apache/apisix/pull/6177) + +## 2.12.1 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.12` branch.** + +[https://github.com/apache/apisix/blob/release/2.12/CHANGELOG.md#2121](https://github.com/apache/apisix/blob/release/2.12/CHANGELOG.md#2121) + +## 2.12.0 + +### Change + +- change(serverless): rename "balancer" phase to "before_proxy" [#5992](https://github.com/apache/apisix/pull/5992) +- change: don't promise to support Tengine [#5961](https://github.com/apache/apisix/pull/5961) +- change: enable HTTP when stream proxy is set and enable_admin is true [#5867](https://github.com/apache/apisix/pull/5867) + +### Core + +- :sunrise: feat(L4): support TLS over TCP upstream [#6030](https://github.com/apache/apisix/pull/6030) +- :sunrise: feat: support registering custom variable [#5941](https://github.com/apache/apisix/pull/5941) +- :sunrise: feat(vault): vault lua module, integration with jwt-auth authentication plugin [#5745](https://github.com/apache/apisix/pull/5745) +- :sunrise: feat: enable L4 stream logging [#5768](https://github.com/apache/apisix/pull/5768) +- :sunrise: feat: add http_server_location_configuration_snippet configuration [#5740](https://github.com/apache/apisix/pull/5740) +- :sunrise: feat: support resolve default value when environment not set [#5675](https://github.com/apache/apisix/pull/5675) +- :sunrise: feat(wasm): run in http header_filter [#5544](https://github.com/apache/apisix/pull/5544) + +### Plugin + +- :sunrise: feat: support hide the authentication header in basic-auth with a config [#6039](https://github.com/apache/apisix/pull/6039) +- :sunrise: feat: set proxy_request_buffering dynamically [#6075](https://github.com/apache/apisix/pull/6075) +- :sunrise: feat(mqtt): balance by client id [#6079](https://github.com/apache/apisix/pull/6079) +- :sunrise: feat: add forward-auth plugin [#6037](https://github.com/apache/apisix/pull/6037) +- 
:sunrise: feat(grpc-web): support gRPC-Web Proxy [#5964](https://github.com/apache/apisix/pull/5964) +- :sunrise: feat(limit-count): add constant key type [#5984](https://github.com/apache/apisix/pull/5984) +- :sunrise: feat(limit-count): allow sharing counter [#5881](https://github.com/apache/apisix/pull/5881) +- :sunrise: feat(splunk): support splunk hec logging plugin [#5819](https://github.com/apache/apisix/pull/5819) +- :sunrise: feat: basic support OPA plugin [#5734](https://github.com/apache/apisix/pull/5734) +- :sunrise: feat: rocketmq logger [#5653](https://github.com/apache/apisix/pull/5653) +- :sunrise: feat(mqtt-proxy): support using route's upstream [#5666](https://github.com/apache/apisix/pull/5666) +- :sunrise: feat(ext-plugin): support to get request body [#5600](https://github.com/apache/apisix/pull/5600) +- :sunrise: feat(plugins): aws lambda serverless [#5594](https://github.com/apache/apisix/pull/5594) +- :sunrise: feat(http/kafka-logger): support to log response body [#5550](https://github.com/apache/apisix/pull/5550) +- :sunrise: feat: Apache OpenWhisk plugin [#5518](https://github.com/apache/apisix/pull/5518) +- :sunrise: feat(plugin): support google cloud logging service [#5538](https://github.com/apache/apisix/pull/5538) + +### Bugfix + +- fix: the prometheus labels are inconsistent when error-log-logger is enabled [#6055](https://github.com/apache/apisix/pull/6055) +- fix(ipv6): allow disabling IPv6 resolve [#6023](https://github.com/apache/apisix/pull/6023) +- fix(mqtt): handle properties for MQTT 5 [#5916](https://github.com/apache/apisix/pull/5916) +- fix(sls-logger): unable to get millisecond part of the timestamp [#5820](https://github.com/apache/apisix/pull/5820) +- fix(mqtt-proxy): client id can be empty [#5816](https://github.com/apache/apisix/pull/5816) +- fix(ext-plugin): don't use stale key [#5782](https://github.com/apache/apisix/pull/5782) +- fix(log-rotate): race between reopen log & compression 
[#5715](https://github.com/apache/apisix/pull/5715) +- fix(batch-processor): we didn't free stale object actually [#5700](https://github.com/apache/apisix/pull/5700) +- fix: data pollution after passive health check is changed [#5589](https://github.com/apache/apisix/pull/5589) + +## 2.11.0 + +### Change + +- change(wolf-rbac): change default port number and add `authType` parameter to documentation [#5477](https://github.com/apache/apisix/pull/5477) + +### Core + +- :sunrise: feat: support advanced matching based on post form [#5409](https://github.com/apache/apisix/pull/5409) +- :sunrise: feat: initial wasm support [#5288](https://github.com/apache/apisix/pull/5288) +- :sunrise: feat(control): expose services[#5271](https://github.com/apache/apisix/pull/5271) +- :sunrise: feat(control): add dump upstream api [#5259](https://github.com/apache/apisix/pull/5259) +- :sunrise: feat: etcd cluster single node failure APISIX startup failure [#5158](https://github.com/apache/apisix/pull/5158) +- :sunrise: feat: support specify custom sni in etcd conf [#5206](https://github.com/apache/apisix/pull/5206) + +### Plugin + +- :sunrise: feat(plugin): azure serverless functions [#5479](https://github.com/apache/apisix/pull/5479) +- :sunrise: feat(kafka-logger): supports logging request body [#5501](https://github.com/apache/apisix/pull/5501) +- :sunrise: feat: provide skywalking logger plugin [#5478](https://github.com/apache/apisix/pull/5478) +- :sunrise: feat(plugins): Datadog for metrics collection [#5372](https://github.com/apache/apisix/pull/5372) +- :sunrise: feat(limit-* plugin): fallback to remote_addr when key is missing [#5422](https://github.com/apache/apisix/pull/5422) +- :sunrise: feat(limit-count): support multiple variables as key [#5378](https://github.com/apache/apisix/pull/5378) +- :sunrise: feat(limit-conn): support multiple variables as key [#5354](https://github.com/apache/apisix/pull/5354) +- :sunrise: feat(proxy-rewrite): rewrite method 
[#5292](https://github.com/apache/apisix/pull/5292) +- :sunrise: feat(limit-req): support multiple variables as key [#5302](https://github.com/apache/apisix/pull/5302) +- :sunrise: feat(proxy-cache): support memory-based strategy [#5028](https://github.com/apache/apisix/pull/5028) +- :sunrise: feat(ext-plugin): avoid sending conf request more times [#5183](https://github.com/apache/apisix/pull/5183) +- :sunrise: feat: Add ldap-auth plugin [#3894](https://github.com/apache/apisix/pull/3894) + +## 2.10.5 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.10` branch.** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2105](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2105) + +## 2.10.4 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.10` branch.** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2104](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2104) + +## 2.10.3 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.10` branch.** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2103](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2103) + +## 2.10.2 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.10` branch.** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2102](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2102) + +## 2.10.1 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/2.10` branch.** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2101](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2101) + +## 2.10.0 + +### Change + +- change(debug): move 'enable_debug' from config.yaml to debug.yaml [#5046](https://github.com/apache/apisix/pull/5046) +- change: use a new name to customize lua_shared_dict in nginx.conf 
[#5030](https://github.com/apache/apisix/pull/5030) +- change: drop the support of shell script installation [#4985](https://github.com/apache/apisix/pull/4985) + +### Core + +- :sunrise: feat(debug-mode): add dynamic debug mode [#5012](https://github.com/apache/apisix/pull/5012) +- :sunrise: feat: allow injecting logic to APISIX's method [#5068](https://github.com/apache/apisix/pull/5068) +- :sunrise: feat: allow configuring fallback SNI [#5000](https://github.com/apache/apisix/pull/5000) +- :sunrise: feat(stream_route): support CIDR in ip match [#4980](https://github.com/apache/apisix/pull/4980) +- :sunrise: feat: allow route to inherit hosts from service [#4977](https://github.com/apache/apisix/pull/4977) +- :sunrise: feat: support configuring the node listening address [#4856](https://github.com/apache/apisix/pull/4856) + +### Plugin + +- :sunrise: feat(hmac-auth): Add validate request body for hmac auth plugin [#5038](https://github.com/apache/apisix/pull/5038) +- :sunrise: feat(proxy-mirror): support mirror requests sample_ratio [#4965](https://github.com/apache/apisix/pull/4965) +- :sunrise: feat(referer-restriction): add blacklist and message [#4916](https://github.com/apache/apisix/pull/4916) +- :sunrise: feat(kafka-logger): add cluster name support [#4876](https://github.com/apache/apisix/pull/4876) +- :sunrise: feat(kafka-logger): add required_acks option [#4878](https://github.com/apache/apisix/pull/4878) +- :sunrise: feat(uri-blocker): add case insensitive switch [#4868](https://github.com/apache/apisix/pull/4868) + +### Bugfix + +- fix(radixtree_host_uri): correct matched host [#5124](https://github.com/apache/apisix/pull/5124) +- fix(radixtree_host_uri): correct matched path [#5104](https://github.com/apache/apisix/pull/5104) +- fix(nacos): distinguish services that have the same name but in different groups or namespaces [#5083](https://github.com/apache/apisix/pull/5083) +- fix(nacos): continue to process other services when request failed 
[#5112](https://github.com/apache/apisix/pull/5112) +- fix(ssl): match sni in case-insensitive way [#5074](https://github.com/apache/apisix/pull/5074) +- fix(upstream): should not override default keepalive value [#5054](https://github.com/apache/apisix/pull/5054) +- fix(DNS): prefer SRV in service discovery [#4992](https://github.com/apache/apisix/pull/4992) +- fix(consul): retry connecting after a delay [#4979](https://github.com/apache/apisix/pull/4979) +- fix: avoid copying unwanted data when the domain's IP changed [#4952](https://github.com/apache/apisix/pull/4952) +- fix(plugin_config): recover plugin when plugin_config changed [#4888](https://github.com/apache/apisix/pull/4888) + +## 2.9.0 + +### Change + +- change: rename plugin's balancer method to before_proxy [#4697](https://github.com/apache/apisix/pull/4697) + +### Core + +- :sunrise: feat: increase timers limitation [#4843](https://github.com/apache/apisix/pull/4843) +- :sunrise: feat: make A/B test APISIX easier by removing "additionalProperties = false" [#4797](https://github.com/apache/apisix/pull/4797) +- :sunrise: feat: support dash in args (#4519) [#4676](https://github.com/apache/apisix/pull/4676) +- :sunrise: feat(admin): reject invalid proto [#4750](https://github.com/apache/apisix/pull/4750) + +### Plugin + +- :sunrise: feat(ext-plugin): support ExtraInfo [#4835](https://github.com/apache/apisix/pull/4835) +- :sunrise: feat(gzip): support special * to match any type [#4817](https://github.com/apache/apisix/pull/4817) +- :sunrise: feat(real-ip): implement the first version [#4813](https://github.com/apache/apisix/pull/4813) +- :sunrise: feat(limit-*): add custom reject-message for traffic control [#4808](https://github.com/apache/apisix/pull/4808) +- :sunrise: feat: Request-ID plugin add snowflake algorithm [#4559](https://github.com/apache/apisix/pull/4559) +- :sunrise: feat: Added authz-casbin plugin and doc and tests for it [#4710](https://github.com/apache/apisix/pull/4710) +- :sunrise: 
feat: add error log skywalking reporter [#4633](https://github.com/apache/apisix/pull/4633) +- :sunrise: feat(ext-plugin): send the idempotent key when preparing conf [#4736](https://github.com/apache/apisix/pull/4736) + +### Bugfix + +- fix: the issue that plugins in global rule may be cached to route [#4867](https://github.com/apache/apisix/pull/4867) +- fix(grpc-transcode): support converting nested message [#4859](https://github.com/apache/apisix/pull/4859) +- fix(authz-keycloak): set permissions as empty table when lazy_load_path is false [#4845](https://github.com/apache/apisix/pull/4845) +- fix(proxy-cache): keep cache_method same with nginx's proxy_cache_methods [#4814](https://github.com/apache/apisix/pull/4814) +- fix(admin): inject updatetime when the request is PATCH with sub path [#4765](https://github.com/apache/apisix/pull/4765) +- fix(admin): check username for updating consumer [#4756](https://github.com/apache/apisix/pull/4756) +- fix(error-log-logger): avoid sending stale error log [#4690](https://github.com/apache/apisix/pull/4690) +- fix(grpc-transcode): handle enum type [#4706](https://github.com/apache/apisix/pull/4706) +- fix: when a request caused a 500 error, the status was converted to 405 [#4696](https://github.com/apache/apisix/pull/4696) + +## 2.8.0 + +### Change + +- change: enable stream proxy only by default [#4580](https://github.com/apache/apisix/pull/4580) + +### Core + +- :sunrise: feat: allow user-defined balancer with metadata in node [#4605](https://github.com/apache/apisix/pull/4605) +- :sunrise: feat: Add option retry_timeout that like nginx's proxy_next_upstream_timeout [#4574](https://github.com/apache/apisix/pull/4574) +- :sunrise: feat: enable balancer phase for plugins [#4549](https://github.com/apache/apisix/pull/4549) +- :sunrise: feat: allow setting separate keepalive pool [#4506](https://github.com/apache/apisix/pull/4506) +- :sunrise: feat: enable etcd health-check 
[#4191](https://github.com/apache/apisix/pull/4191) + +### Plugin + +- :sunrise: feat: add gzip plugin [#4640](https://github.com/apache/apisix/pull/4640) +- :sunrise: feat(plugin): Add new plugin ua-restriction for bot spider restriction [#4587](https://github.com/apache/apisix/pull/4587) +- :sunrise: feat(stream): add ip-restriction [#4602](https://github.com/apache/apisix/pull/4602) +- :sunrise: feat(stream): add limit-conn [#4515](https://github.com/apache/apisix/pull/4515) +- :sunrise: feat: increase ext-plugin timeout to 60s [#4557](https://github.com/apache/apisix/pull/4557) +- :sunrise: feat(key-auth): supporting key-auth plugin to get key from query string [#4490](https://github.com/apache/apisix/pull/4490) +- :sunrise: feat(kafka-logger): support for specified the log formats via admin API. [#4483](https://github.com/apache/apisix/pull/4483) + +### Bugfix + +- fix(stream): sni router is broken when session reuses [#4607](https://github.com/apache/apisix/pull/4607) +- fix: the limit-conn plugin cannot effectively intercept requests in special scenarios [#4585](https://github.com/apache/apisix/pull/4585) +- fix: ref check while deleting proto via Admin API [#4575](https://github.com/apache/apisix/pull/4575) +- fix(skywalking): handle conflict between global rule and route [#4589](https://github.com/apache/apisix/pull/4589) +- fix: `ctx.var.cookie_*` cookie not found log [#4564](https://github.com/apache/apisix/pull/4564) +- fix(request-id): we can use different ids with the same request [#4479](https://github.com/apache/apisix/pull/4479) + +## 2.7.0 + +### Change + +- change: check metadata_schema with check_schema like the other schema [#4381](https://github.com/apache/apisix/pull/4381) +- change(echo): remove odd auth_value [#4055](https://github.com/apache/apisix/pull/4055) +- fix(admin): correct the resources' count field and change its type to integer [#4385](https://github.com/apache/apisix/pull/4385) + +### Core + +- :sunrise: feat(stream): support 
client certificate verification [#4445](https://github.com/apache/apisix/pull/4445) +- :sunrise: feat(stream): accept tls over tcp [#4409](https://github.com/apache/apisix/pull/4409) +- :sunrise: feat(stream): support domain in the upstream [#4386](https://github.com/apache/apisix/pull/4386) +- :sunrise: feat(cli): wrap nginx quit cmd [#4360](https://github.com/apache/apisix/pull/4360) +- :sunrise: feat: allow to set custom timeout for route [#4340](https://github.com/apache/apisix/pull/4340) +- :sunrise: feat: nacos discovery support group [#4325](https://github.com/apache/apisix/pull/4325) +- :sunrise: feat: nacos discovery support namespace [#4313](https://github.com/apache/apisix/pull/4313) + +### Plugin + +- :sunrise: feat(client-control): set client_max_body_size dynamically [#4423](https://github.com/apache/apisix/pull/4423) +- :sunrise: feat(ext-plugin): stop the runner with SIGTERM [#4367](https://github.com/apache/apisix/pull/4367) +- :sunrise: feat(limit-req) support nodelay [#4395](https://github.com/apache/apisix/pull/4395) +- :sunrise: feat(mqtt-proxy): support domain [#4391](https://github.com/apache/apisix/pull/4391) +- :sunrise: feat(redirect): support appending query string [#4298](https://github.com/apache/apisix/pull/4298) + +### Bugfix + +- fix: solve memory leak when the client aborts [#4405](https://github.com/apache/apisix/pull/4405) +- fix(etcd): check res.body.error before accessing the data [#4371](https://github.com/apache/apisix/pull/4371) +- fix(ext-plugin): when token is stale, refresh token and try again [#4345](https://github.com/apache/apisix/pull/4345) +- fix(ext-plugin): pass environment variables [#4349](https://github.com/apache/apisix/pull/4349) +- fix: ensure the plugin is always reloaded [#4319](https://github.com/apache/apisix/pull/4319) + +## 2.6.0 + +### Change + +- change(prometheus): redesign the latency metrics & update grafana [#3993](https://github.com/apache/apisix/pull/3993) +- change(prometheus): don't expose 
metrics to internet [#3994](https://github.com/apache/apisix/pull/3994) +- change(limit-count): ensure redis cluster name is set correctly [#3910](https://github.com/apache/apisix/pull/3910) +- change: drop support of OpenResty 1.15 [#3960](https://github.com/apache/apisix/pull/3960) + +### Core + +- :sunrise: feat: support passing different host headers in multiple nodes [#4208](https://github.com/apache/apisix/pull/4208) +- :sunrise: feat: add 50x html for error page [#4164](https://github.com/apache/apisix/pull/4164) +- :sunrise: feat: support to use upstream_id in stream_route [#4121](https://github.com/apache/apisix/pull/4121) +- :sunrise: feat: support client certificate verification [#4034](https://github.com/apache/apisix/pull/4034) +- :sunrise: feat: add nacos support [#3820](https://github.com/apache/apisix/pull/3820) +- :sunrise: feat: patch tcp.sock.connect to use our DNS resolver [#4114](https://github.com/apache/apisix/pull/4114) + +### Plugin + +- :sunrise: feat(redirect): support uri encoding [#4244](https://github.com/apache/apisix/pull/4244) +- :sunrise: feat(key-auth): allow customizing header [#4013](https://github.com/apache/apisix/pull/4013) +- :sunrise: feat(response-rewrite): allow using variable in the header [#4194](https://github.com/apache/apisix/pull/4194) +- :sunrise: feat(ext-plugin): APISIX can support Java, Go and other languages to implement custom plugin [#4183](https://github.com/apache/apisix/pull/4183) + +### Bugfix + +- fix(DNS): support IPv6 resolver [#4242](https://github.com/apache/apisix/pull/4242) +- fix(healthcheck): only one_loop is needed in the passive health check report [#4116](https://github.com/apache/apisix/pull/4116) +- fix(traffic-split): configure multiple "rules", the request will be confused between upstream [#4092](https://github.com/apache/apisix/pull/4092) +- fix: ensure upstream with domain is cached [#4061](https://github.com/apache/apisix/pull/4061) +- fix: be compatible with the router created before 
2.5 [#4056](https://github.com/apache/apisix/pull/4056) +- fix(standalone): the conf should be available during start [#4027](https://github.com/apache/apisix/pull/4027) +- fix: ensure atomic operation in limit-count plugin [#3991](https://github.com/apache/apisix/pull/3991) + +## 2.5.0 + +**The changes marked with :warning: are not backward compatible.** +**Please upgrade your data accordingly before upgrading to this version.** +**[#3809](https://github.com/apache/apisix/pull/3809) Means that empty vars will make the route fail to match any requests.** + +### Change + +- :warning: change: remove unused consumer.id [#3868](https://github.com/apache/apisix/pull/3868) +- :warning: change: remove deprecated upstream.enable_websocket [#3854](https://github.com/apache/apisix/pull/3854) +- change(zipkin): rearrange the child span [#3877](https://github.com/apache/apisix/pull/3877) + +### Core + +- :sunrise: feat: support mTLS with etcd [#3905](https://github.com/apache/apisix/pull/3905) +- :warning: feat: upgrade lua-resty-expr/radixtree to support logical expression [#3809](https://github.com/apache/apisix/pull/3809) +- :sunrise: feat: load etcd configuration when apisix starts [#3799](https://github.com/apache/apisix/pull/3799) +- :sunrise: feat: let balancer support priority [#3755](https://github.com/apache/apisix/pull/3755) +- :sunrise: feat: add control api for discovery module [#3742](https://github.com/apache/apisix/pull/3742) + +### Plugin + +- :sunrise: feat(skywalking): allow destroy and configure report interval for reporter [#3925](https://github.com/apache/apisix/pull/3925) +- :sunrise: feat(traffic-split): the upstream pass_host needs to support IP mode [#3870](https://github.com/apache/apisix/pull/3870) +- :sunrise: feat: Add filter on HTTP methods for consumer-restriction plugin [#3691](https://github.com/apache/apisix/pull/3691) +- :sunrise: feat: add allow_origins_by_regex to cors plugin [#3839](https://github.com/apache/apisix/pull/3839) +- :sunrise: 
feat: support conditional response rewrite [#3577](https://github.com/apache/apisix/pull/3577) + +### Bugfix + +- fix(error-log-logger): the logger should be run in each process [#3912](https://github.com/apache/apisix/pull/3912) +- fix: use the builtin server by default [#3907](https://github.com/apache/apisix/pull/3907) +- fix(traffic-split): binding upstream via upstream_id is invalid [#3842](https://github.com/apache/apisix/pull/3842) +- fix: correct the validation for ssl_trusted_certificate [#3832](https://github.com/apache/apisix/pull/3832) +- fix: don't override cache relative headers [#3789](https://github.com/apache/apisix/pull/3789) +- fix: fail to run `make deps` on macOS [#3718](https://github.com/apache/apisix/pull/3718) + +## 2.4.0 + +### Change + +- change: global rules should not be executed on the internal api by default [#3396](https://github.com/apache/apisix/pull/3396) +- change: default to cache DNS record according to the TTL [#3530](https://github.com/apache/apisix/pull/3530) + +### Core + +- :sunrise: feat: support SRV record [#3686](https://github.com/apache/apisix/pull/3686) +- :sunrise: feat: add dns discovery [#3629](https://github.com/apache/apisix/pull/3629) +- :sunrise: feat: add consul kv discovery module [#3615](https://github.com/apache/apisix/pull/3615) +- :sunrise: feat: support to bind plugin config by `plugin_config_id` [#3567](https://github.com/apache/apisix/pull/3567) +- :sunrise: feat: support listen http2 with plaintext [#3547](https://github.com/apache/apisix/pull/3547) +- :sunrise: feat: support DNS AAAA record [#3484](https://github.com/apache/apisix/pull/3484) + +### Plugin + +- :sunrise: feat: the traffic-split plugin supports upstream_id [#3512](https://github.com/apache/apisix/pull/3512) +- :sunrise: feat(zipkin): support b3 req header [#3551](https://github.com/apache/apisix/pull/3551) + +### Bugfix + +- fix(chash): ensure retry can try every node [#3651](https://github.com/apache/apisix/pull/3651) +- fix: script 
does not work when the route is bound to a service [#3678](https://github.com/apache/apisix/pull/3678) +- fix: use openssl111 in openresty dir in precedence [#3603](https://github.com/apache/apisix/pull/3603) +- fix(zipkin): don't cache the per-req sample ratio [#3522](https://github.com/apache/apisix/pull/3522) + +For more changes, please refer to [Milestone](https://github.com/apache/apisix/milestone/13) + +## 2.3.0 + +### Change + +- fix: use luajit by default when run apisix [#3335](https://github.com/apache/apisix/pull/3335) +- feat: use luasocket instead of curl in etcd.lua [#2965](https://github.com/apache/apisix/pull/2965) + +### Core + +- :sunrise: feat: support to communicate with etcd by TLS without verification in command line [#3415](https://github.com/apache/apisix/pull/3415) +- :sunrise: feat: chaos test on route could still work when etcd is down [#3404](https://github.com/apache/apisix/pull/3404) +- :sunrise: feat: ewma use p2c to improve performance [#3300](https://github.com/apache/apisix/pull/3300) +- :sunrise: feat: support specifying https in upstream to talk with https backend [#3430](https://github.com/apache/apisix/pull/3430) +- :sunrise: feat: allow customizing lua_package_path & lua_package_cpath [#3417](https://github.com/apache/apisix/pull/3417) +- :sunrise: feat: allow to pass SNI in HTTPS proxy [#3420](https://github.com/apache/apisix/pull/3420) +- :sunrise: feat: support gRPCS [#3411](https://github.com/apache/apisix/pull/3411) +- :sunrise: feat: allow getting upstream health check status via control API [#3345](https://github.com/apache/apisix/pull/3345) +- :sunrise: feat: support dubbo [#3224](https://github.com/apache/apisix/pull/3224) +- :sunrise: feat: load balance by least connections [#3304](https://github.com/apache/apisix/pull/3304) + +### Plugin + +- :sunrise: feat: kafka-logger implemented reuse kafka producer [#3429](https://github.com/apache/apisix/pull/3429) +- :sunrise: feat(authz-keycloak): dynamic scope and resource 
mapping. [#3308](https://github.com/apache/apisix/pull/3308) +- :sunrise: feat: proxy-rewrite host support host with port [#3428](https://github.com/apache/apisix/pull/3428) +- :sunrise: feat(fault-injection): support conditional fault injection using nginx variables [#3363](https://github.com/apache/apisix/pull/3363) + +### Bugfix + +- fix(standalone): require consumer's id to be the same as username [#3394](https://github.com/apache/apisix/pull/3394) +- fix: support upstream_id & consumer with grpc [#3387](https://github.com/apache/apisix/pull/3387) +- fix: set conf info when global rule is hit without matched rule [#3332](https://github.com/apache/apisix/pull/3332) +- fix: avoid caching outdated discovery upstream nodes [#3295](https://github.com/apache/apisix/pull/3295) +- fix: create the health checker in `access` phase [#3240](https://github.com/apache/apisix/pull/3240) +- fix: make set_more_retries() work when upstream_type is chash [#2676](https://github.com/apache/apisix/pull/2676) + +For more changes, please refer to [Milestone](https://github.com/apache/apisix/milestone/12) + +## 2.2.0 + +### Change + +- disable node-status plugin by default [#2968](https://github.com/apache/apisix/pull/2968) +- k8s_deployment_info is no longer allowed in upstream [#3098](https://github.com/apache/apisix/pull/3098) +- don't treat route segment with ':' as parameter by default [#3154](https://github.com/apache/apisix/pull/3154) + +### Core + +- :sunrise: allow create consumers with multiple auth plugins [#2898](https://github.com/apache/apisix/pull/2898) +- :sunrise: increase the delay before resync etcd [#2977](https://github.com/apache/apisix/pull/2977) +- :sunrise: support enable/disable route [#2943](https://github.com/apache/apisix/pull/2943) +- :sunrise: route according to the graphql attributes [#2964](https://github.com/apache/apisix/pull/2964) +- :sunrise: share etcd auth token [#2932](https://github.com/apache/apisix/pull/2932) +- :sunrise: add control API 
[#3048](https://github.com/apache/apisix/pull/3048) + +### Plugin + +- :sunrise: feat(limit-count): use 'remote_addr' as default key [#2927](https://github.com/apache/apisix/pull/2927) +- :sunrise: feat(fault-injection): support Nginx variable in abort.body [#2986](https://github.com/apache/apisix/pull/2986) +- :sunrise: feat: implement new plugin `server-info` [#2926](https://github.com/apache/apisix/pull/2926) +- :sunrise: feat: add batch process metrics [#3070](https://github.com/apache/apisix/pull/3070) +- :sunrise: feat: Implement traffic splitting plugin [#2935](https://github.com/apache/apisix/pull/2935) +- :sunrise: feat: the proxy-rewrite plugin support pass nginx variable within header [#3144](https://github.com/apache/apisix/pull/3144) +- :sunrise: feat: Make headers to add to request in openid-connect plugin configurable [#2903](https://github.com/apache/apisix/pull/2903) +- :sunrise: feat: support var in upstream_uri on proxy-rewrite plugin [#3139](https://github.com/apache/apisix/pull/3139) + +### Bugfix + +- basic-auth plugin should run in rewrite phases. [#2905](https://github.com/apache/apisix/pull/2905) +- fixed the non effective config update in http/udp-logger [#2901](https://github.com/apache/apisix/pull/2901) +- always necessary to save the data of the limit concurrency, and release the statistical status in the log phase [#2465](https://github.com/apache/apisix/pull/2465) +- avoid duplicate auto-generated id [#3003](https://github.com/apache/apisix/pull/3003) +- fix: ctx being contaminated due to a new feature of openresty 1.19. 
**For openresty 1.19 users, it is recommended to upgrade the APISIX version as soon as possible.** [#3105](https://github.com/apache/apisix/pull/3105) +- fix: correct the validation of route.vars [#3124](https://github.com/apache/apisix/pull/3124) + +For more changes, please refer to [Milestone](https://github.com/apache/apisix/milestone/10) + +## 2.1.0 + +### Core + +- :sunrise: **support ENV variable in configuration.** [#2743](https://github.com/apache/apisix/pull/2743) +- :sunrise: **support TLS connection with etcd.** [#2548](https://github.com/apache/apisix/pull/2548) +- generate create/update_time automatically. [#2740](https://github.com/apache/apisix/pull/2740) +- add a deprecate log for enable_websocket in upstream.[#2691](https://github.com/apache/apisix/pull/2691) +- add a deprecate log for consumer id.[#2829](https://github.com/apache/apisix/pull/2829) +- Added `X-APISIX-Upstream-Status` header to distinguish 5xx errors from upstream or APISIX itself. [#2817](https://github.com/apache/apisix/pull/2817) +- support Nginx configuration snippet. [#2803](https://github.com/apache/apisix/pull/2803) + +### Plugin + +- :sunrise: **Upgrade protocol to support Apache Skywalking 8.0**[#2389](https://github.com/apache/apisix/pull/2389). So this version only supports skywalking 8.0 protocol. This plugin is disabled by default, you need to modify config.yaml to enable, which is not backward compatible. +- :sunrise: add aliyun sls logging plugin.[#2169](https://github.com/apache/apisix/issues/2169) +- proxy-cache: the cache_zone field in the schema should be optional.[#2776](https://github.com/apache/apisix/pull/2776) +- fix: validate plugin configuration in the DP [#2856](https://github.com/apache/apisix/pull/2856) + +### Bugfix + +- :bug: fix(etcd): handle etcd compaction.[#2687](https://github.com/apache/apisix/pull/2687) +- fix: move `conf/cert` to `t/certs` and disable ssl by default, which is not backward compatible. 
[#2112](https://github.com/apache/apisix/pull/2112) +- fix: check decrypt key to prevent lua thread aborted [#2815](https://github.com/apache/apisix/pull/2815) + +### Not downward compatible features in future versions + +- In the 2.3 release, the consumer will only support user names and discard the id. The consumer needs to manually clean up the id field in etcd, otherwise the schema verification will report an error during use +- In the 2.3 release, opening websocket on upstream will no longer be supported +- In version 3.0, the data plane and control plane will be separated into two independent ports, that is, the current port 9080 will only process data plane requests, and no longer process admin API requests + +For more changes, please refer to [Milestone](https://github.com/apache/apisix/milestone/8) + +## 2.0.0 + +This is a release candidate. + +### Core + +- :sunrise: **Migrate from etcd v2 to v3 protocol, which is not backward compatible. Apache APISIX only supports etcd 3.4 and above versions.** [#2036](https://github.com/apache/apisix/pull/2036) +- add labels for upstream object.[#2279](https://github.com/apache/apisix/pull/2279) +- add managed fields in json schema for resources, such as create_time and update_time.[#2444](https://github.com/apache/apisix/pull/2444) +- use interceptors to protect plugin's route[#2416](https://github.com/apache/apisix/pull/2416) +- support multiple ports for http and https listen.[#2409](https://github.com/apache/apisix/pull/2409) +- implement `core.sleep`.[#2397](https://github.com/apache/apisix/pull/2397) + +### Plugin + +- :sunrise: **add AK/SK(HMAC) auth plugin.**[#2192](https://github.com/apache/apisix/pull/2192) +- :sunrise: add referer-restriction plugin.[#2352](https://github.com/apache/apisix/pull/2352) +- `limit-count` support to use `redis` cluster.[#2406](https://github.com/apache/apisix/pull/2406) +- feat(proxy-cache): store the temporary file under cache directory.
[#2317](https://github.com/apache/apisix/pull/2317) +- feat(http-logger): support for specifying the log formats via admin API [#2309](https://github.com/apache/apisix/pull/2309) + +### Bugfix + +- :bug: **`high priority`** When the data plane receives an instruction to delete a resource(router or upstream etc.), it does not properly clean up the cache, resulting in the existing resources cannot be found. This problem only occurs in the case of long and frequent deletion operations.[#2168](https://github.com/apache/apisix/pull/2168) +- fix routing priority does not take effect.[#2447](https://github.com/apache/apisix/pull/2447) +- set random seed for each worker process at `init_worker` phase, only `init` phase is not enough.[#2357](https://github.com/apache/apisix/pull/2357) +- remove unsupported algorithm in jwt plugin.[#2356](https://github.com/apache/apisix/pull/2356) +- return correct response code when `http_to_https` enabled in redirect plugin.[#2311](https://github.com/apache/apisix/pull/2311) + +For more changes, please refer to [Milestone](https://github.com/apache/apisix/milestone/7) + +### CVE + +- Fixed Admin API default access token vulnerability + +## 1.5.0 + +### Core + +- Admin API: support authentication with SSL certificates. [1747](https://github.com/apache/apisix/pull/1747) +- Admin API: support both standard `PATCH` and sub path `PATCH`. [1930](https://github.com/apache/apisix/pull/1930) +- HealthCheck: supports custom host port. [1914](https://github.com/apache/apisix/pull/1914) +- Upstream: supports turning off the default retry mechanism. [1919](https://github.com/apache/apisix/pull/1919) +- URI: supports deleting the '/' at the end of the `URI`. [1766](https://github.com/apache/apisix/pull/1766) + +### New Plugin + +- :sunrise: **Request Validator** [1709](https://github.com/apache/apisix/pull/1709) + +### Improvements + +- change: nginx worker_shutdown_timeout is changed from 3s to recommended value 240s.
[1883](https://github.com/apache/apisix/pull/1883) +- change: the `healthcheck` timeout time type changed from `integer` to `number`. [1892](https://github.com/apache/apisix/pull/1892) +- change: the `request-validation` plugin input parameter supports `Schema` validation. [1920](https://github.com/apache/apisix/pull/1920) +- change: add comments for Makefile `install` command. [1912](https://github.com/apache/apisix/pull/1912) +- change: update comment for config.yaml `etcd.timeout` configuration. [1929](https://github.com/apache/apisix/pull/1929) +- change: add more prometheus metrics. [1888](https://github.com/apache/apisix/pull/1888) +- change: add more configuration options for `cors` plugin. [1963](https://github.com/apache/apisix/pull/1963) + +### Bugfix + +- fixed: failed to get `host` in health check configuration. [1871](https://github.com/apache/apisix/pull/1871) +- fixed: should not save the runtime data of plugin into `etcd`. [1910](https://github.com/apache/apisix/pull/1910) +- fixed: run `apisix start` several times will start multi nginx processes. [1913](https://github.com/apache/apisix/pull/1913) +- fixed: read the request body from the temporary file if it was cached. [1863](https://github.com/apache/apisix/pull/1863) +- fixed: batch processor name and error return type. [1927](https://github.com/apache/apisix/pull/1927) +- fixed: failed to read redis.ttl in `limit-count` plugin. [1928](https://github.com/apache/apisix/pull/1928) +- fixed: passive health check seems never provide a healthy report. [1918](https://github.com/apache/apisix/pull/1918) +- fixed: avoid to modify the original plugin conf. [1958](https://github.com/apache/apisix/pull/1958) +- fixed: the test case of `invalid-upstream` is unstable and sometimes fails to run. [1925](https://github.com/apache/apisix/pull/1925) + +### Doc + +- doc: added APISIX Lua Coding Style Guide. [1874](https://github.com/apache/apisix/pull/1874) +- doc: fixed link syntax in README.md. 
[1894](https://github.com/apache/apisix/pull/1894) +- doc: fixed image links in zh-cn benchmark. [1896](https://github.com/apache/apisix/pull/1896) +- doc: fixed typos in `FAQ`、`admin-api`、`architecture-design`、`discovery`、`prometheus`、`proxy-rewrite`、`redirect`、`http-logger` documents. [1916](https://github.com/apache/apisix/pull/1916) +- doc: added improvements for OSx unit tests and request validation plugin. [1926](https://github.com/apache/apisix/pull/1926) +- doc: fixed typos in `architecture-design` document. [1938](https://github.com/apache/apisix/pull/1938) +- doc: added the default import path of `Nginx` for unit testing in `Linux` and `macOS` systems in the `how-to-build` document. [1936](https://github.com/apache/apisix/pull/1936) +- doc: add `request-validation` plugin chinese document. [1932](https://github.com/apache/apisix/pull/1932) +- doc: fixed file path of `gRPC transcoding` in `README`. [1945](https://github.com/apache/apisix/pull/1945) +- doc: fixed `uri-blocker` plugin path error in `README`. [1950](https://github.com/apache/apisix/pull/1950) +- doc: fixed `grpc-transcode` plugin path error in `README`. [1946](https://github.com/apache/apisix/pull/1946) +- doc: removed unnecessary configurations for `k8s` document. [1891](https://github.com/apache/apisix/pull/1891) + +## 1.4.1 + +### Bugfix + +- Fix: multiple SSL certificates are configured, but only one certificate working fine. 
[1818](https://github.com/apache/incubator-apisix/pull/1818) + +## 1.4.0 + +### Core + +- Admin API: Support unique names for routes [1655](https://github.com/apache/incubator-apisix/pull/1655) +- Optimization of log buffer size and flush time [1570](https://github.com/apache/incubator-apisix/pull/1570) + +### New plugins + +- :sunrise: **Apache Skywalking plugin** [1241](https://github.com/apache/incubator-apisix/pull/1241) +- :sunrise: **Keycloak Identity Server Plugin** [1701](https://github.com/apache/incubator-apisix/pull/1701) +- :sunrise: **Echo Plugin** [1632](https://github.com/apache/incubator-apisix/pull/1632) +- :sunrise: **Consume Restriction Plugin** [1437](https://github.com/apache/incubator-apisix/pull/1437) + +### Improvements + +- Batch Request : Copy all headers to every request [1697](https://github.com/apache/incubator-apisix/pull/1697) +- SSL private key encryption [1678](https://github.com/apache/incubator-apisix/pull/1678) +- Improvement of docs for multiple plugins + +## 1.3.0 + +The 1.3 version is mainly for security update. + +### Security + +- reject invalid header[#1462](https://github.com/apache/incubator-apisix/pull/1462) and uri safe encode[#1461](https://github.com/apache/incubator-apisix/pull/1461) +- only allow 127.0.0.1 access admin API and dashboard by default. [#1458](https://github.com/apache/incubator-apisix/pull/1458) + +### Plugin + +- :sunrise: **add batch request plugin**. [#1388](https://github.com/apache/incubator-apisix/pull/1388) +- implemented plugin `sys logger`. [#1414](https://github.com/apache/incubator-apisix/pull/1414) + +## 1.2.0 + +The 1.2 version brings many new features, including core and plugins. + +### Core + +- :sunrise: **support etcd cluster**. [#1283](https://github.com/apache/incubator-apisix/pull/1283) +- using the local DNS resolver by default, which is friendly for k8s. 
[#1387](https://github.com/apache/incubator-apisix/pull/1387) +- support to run `header_filter`, `body_filter` and `log` phases for global rules. [#1364](https://github.com/apache/incubator-apisix/pull/1364) +- changed the `lua/apisix` dir to `apisix`(**not backward compatible**). [#1351](https://github.com/apache/incubator-apisix/pull/1351) +- add dashboard as submodule. [#1360](https://github.com/apache/incubator-apisix/pull/1360) +- allow adding custom shared dict. [#1367](https://github.com/apache/incubator-apisix/pull/1367) + +### Plugin + +- :sunrise: **add Apache Kafka plugin**. [#1312](https://github.com/apache/incubator-apisix/pull/1312) +- :sunrise: **add CORS plugin**. [#1327](https://github.com/apache/incubator-apisix/pull/1327) +- :sunrise: **add TCP logger plugin**. [#1221](https://github.com/apache/incubator-apisix/pull/1221) +- :sunrise: **add UDP logger plugin**. [1070](https://github.com/apache/incubator-apisix/pull/1070) +- :sunrise: **add proxy mirror plugin**. [#1288](https://github.com/apache/incubator-apisix/pull/1288) +- :sunrise: **add proxy cache plugin**. [#1153](https://github.com/apache/incubator-apisix/pull/1153) +- drop websocket enable control in proxy-rewrite plugin(**not backward compatible**). [1332](https://github.com/apache/incubator-apisix/pull/1332) +- Adding support to public key based introspection for OAuth plugin. [#1266](https://github.com/apache/incubator-apisix/pull/1266) +- response-rewrite plugin support binary data to client by base64. [#1381](https://github.com/apache/incubator-apisix/pull/1381) +- plugin `grpc-transcode` supports grpc deadline. [#1149](https://github.com/apache/incubator-apisix/pull/1149) +- support password auth for limit-count-redis. [#1150](https://github.com/apache/incubator-apisix/pull/1150) +- Zipkin plugin add service name and report local server IP. [#1386](https://github.com/apache/incubator-apisix/pull/1386) +- add `change_pwd` and `user_info` for Wolf-Rbac plugin. 
[#1204](https://github.com/apache/incubator-apisix/pull/1204) + +### Admin API + +- :sunrise: support key-based authentication for Admin API(**not backward compatible**). [#1169](https://github.com/apache/incubator-apisix/pull/1169) +- hide SSL private key in admin API. [#1240](https://github.com/apache/incubator-apisix/pull/1240) + +### Bugfix + +- missing `clear` table before to reuse table (**will cause memory leak**). [#1134](https://github.com/apache/incubator-apisix/pull/1134) +- print warning error message if the yaml route file is invalid. [#1141](https://github.com/apache/incubator-apisix/pull/1141) +- the balancer IP may be nil, use an empty string instead. [#1166](https://github.com/apache/incubator-apisix/pull/1166) +- plugin node-status and heartbeat don't have schema. [#1249](https://github.com/apache/incubator-apisix/pull/1249) +- the plugin basic-auth needs required field. [#1251](https://github.com/apache/incubator-apisix/pull/1251) +- check the count of upstream valid node. [#1292](https://github.com/apache/incubator-apisix/pull/1292) + +## 1.1.0 + +This release is mainly to strengthen the stability of the code and add more documentation. + +### Core + +- always specify perl include path when running test cases. [#1097](https://github.com/apache/incubator-apisix/pull/1097) +- Feature: Add support for PROXY Protocol. [#1113](https://github.com/apache/incubator-apisix/pull/1113) +- enhancement: add verify command to verify apisix configuration(nginx.conf). [#1112](https://github.com/apache/incubator-apisix/pull/1112) +- feature: increase the default size of the core file. [#1105](https://github.com/apache/incubator-apisix/pull/1105) +- feature: make the number of file is as configurable as the connections. [#1098](https://github.com/apache/incubator-apisix/pull/1098) +- core: improve the core.log module. [#1093](https://github.com/apache/incubator-apisix/pull/1093) +- Modify bin/apisix to support the SO_REUSEPORT. 
[#1085](https://github.com/apache/incubator-apisix/pull/1085) + +### Doc + +- doc: add link to download grafana meta data. [#1119](https://github.com/apache/incubator-apisix/pull/1119) +- doc: Update README.md. [#1118](https://github.com/apache/incubator-apisix/pull/1118) +- doc: add wolf-rbac plugin. [#1116](https://github.com/apache/incubator-apisix/pull/1116) +- doc: update the download link of rpm. [#1108](https://github.com/apache/incubator-apisix/pull/1108) +- doc: add more English articles. [#1092](https://github.com/apache/incubator-apisix/pull/1092) +- Adding contribution guidelines for the documentation. [#1086](https://github.com/apache/incubator-apisix/pull/1086) +- doc: getting-started.md check. [#1084](https://github.com/apache/incubator-apisix/pull/1084) +- Added additional information and refactoring sentences. [#1078](https://github.com/apache/incubator-apisix/pull/1078) +- Update admin-api-cn.md. [#1067](https://github.com/apache/incubator-apisix/pull/1067) +- Update architecture-design-cn.md. [#1065](https://github.com/apache/incubator-apisix/pull/1065) + +### CI + +- ci: remove patch which is no longer necessary and removed in the upstream. [#1090](https://github.com/apache/incubator-apisix/pull/1090) +- fix path error when install with luarocks. [#1068](https://github.com/apache/incubator-apisix/pull/1068) +- travis: run an apisix instance which was installed by luarocks. [#1063](https://github.com/apache/incubator-apisix/pull/1063) + +### Plugins + +- feature: Add wolf rbac plugin. [#1095](https://github.com/apache/incubator-apisix/pull/1095) +- Adding UDP logger plugin. [#1070](https://github.com/apache/incubator-apisix/pull/1070) +- enhancement: using internal request instead of external request in node-status plugin. [#1109](https://github.com/apache/incubator-apisix/pull/1109) + +## 1.0.0 + +This release is mainly to strengthen the stability of the code and add more documentation. + +### Core + +- :sunrise: Support routing priority.
You can match different upstream services based on conditions such as header, args, priority, etc. under the same URI. [#998](https://github.com/apache/incubator-apisix/pull/998) +- When no route is matched, an error message is returned. To distinguish it from other 404 requests. [#1013](https://github.com/apache/incubator-apisix/pull/1013) +- The address of the dashboard `/apisix/admin` supports CORS. [#982](https://github.com/apache/incubator-apisix/pull/982) +- The jsonschema validator returns a clearer error message. [#1011](https://github.com/apache/incubator-apisix/pull/1011) +- Upgrade the `ngx_var` module to version 0.5. [#1005](https://github.com/apache/incubator-apisix/pull/1005) +- Upgrade the `lua-resty-etcd` module to version 0.8. [#980](https://github.com/apache/incubator-apisix/pull/980) +- In development mode, the number of workers is automatically adjusted to 1. [#926](https://github.com/apache/incubator-apisix/pull/926) +- Remove the nginx.conf file from the code repository. It is automatically generated every time and cannot be modified manually. [#974](https://github.com/apache/incubator-apisix/pull/974) + +### Doc + +- Added documentation on how to customize development plugins. [#909](https://github.com/apache/incubator-apisix/pull/909) +- fixed example's bugs in the serverless plugin documentation. [#1006](https://github.com/apache/incubator-apisix/pull/1006) +- Added documentation for using the Oauth plugin. [#987](https://github.com/apache/incubator-apisix/pull/987) +- Added dashboard compiled documentation. [#985](https://github.com/apache/incubator-apisix/pull/985) +- Added documentation on how to perform a/b testing. [#957](https://github.com/apache/incubator-apisix/pull/957) +- Added documentation on how to enable the MQTT plugin. [#916](https://github.com/apache/incubator-apisix/pull/916) + +### Test case + +- Add test cases for key-auth plugin under normal circumstances. 
[#964](https://github.com/apache/incubator-apisix/pull/964/) +- Added tests for gRPC transcode pb options. [#920](https://github.com/apache/incubator-apisix/pull/920) + +## 0.9.0 + +This release brings many new features, such as support for running APISIX with Tengine, +an advanced debugging mode that is more developer friendly, and a new URI redirection plugin. + +### Core + +- :sunrise: Supported to run APISIX with tengine. [#683](https://github.com/apache/incubator-apisix/pull/683) +- :sunrise: Enabled HTTP2 and supported to set ssl_protocols. [#663](https://github.com/apache/incubator-apisix/pull/663) +- :sunrise: Advanced Debug Mode, Target module function's input arguments or returned value would be printed once this option is enabled. [#614](https://github.com/apache/incubator-apisix/pull/641) +- Support to install APISIX without dashboard. [#686](https://github.com/apache/incubator-apisix/pull/686) +- Removed router R3 [#725](https://github.com/apache/incubator-apisix/pull/725) + +### Plugins + +- [Redirect URI](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/redirect.md): Redirect URI plugin. [#732](https://github.com/apache/incubator-apisix/pull/732) +- [Proxy Rewrite](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/proxy-rewrite.md): Supported remove `header` feature. [#658](https://github.com/apache/incubator-apisix/pull/658) +- [Limit Count](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/limit-count.md): Supported global limit count with `Redis Server`.[#624](https://github.com/apache/incubator-apisix/pull/624) + +### lua-resty-* + +- lua-resty-radixtree + - Support for `host + uri` as an index. +- lua-resty-jsonschema + - This extension is a JSON data validator that replaces the existing `lua-rapidjson` extension. + +### Bugfix + +- key-auth plugin cannot run accurately in the case of multiple consumers. 
[#826](https://github.com/apache/incubator-apisix/pull/826) +- Exported schema for plugin serverless. [#787](https://github.com/apache/incubator-apisix/pull/787) +- Discard args of uri when using proxy-rewrite plugin [#642](https://github.com/apache/incubator-apisix/pull/642) +- Zipkin plugin not set tracing data to request header. [#715](https://github.com/apache/incubator-apisix/pull/715) +- Skipped check cjson for luajit environment in apisix CLI. [#652](https://github.com/apache/incubator-apisix/pull/652) +- Skipped to init etcd if use local file as config center. [#737](https://github.com/apache/incubator-apisix/pull/737) +- Support more built-in parameters when set chash balancer. [#775](https://github.com/apache/incubator-apisix/pull/775) + +### Dependencies + +- Replace the `lua-rapidjson` module with `lua-resty-jsonschema` global, `lua-resty-jsonschema` is faster and easier to compile. + +## 0.8.0 + +> Released on 2019/09/30 + +This release brings many new features, such as stream proxy, support MQTT protocol proxy, +and support for ARM platform, and proxy rewrite plugin. + +### Core + +- :sunrise: **[support standalone mode](https://github.com/apache/apisix/blob/master/docs/en/latest/deployment-modes.md#standalone)**: using yaml to update configurations of APISIX, more friendly to kubernetes. [#464](https://github.com/apache/incubator-apisix/pull/464) +- :sunrise: **[support stream proxy](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/stream-proxy.md)**. [#513](https://github.com/apache/incubator-apisix/pull/513) +- :sunrise: support consumer bind plugins. [#544](https://github.com/apache/incubator-apisix/pull/544) +- support domain name in upstream, not only IP. [#522](https://github.com/apache/incubator-apisix/pull/522) +- ignored upstream node when its weight is 0.
[#536](https://github.com/apache/incubator-apisix/pull/536) + +### Plugins + +- :sunrise: **[MQTT Proxy](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/mqtt-proxy.md)**: support to load balance MQTT by `client_id`, both support MQTT 3.1 and 5.0. [#513](https://github.com/apache/incubator-apisix/pull/513) +- [proxy-rewrite](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/proxy-rewrite.md): rewrite uri, + schema, host for upstream. [#594](https://github.com/apache/incubator-apisix/pull/594) + +### ARM + +- :sunrise: **APISIX can run normally under Ubuntu 18.04 of ARM64 architecture**, so you can use APISIX as IoT gateway with MQTT plugin. + +### lua-resty-* + +- lua-resty-ipmatcher + - support IPv6 + - IP white/black list, route. +- lua-resty-radixtree + - allow to specify multiple host, remote_addr and uri. + - allow to define user-function to filter request. + - use `lua-resty-ipmatcher` instead of `lua-resty-iputils`, `lua-resty-ipmatcher` matches fast and support IPv6. + +### Bugfix + +- healthcheck: the checker name is wrong if APISIX works under multiple processes. [#568](https://github.com/apache/incubator-apisix/issues/568) + +### Dependencies + +- removed `lua-tinyyaml` from source code base, and install through Luarocks. + +## 0.7.0 + +> Released on 2019/09/06 + +This release brings many new features, such as IP black and white list, gRPC protocol transcoding, IPv6, IdP (identity provider) services, serverless, Change the default route to radix tree (**not downward compatible**), and more. + +### Core + +- :sunrise: **[gRPC transcoding](https://github.com/apache/apisix/blob/master/docs/en/latest/plugins/grpc-transcode.md)**: supports protocol transcoding so that clients can access your gRPC API by using HTTP/JSON.
[#395](https://github.com/apache/incubator-apisix/issues/395) +- :sunrise: **[radix tree router](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/router-radixtree.md)**: The radix tree is used as the default router implementation. It supports the uri, host, cookie, request header, request parameters, Nginx built-in variables, etc. as the routing conditions, and supports common operators such as equal, greater than, less than, etc., more powerful and flexible.**IMPORTANT: This change is not downward compatible. All users who use historical versions need to manually modify their routing to work properly.** [#414](https://github.com/apache/incubator-apisix/issues/414) +- Dynamic upstream supports more parameters, you can specify the upstream uri and host, and whether to enable websocket. [#451](https://github.com/apache/incubator-apisix/pull/451) +- Support for get values from cookies directly from `ctx.var`. [#449](https://github.com/apache/incubator-apisix/pull/449) +- Routing support IPv6. [#331](https://github.com/apache/incubator-apisix/issues/331) + +### Plugins + +- :sunrise: **[serverless](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/serverless.md)**: With serverless support, users can dynamically run any Lua function on a gateway node. Users can also use this feature as a lightweight plugin.[#86](https://github.com/apache/incubator-apisix/pull/86) +- :sunrise: **support IdP**: Support external authentication services, such as Auth0, okta, etc., users can use this to connect to Oauth2.0 and other authentication methods. [#447](https://github.com/apache/incubator-apisix/pull/447) +- [rate limit](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/limit-conn.md): Support for more restricted keys, such as `X-Forwarded-For` and `X-Real-IP`, and allows users to use Nginx variables, request headers, and request parameters as keys. 
[#228](https://github.com/apache/incubator-apisix/issues/228) +- [IP black and white list](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/ip-restriction.md) Support IP black and white list for security. [#398](https://github.com/apache/incubator-apisix/pull/398) + +### CLI + +- Add the `version` directive to get the version number of APISIX. [#420](https://github.com/apache/incubator-apisix/issues/420) + +### Admin + +- The `PATCH` API is supported and can be modified individually for a configuration without submitting the entire configuration. [#365](https://github.com/apache/incubator-apisix/pull/365) + +### Dashboard + +- :sunrise: **Add the online version of the dashboard**,users can [experience APISIX](http://apisix.iresty.com/) without install. [#374](https://github.com/apache/incubator-apisix/issues/374) + +[Back to TOC](#table-of-contents) + +## 0.6.0 + +> Released on 2019/08/05 + +This release brings many new features such as health check and circuit breaker, debug mode, opentracing and JWT auth. And add **built-in dashboard**. + +### Core + +- :sunrise: **[Health Check and Circuit Breaker](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/tutorials/health-check.md)**: Enable health check on the upstream node, and will automatically filter unhealthy nodes during load balancing to ensure system stability. [#249](https://github.com/apache/incubator-apisix/pull/249) +- Anti-ReDoS(Regular expression Denial of Service). [#252](https://github.com/apache/incubator-apisix/pull/250) +- supported debug mode. [#319](https://github.com/apache/incubator-apisix/pull/319) +- allowed to use different router. [#364](https://github.com/apache/incubator-apisix/pull/364) +- supported to match route by host + uri. [#325](https://github.com/apache/incubator-apisix/pull/325) +- allowed plugins to handler balance phase. [#299](https://github.com/apache/incubator-apisix/pull/299) +- added desc for upstream and service in schema. 
[#289](https://github.com/apache/incubator-apisix/pull/289) + +### Plugins + +- :sunrise: **[OpenTracing](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/zipkin.md)**: support Zipkin and Apache SkyWalking. [#304](https://github.com/apache/incubator-apisix/pull/304) +- [JWT auth](https://github.com/apache/apisix/blob/master/docs/en/latest/plugins/jwt-auth.md). [#303](https://github.com/apache/incubator-apisix/pull/303) + +### CLI + +- support multiple ips of `allow`. [#340](https://github.com/apache/incubator-apisix/pull/340) +- supported real_ip configure in nginx.conf and added functions to get ip and remote ip. [#236](https://github.com/apache/incubator-apisix/pull/236) + +### Dashboard + +- :sunrise: **add built-in dashboard**. [#327](https://github.com/apache/incubator-apisix/pull/327) + +### Test + +- support OSX in Travis CI. [#217](https://github.com/apache/incubator-apisix/pull/217) +- installed all of the dependencies to `deps` folder. [#248](https://github.com/apache/incubator-apisix/pull/248) + +[Back to TOC](#table-of-contents) diff --git a/CloudronPackages/APISIX/apisix-source/CODE_OF_CONDUCT.md b/CloudronPackages/APISIX/apisix-source/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..fe93188 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/CODE_OF_CONDUCT.md @@ -0,0 +1,121 @@ + + +*The following is copied for your convenience from <https://www.apache.org/foundation/policies/conduct.html>. If there's a discrepancy between the two, let us know or submit a PR to fix it.* + +# Code of Conduct # + +## Introduction ## + +This code of conduct applies to all spaces managed by the Apache +Software Foundation, including IRC, all public and private mailing +lists, issue trackers, wikis, blogs, Twitter, and any other +communication channel used by our communities. A code of conduct which +is specific to in-person events (i.e., conferences) is codified in the +published ASF anti-harassment policy.
+ +We expect this code of conduct to be honored by everyone who +participates in the Apache community formally or informally, or claims +any affiliation with the Foundation, in any Foundation-related +activities and especially when representing the ASF, in any role. + +This code __is not exhaustive or complete__. It serves to distill our +common understanding of a collaborative, shared environment and goals. +We expect it to be followed in spirit as much as in the letter, so that +it can enrich all of us and the technical communities in which we participate. + +## Specific Guidelines ## + +We strive to: + +1. __Be open.__ We invite anyone to participate in our community. We preferably use public methods of communication for project-related messages, unless discussing something sensitive. This applies to messages for help or project-related support, too; not only is a public support request much more likely to result in an answer to a question, it also makes sure that any inadvertent mistakes made by people answering will be more easily detected and corrected. + +2. __Be `empathetic`, welcoming, friendly, and patient.__ We work together to resolve conflict, assume good intentions, and do our best to act in an empathetic fashion. We may all experience some frustration from time to time, but we do not allow frustration to turn into a personal attack. A community where people feel uncomfortable or threatened is not a productive one. We should be respectful when dealing with other community members as well as with people outside our community. + +3. __Be collaborative.__ Our work will be used by other people, and in turn we will depend on the work of others. When we make something for the benefit of the project, we are willing to explain to others how it works, so that they can build on the work to make it even better. Any decision we make will affect users and colleagues, and we take those consequences seriously when making decisions. + +4. 
__Be inquisitive.__ Nobody knows everything! Asking questions early avoids many problems later, so questions are encouraged, though they may be directed to the appropriate forum. Those who are asked should be responsive and helpful, within the context of our shared goal of improving Apache project code. + +5. __Be careful in the words that we choose.__ Whether we are participating as professionals or volunteers, we value professionalism in all interactions, and take responsibility for our own speech. Be kind to others. Do not insult or put down other participants. Harassment and other exclusionary behaviour are not acceptable. This includes, but is not limited to: + + * Violent threats or language directed against another person. + * Sexist, racist, or otherwise discriminatory jokes and language. + * Posting sexually explicit or violent material. + * Posting (or threatening to post) other people's personally identifying information ("doxing"). + * Sharing private content, such as emails sent privately or non-publicly, or unlogged forums such as IRC channel history. + * Personal insults, especially those using racist or sexist terms. + * Unwelcome sexual attention. + * Excessive or unnecessary profanity. + * Repeated harassment of others. In general, if someone asks you to stop, then stop. + * Advocating for, or encouraging, any of the above behaviour. + +6. __Be concise.__ Keep in mind that what you write once will be read by hundreds of people. Writing a short email means people can understand the conversation as efficiently as possible. Short emails should always strive to be empathetic, welcoming, friendly and patient. When a long explanation is necessary, consider adding a summary.

+ + Try to bring new ideas to a conversation so that each mail adds something unique to the thread, keeping in mind that the rest of the thread still contains the other messages with arguments that have already been made. + + Try to stay on topic, especially in discussions that are already fairly large. + +7. __Step down considerately.__ Members of every project come and go. When somebody leaves or disengages from the project they should tell people they are leaving and take the proper steps to ensure that others can pick up where they left off. In doing so, they should remain respectful of those who continue to participate in the project and should not misrepresent the project's goals or achievements. Likewise, community members should respect any individual's choice to leave the project.

+ +## Diversity Statement ## + +Apache welcomes and encourages participation by everyone. We are committed to being a community that everyone feels good about joining. Although we may not be able to satisfy everyone, we will always work to treat everyone well. + +No matter how you identify yourself or how others perceive you: we welcome you. Though no list can hope to be comprehensive, we explicitly honour diversity in: age, culture, ethnicity, genotype, gender identity or expression, language, national origin, neurotype, phenotype, political beliefs, profession, race, religion, sexual orientation, socioeconomic status, subculture and technical ability. + +Though we welcome people fluent in all languages, Apache development is conducted in English. + +Standards for behaviour in the Apache community are detailed in the Code of Conduct above. We expect participants in our community to meet these standards in all their interactions and to help others to do so as well. + +## Reporting Guidelines ## + +While this code of conduct should be adhered to by participants, we recognize that sometimes people may have a bad day, or be unaware of some of the guidelines in this code of conduct. When that happens, you may reply to them and point out this code of conduct. Such messages may be in public or in private, whatever is most appropriate. However, regardless of whether the message is public or not, it should still adhere to the relevant parts of this code of conduct; in particular, it should not be abusive or disrespectful. + +If you believe someone is violating this code of conduct, you may reply to +them and point out this code of conduct. Such messages may be in public or in +private, whatever is most appropriate. Assume good faith; it is more likely +that participants are unaware of their bad behaviour than that they +intentionally try to degrade the quality of the discussion. 
Should there be +difficulties in dealing with the situation, you may report your compliance +issues in confidence to either: + + * President of the Apache Software Foundation: Sam Ruby (rubys at intertwingly dot net) + +or one of our volunteers: + + * [Mark Thomas](http://home.apache.org/~markt/coc.html) + * [Joan Touzet](http://home.apache.org/~wohali/) + * [Sharan Foga](http://home.apache.org/~sharan/coc.html) + +If the violation is in documentation or code, for example inappropriate pronoun usage or word choice within official documentation, we ask that people report these privately to the project in question at private@project.apache.org, and, if they have sufficient ability within the project, to resolve or remove the concerning material, being mindful of the perspective of the person originally reporting the issue. + +## End Notes ## + +This Code defines __empathy__ as "a vicarious participation in the emotions, ideas, or opinions of others; the ability to imagine oneself in the condition or predicament of another." __Empathetic__ is the adjectival form of empathy. 
+ +This statement thanks the following, on which it draws for content and inspiration: + + * [CouchDB Project Code of conduct](http://couchdb.apache.org/conduct.html) + * [Fedora Project Code of Conduct](http://fedoraproject.org/code-of-conduct) + * [Django Code of Conduct](https://www.djangoproject.com/conduct/) + * [Debian Code of Conduct](http://www.debian.org/vote/2014/vote_002) + * [Twitter Open Source Code of Conduct](https://github.com/twitter/code-of-conduct/blob/master/code-of-conduct.md) + * [Mozilla Code of Conduct/Draft](https://wiki.mozilla.org/Code_of_Conduct/Draft#Conflicts_of_Interest) + * [Python Diversity Appendix](https://www.python.org/community/diversity/) + * [Python Mentors Home Page](http://pythonmentors.com/) diff --git a/CloudronPackages/APISIX/apisix-source/CODE_STYLE.md b/CloudronPackages/APISIX/apisix-source/CODE_STYLE.md new file mode 100644 index 0000000..f6c0cc6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/CODE_STYLE.md @@ -0,0 +1,440 @@ +--- +title: APISIX Lua Coding Style Guide +--- + + + +## Indentation + +Use 4 spaces as an indent: + +```lua +--No +if a then +ngx.say("hello") +end +``` + +```lua +--Yes +if a then + ngx.say("hello") +end +``` + +You can simplify the operation by changing the tab to 4 spaces in the editor you are using. + +## Space + +On both sides of the operator, you need to use a space to separate: + +```lua +--No +local i=1 +local s = "apisix" +``` + +```lua +--Yes +local i = 1 +local s = "apisix" +``` + +## Blank line + +Many developers will add a semicolon at the end of the line: + +```lua +--No +if a then +    ngx.say("hello"); +end; +``` + +Adding a semicolon will make the Lua code look ugly and unnecessary. Also, don't want to save the number of lines in the code, the latter turns the multi-line code into one line in order to appear "simple". 
This will not know when the positioning error is in the end of the code: + +```lua +--No +if a then ngx.say("hello") end +``` + +```lua +--Yes +if a then + ngx.say("hello") +end +``` + +The functions needs to be separated by two blank lines: + +```lua +--No +local function foo() +end +local function bar() +end +``` + +```lua +--Yes +local function foo() +end + + +local function bar() +end +``` + +If there are multiple if elseif branches, they need a blank line to separate them: + +```lua +--No +if a == 1 then + foo() +elseif a== 2 then + bar() +elseif a == 3 then + run() +else + error() +end +``` + +```lua +--Yes +if a == 1 then + foo() + +elseif a == 2 then + bar() + +elseif a == 3 then + run() + +else + error() +end +``` + +## Maximum length per line + +Each line cannot exceed 100 characters. If it exceeds, you need to wrap and align: + +```lua +--No +return limit_conn_new("plugin-limit-conn", conf.conn, conf.burst, conf.default_conn_delay) +``` + +```lua +--Yes +return limit_conn_new("plugin-limit-conn", conf.conn, conf.burst, + conf.default_conn_delay) +``` + +When the linefeed is aligned, the correspondence between the upper and lower lines should be reflected. For the example above, the parameters of the second line of functions are to the right of the left parenthesis of the first line. + +If it is a string stitching alignment, you need to put `..` in the next line: + +```lua +--No +return limit_conn_new("plugin-limit-conn" .. "plugin-limit-conn" .. + "plugin-limit-conn") +``` + +```lua +--Yes +return limit_conn_new("plugin-limit-conn" .. "plugin-limit-conn" + .. "plugin-limit-conn") +``` + +```lua +--Yes +return "param1", "plugin-limit-conn" + .. 
"plugin-limit-conn" +``` + +## Variable + +Local variables should always be used, not global variables: + +```lua +--No +i = 1 +s = "apisix" +``` + +```lua +--Yes +local i = 1 +local s = "apisix" +``` + +Variable naming uses the `snake_case` style: + +```lua +--No +local IndexArr = 1 +local str_Name = "apisix" +``` + +```lua +--Yes +local index_arr = 1 +local str_name = "apisix" +``` + +Use all capitalization for constants: + +```lua +--No +local max_int = 65535 +local server_name = "apisix" +``` + +```lua +--Yes +local MAX_INT = 65535 +local SERVER_NAME = "apisix" +``` + +## Table + +Use `table.new` to pre-allocate the table: + +```lua +--No +local t = {} +for i = 1, 100 do + t[i] = i +end +``` + +```lua +--Yes +local new_tab = require "table.new" +local t = new_tab(100, 0) +for i = 1, 100 do + t[i] = i +end +``` + +Don't use `nil` in an array: + +```lua +--No +local t = {1, 2, nil, 3} +``` + +If you must use null values, use `ngx.null` to indicate: + +```lua +--Yes +local t = {1, 2, ngx.null, 3} +``` + +## String + +Do not splicing strings on the hot code path: + +```lua +--No +local s = "" +for i = 1, 100000 do + s = s .. "a" +end +``` + +```lua +--Yes +local new_tab = require "table.new" +local t = new_tab(100000, 0) +for i = 1, 100000 do + t[i] = "a" +end +local s = table.concat(t, "") +``` + +## Function + +The naming of functions also follows `snake_case`: + +```lua +--No +local function testNginx() +end +``` + +```lua +--Yes +local function test_nginx() +end +``` + +The function should return as early as possible: + +```lua +--No +local function check(age, name) + local ret = true + if age < 20 then + ret = false + end + + if name == "a" then + ret = false + end + -- do something else + return ret +end +``` + +```lua +--Yes +local function check(age, name) + if age < 20 then + return false + end + + if name == "a" then + return false + end + -- do something else + return true +end +``` + +The function should return ``, `err`. 
+The first return value means successful or not, if not, the second return value specifies the error message. +The error message can be ignored in some cases. + +```lua +--No +local function check() + return "failed" +end +``` + +```lua +--Yes +local function check() + return false, "failed" +end +``` + +## Module + +All require libraries must be localized: + +```lua +--No +local function foo() + local ok, err = ngx.timer.at(delay, handler) +end +``` + +```lua +--Yes +local timer_at = ngx.timer.at + +local function foo() + local ok, err = timer_at(delay, handler) +end +``` + +For style unification, `require` and `ngx` also need to be localized: + +```lua +--No +local core = require("apisix.core") +local timer_at = ngx.timer.at + +local function foo() + local ok, err = timer_at(delay, handler) +end +``` + +```lua +--Yes +local ngx = ngx +local require = require +local core = require("apisix.core") +local timer_at = ngx.timer.at + +local function foo() + local ok, err = timer_at(delay, handler) +end +``` + +## Error handling + +For functions that return with error information, the error information must be judged and processed: + +```lua +--No +local sock = ngx.socket.tcp() +local ok = sock:connect("www.google.com", 80) +ngx.say("successfully connected to google!") +``` + +```lua +--Yes +local sock = ngx.socket.tcp() +local ok, err = sock:connect("www.google.com", 80) +if not ok then + ngx.say("failed to connect to google: ", err) + return +end +ngx.say("successfully connected to google!") +``` + +The function you wrote yourself, the error message is to be returned as a second parameter in the form of a string: + +```lua +--No +local function foo() + local ok, err = func() + if not ok then + return false + end + return true +end +``` + +```lua +--No +local function foo() + local ok, err = func() + if not ok then + return false, {msg = err} + end + return true +end +``` + +```lua +--Yes +local function foo() + local ok, err = func() + if not ok then + return false, 
"failed to call func(): " .. err + end + return true +end +``` diff --git a/CloudronPackages/APISIX/apisix-source/CONTRIBUTING.md b/CloudronPackages/APISIX/apisix-source/CONTRIBUTING.md new file mode 100644 index 0000000..872e2a5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/CONTRIBUTING.md @@ -0,0 +1,152 @@ + + +# Contributing to APISIX + +Firstly, thanks for your interest in contributing! I hope that this will be a pleasant first experience for you, and that you will return to continue +contributing. + +## How to contribute? + +Most of the contributions that we receive are code contributions, but you can also contribute to the documentation or simply report solid bugs for us to fix. Nor is code the only way to contribute to the project. We strongly value documentation, integration with other projects, and gladly accept improvements for these aspects. + +For new contributors, please take a look at issues with a tag called [Good first issue](https://github.com/apache/apisix/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) or [Help wanted](https://github.com/apache/apisix/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). + +## How to report a bug? + +* **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/apache/apisix/issues). + +* If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/apache/apisix/issues/new). Be sure to include a **title and clear description**, as much relevant information as possible, and a **code sample** or an **executable test case** demonstrating the expected behavior that is not occurring. + +## How to add a new feature or change an existing one + +_Before making any significant changes, please [open an issue](https://github.com/apache/apisix/issues)._ Discussing your proposed changes ahead of time will make the contribution process smooth for everyone. 
+ +Once we've discussed your changes and you've got your code ready, make sure that tests are passing and open your pull request. Your PR is most likely to be accepted if it: + +* Update the README.md with details of changes to the interface. +* Includes tests for new functionality. +* References the original issue in the description, e.g. "Resolves #123". +* Has a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). +* Ensure your pull request's title starts with one of the words in the `types` section of [semantic.yml](https://github.com/apache/apisix/blob/master/.github/workflows/semantic.yml). +* Follow the [PR manners](https://raw.githubusercontent.com/apache/apisix/master/.github/PULL_REQUEST_TEMPLATE.md) + +## Contribution Guidelines for Documentation + +* Linting/Style + + For linting both our Markdown and YAML files we use: + + - npm based [markdownlint-cli](https://www.npmjs.com/package/markdownlint-cli) + + For linting all files' license header we use: + + - [license-eye](https://github.com/apache/skywalking-eyes) + + For linting our shell files we use: + + - [shellcheck](https://github.com/koalaman/shellcheck) + + For linting our zh document files we use: + + - [autocorrect](https://github.com/huacnlee/autocorrect) + +* Active Voice + + In general, use active voice when formulating the sentence instead of passive voice. A sentence written in the active voice will emphasize + the person or thing who is performing an action (e.g., The dog chased the ball). In contrast, the passive voice will highlight + the recipient of the action (The ball was chased by the dog). Therefore, use the passive voice only when it's less important + who or what completed the action and more important that the action was completed. For example: + + - Recommended: The key-auth plugin authenticates the requests. + - Not recommended: The requests are authenticated by the key-auth plugin. 
+ +* Capitalization: + + * For titles of a section, capitalize the first letter of each word except for the [closed-class words](https://en.wikipedia.org/wiki/Part_of_speech#Open_and_closed_classes) + such as determiners, pronouns, conjunctions, and prepositions. Use the following [link](https://capitalizemytitle.com/#Chicago) for guidance. + - Recommended: Authentication **with** APISIX + + * For normal sentences, don't [capitalize](https://www.grammarly.com/blog/capitalization-rules/) random words in the middle of the sentences. + Use the Chicago manual for capitalization rules for the documentation. + +* Second Person + + In general, use second person in your docs rather than first person. For example: + + - Recommended: You are recommended to use the docker based deployment. + - Not Recommended: We recommend to use the docker based deployment. + +* Spellings + + Use [American spellings](https://www.oxfordinternationalenglish.com/differences-in-british-and-american-spelling/) when + contributing to the documentation. + +* Voice + + * Use a friendly and conversational tone. Always use simple sentences. If the sentence is lengthy, try to break it into smaller sentences. + +## Check code style and test case style + +* code style + * Please take a look at [APISIX Lua Coding Style Guide](CODE_STYLE.md). + * Use tool to check your code statically by command: `make lint`. + +```shell + # install `luacheck` first before run it + $ luarocks install luacheck + # check source code + $ make lint + ./utils/check-lua-code-style.sh + + luacheck -q apisix t/lib + Total: 0 warnings / 0 errors in 146 files + + find apisix -name *.lua ! 
-wholename apisix/cli/ngx_tpl.lua -exec ./utils/lj-releng {} + + + grep -E ERROR.*.lua: /tmp/check.log + + true + + [ -s /tmp/error.log ] + ./utils/check-test-code-style.sh + + find t -name '*.t' -exec grep -E '\-\-\-\s+(SKIP|ONLY|LAST|FIRST)$' '{}' + + + true + + '[' -s /tmp/error.log ']' + + find t -name '*.t' -exec ./utils/reindex '{}' + + + grep done. /tmp/check.log + + true + + '[' -s /tmp/error.log ']' +``` + + The `lj-releng` and `reindex` will be downloaded automatically by `make lint` if not exists. + +* test case style + * Use tool to check your test case style statically by command, eg: `make lint`. + * When the test file is too large, for example > 800 lines, you should split it to a new file. + Please take a look at `t/plugin/limit-conn.t` and `t/plugin/limit-conn2.t`. + * For more details, see the [testing framework](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md) + +## Contributor gifts + +If you have contributed to Apache APISIX, no matter it is a code contribution to fix a bug or a feature request, or a documentation change, Congratulations! You are eligible to receive the APISIX special gifts with a digital certificate! It's always been the community effort that has made Apache APISIX be understood and used by more developers. + +![Contributor gifts](https://static.apiseven.com/2022/12/29/63acfb2f208e1.png) + +Contributors can request gifts by filling out this [Google form](https://forms.gle/DhPL96LnJwuaHjHU7) or [QQ Form](https://wj.qq.com/s2/11438041/7b07/). After filling in the form, please wait patiently. The community needs some time to review submissions. + +## Do you have questions about the source code? + +- **QQ Group**: 781365357(recommended), 578997126, 552030619 +- Join in `apisix` channel at [Apache Slack](http://s.apache.org/slack-invite). If the link is not working, find the latest one at [Apache INFRA WIKI](https://cwiki.apache.org/confluence/display/INFRA/Slack+Guest+Invites). 
diff --git a/CloudronPackages/APISIX/apisix-source/LICENSE b/CloudronPackages/APISIX/apisix-source/LICENSE new file mode 100644 index 0000000..5cadce4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/LICENSE @@ -0,0 +1,219 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +======================================================================= +Apache APISIX Subcomponents: + +The Apache APISIX project contains subcomponents with separate copyright +notices and license terms. Your use of the source code for the these +subcomponents is subject to the terms and conditions of the following +licenses. + +======================================================================== +Apache 2.0 licenses +======================================================================== + +The following components are provided under the Apache License. See project link for details. +The text of each license is the standard Apache 2.0 license. 
+ + ewma.lua file from kubernetes/ingress-nginx: https://github.com/kubernetes/ingress-nginx Apache 2.0 + hello.go file from OpenFunction/samples: https://github.com/OpenFunction/samples Apache 2.0 diff --git a/CloudronPackages/APISIX/apisix-source/MAINTAIN.md b/CloudronPackages/APISIX/apisix-source/MAINTAIN.md new file mode 100644 index 0000000..795aa8c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/MAINTAIN.md @@ -0,0 +1,62 @@ + + +## Release steps + +### Release patch version + +1. Create a [pull request](https://github.com/apache/apisix/commit/7db31a1a7186b966bc0f066539d4de8011871012) (contains the changelog and version change) to master + > The changelog only needs to provide a link to the minor branch. +2. Create a [pull request](https://github.com/apache/apisix/commit/21d7673c6e8ff995677456cdebc8ded5afbb3d0a) (contains the backport commits, and the change in step 1) to minor branch + > This should include those PRs that contain the `need backport` tag since the last patch release. Also, the title of these PRs need to be added to the changelog of the minor branch. +3. Merge it into minor branch +4. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created via `VERSION=x.y.z make release-src` +5. Send the [vote email](https://lists.apache.org/thread/vq4qtwqro5zowpdqhx51oznbjy87w9d0) to dev@apisix.apache.org + > After executing the `VERSION=x.y.z make release-src` command, the content of the vote email will be automatically generated in the `./release` directory named `apache-apisix-${x.y.z}-vote-contents` +6. When the vote is passed, send the [vote result email](https://lists.apache.org/thread/k2frnvj4zj9oynsbr7h7nd6n6m3q5p89) to dev@apisix.apache.org +7. Move the vote artifact to Apache's apisix repo +8. Register the release info in https://reporter.apache.org/addrelease.html?apisix +9. Create a [GitHub release](https://github.com/apache/apisix/releases/tag/2.10.2) from the minor branch +10. 
Update [APISIX's website](https://github.com/apache/apisix-website/commit/f9104bdca50015722ab6e3714bbcd2d17e5c5bb3) if the version number is the largest +11. Update APISIX rpm package + > Go to [apisix-build-tools](https://github.com/api7/apisix-build-tools) repository and create a new tag named `apisix-${x.y.z}` to automatically submit the + rpm package to yum repo +12. - If the version number is the largest, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after the PR is merged, create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2`. + - If an LTS version is released and the version number is less than the current largest (e.g. the current largest version number is 2.14.1, but the LTS version 2.13.2 is to be released), submit a PR like [APISIX docker](https://github.com/apache/apisix-docker/pull/322) in [APISIX docker repository](https://github.com/apache/apisix-docker) and named as `release/apisix-${version}`, e.g. `release/apisix-2.13.2`; after the PR is reviewed, there is no need to merge it — just close the PR and push the branch to APISIX docker repository. +13. Update [APISIX helm chart](https://github.com/apache/apisix-helm-chart/pull/234) if the version number is the largest +14. Send the [ANNOUNCE email](https://lists.apache.org/thread.html/ree7b06e6eac854fd42ba4f302079661a172f514a92aca2ef2f1aa7bb%40%3Cdev.apisix.apache.org%3E) to dev@apisix.apache.org & announce@apache.org + +### Release minor version + +1. Create a minor branch, and create [pull request](https://github.com/apache/apisix/commit/bc6ddf51f15e41fffea6c5bd7d01da9838142b66) to master branch from it +2. Package a vote artifact to Apache's dev-apisix repo. The artifact can be created via `VERSION=x.y.z make release-src` +3. 
Send the [vote email](https://lists.apache.org/thread/q8zq276o20r5r9qjkg074nfzb77xwry9) to dev@apisix.apache.org + > After executing the `VERSION=x.y.z make release-src` command, the content of the vote email will be automatically generated in the `./release` directory named `apache-apisix-${x.y.z}-vote-contents` +4. When the vote is passed, send the [vote result email](https://lists.apache.org/thread/p1m9s116rojlhb91g38cj8646393qkz7) to dev@apisix.apache.org +5. Move the vote artifact to Apache's apisix repo +6. Register the release info in https://reporter.apache.org/addrelease.html?apisix +7. Create a [GitHub release](https://github.com/apache/apisix/releases/tag/2.10.0) from the minor branch +8. Merge the pull request into master branch +9. Update [APISIX's website](https://github.com/apache/apisix-website/commit/7bf0ab5a1bbd795e6571c4bb89a6e646115e7ca3) +10. Update APISIX rpm package. + > Go to [apisix-build-tools](https://github.com/api7/apisix-build-tools) repository and create a new tag named `apisix-${x.y.z}` to automatically submit the rpm package to yum repo +11. - If the version number is the largest, update [APISIX docker](https://github.com/apache/apisix-docker/commit/829d45559c303bea7edde5bebe9fcf4938071601) in [APISIX docker repository](https://github.com/apache/apisix-docker), after the PR is merged, create a new branch from master, named as `release/apisix-${version}`, e.g. `release/apisix-2.10.2`. + - If an LTS version is released and the version number is less than the current largest (e.g. the current largest version number is 2.14.1, but the LTS version 2.13.2 is to be released), submit a PR like [APISIX docker](https://github.com/apache/apisix-docker/pull/322) in [APISIX docker repository](https://github.com/apache/apisix-docker) and named as `release/apisix-${version}`, e.g. `release/apisix-2.13.2`; after the PR is reviewed, there is no need to merge it — just close the PR and push the branch to APISIX docker repository. +12. 
Update [APISIX helm chart](https://github.com/apache/apisix-helm-chart/pull/234) +13. Send the [ANNOUNCE email](https://lists.apache.org/thread/4s4msqwl1tq13p9dnv3hx7skbgpkozw1) to dev@apisix.apache.org & announce@apache.org diff --git a/CloudronPackages/APISIX/apisix-source/Makefile b/CloudronPackages/APISIX/apisix-source/Makefile new file mode 100644 index 0000000..423b240 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/Makefile @@ -0,0 +1,523 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# Makefile basic env setting +.DEFAULT_GOAL := help +# add pipefail support for default shell +SHELL := /bin/bash -o pipefail + + +# Project basic setting +VERSION ?= master +project_name ?= apache-apisix +project_release_name ?= $(project_name)-$(VERSION)-src + +OTEL_CONFIG ?= ./ci/pod/otelcol-contrib/data-otlp.json + +# Hyperconverged Infrastructure +ENV_OS_NAME ?= $(shell uname -s | tr '[:upper:]' '[:lower:]') +ENV_OS_ARCH ?= $(shell uname -m | tr '[:upper:]' '[:lower:]') +ENV_APISIX ?= $(CURDIR)/bin/apisix +ENV_GIT ?= git +ENV_TAR ?= tar +ENV_INSTALL ?= install +ENV_RM ?= rm -vf +ENV_DOCKER ?= docker +ENV_DOCKER_COMPOSE ?= docker compose --project-directory $(CURDIR) -p $(project_name) -f $(project_compose_ci) +ENV_NGINX ?= $(ENV_NGINX_EXEC) -p $(CURDIR) -c $(CURDIR)/conf/nginx.conf +ENV_NGINX_EXEC := $(shell command -v openresty 2>/dev/null || command -v nginx 2>/dev/null) +ENV_OPENSSL_PREFIX ?= /usr/local/openresty/openssl3 +ENV_LIBYAML_INSTALL_PREFIX ?= /usr +ENV_LUAROCKS ?= luarocks +## These variables can be injected by luarocks +ENV_INST_PREFIX ?= /usr +ENV_INST_LUADIR ?= $(ENV_INST_PREFIX)/share/lua/5.1 +ENV_INST_BINDIR ?= $(ENV_INST_PREFIX)/bin +ENV_RUNTIME_VER ?= $(shell $(ENV_NGINX_EXEC) -V 2>&1 | tr ' ' '\n' | grep 'APISIX_RUNTIME_VER' | cut -d '=' -f2) + +IMAGE_NAME = apache/apisix +ENV_APISIX_IMAGE_TAG_NAME ?= $(IMAGE_NAME):$(VERSION) + +-include .requirements +export + +ifneq ($(shell whoami), root) + ENV_LUAROCKS_FLAG_LOCAL := --local +endif + +ifdef ENV_LUAROCKS_SERVER + ENV_LUAROCKS_SERVER_OPT := --server $(ENV_LUAROCKS_SERVER) +endif + +ifneq ($(shell test -d $(ENV_OPENSSL_PREFIX) && echo -n yes), yes) + ENV_NGINX_PREFIX := $(shell $(ENV_NGINX_EXEC) -V 2>&1 | grep -Eo 'prefix=(.*)/nginx\s+' | grep -Eo '/.*/') + ifeq ($(shell test -d $(addprefix $(ENV_NGINX_PREFIX), openssl3) && echo -n yes), yes) + ENV_OPENSSL_PREFIX := $(addprefix $(ENV_NGINX_PREFIX), openssl3) + endif +endif + + +# Makefile basic extension function +_color_red 
=\E[1;31m +_color_green =\E[1;32m +_color_yellow =\E[1;33m +_color_blue =\E[1;34m +_color_wipe =\E[0m + + +define func_echo_status + printf "[%b info %b] %s\n" "$(_color_blue)" "$(_color_wipe)" $(1) +endef + + +define func_echo_warn_status + printf "[%b info %b] %s\n" "$(_color_yellow)" "$(_color_wipe)" $(1) +endef + + +define func_echo_success_status + printf "[%b info %b] %s\n" "$(_color_green)" "$(_color_wipe)" $(1) +endef + + +define func_check_folder + if [[ ! -d $(1) ]]; then \ + mkdir -p $(1); \ + $(call func_echo_status, 'folder check -> create `$(1)`'); \ + else \ + $(call func_echo_success_status, 'folder check -> found `$(1)`'); \ + fi +endef + + +# Makefile target +.PHONY: runtime +runtime: +ifeq ($(ENV_NGINX_EXEC), ) +ifeq ("$(wildcard /usr/local/openresty/bin/openresty)", "") + @$(call func_echo_warn_status, "WARNING: OpenResty not found. You have to install OpenResty and add the binary file to PATH before install Apache APISIX.") + exit 1 +else + $(eval ENV_NGINX_EXEC := /usr/local/openresty/bin/openresty) + @$(call func_echo_status, "Use openresty as default runtime") +endif +endif + + +### help : Show Makefile rules +### If there're awk failures, please make sure +### you are using awk or gawk +.PHONY: help +help: + @$(call func_echo_success_status, "Makefile rules:") + @awk '{ if(match($$0, /^\s*#{3}\s*([^:]+)\s*:\s*(.*)$$/, res)){ printf(" make %-15s : %-10s\n", res[1], res[2]) } }' Makefile + + +### deps : Installing dependencies +.PHONY: deps +deps: install-runtime + $(eval ENV_LUAROCKS_VER := $(shell $(ENV_LUAROCKS) --version | grep -E -o "luarocks [0-9]+.")) + @if [ '$(ENV_LUAROCKS_VER)' = 'luarocks 3.' 
]; then \ + mkdir -p ~/.luarocks; \ + $(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.OPENSSL_LIBDIR $(addprefix $(ENV_OPENSSL_PREFIX), /lib); \ + $(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.OPENSSL_INCDIR $(addprefix $(ENV_OPENSSL_PREFIX), /include); \ + $(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.YAML_DIR $(ENV_LIBYAML_INSTALL_PREFIX); \ + $(ENV_LUAROCKS) install apisix-master-0.rockspec --tree deps --only-deps $(ENV_LUAROCKS_SERVER_OPT); \ + else \ + $(call func_echo_warn_status, "WARNING: You're not using LuaRocks 3.x; please remove the luarocks and reinstall it via https://raw.githubusercontent.com/apache/apisix/master/utils/linux-install-luarocks.sh"); \ + exit 1; \ + fi + + +### undeps : Uninstalling dependencies +.PHONY: undeps +undeps: uninstall-rocks uninstall-runtime + + +.PHONY: uninstall-rocks +uninstall-rocks: + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_LUAROCKS) purge --tree=deps + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### utils : Installation tools +.PHONY: utils +utils: +ifeq ("$(wildcard utils/lj-releng)", "") + wget -qP utils https://raw.githubusercontent.com/iresty/openresty-devel-utils/master/lj-releng + chmod a+x utils/lj-releng +endif +ifeq ("$(wildcard utils/reindex)", "") + wget -qP utils https://raw.githubusercontent.com/iresty/openresty-devel-utils/master/reindex + chmod a+x utils/reindex +endif + + +### lint : Lint source code +.PHONY: lint +lint: utils + @$(call func_echo_status, "$@ -> [ Start ]") + ./utils/check-lua-code-style.sh + ./utils/check-test-code-style.sh + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### init : Initialize the runtime environment +.PHONY: init +init: runtime + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_APISIX) init + $(ENV_APISIX) init_etcd + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### run : Start the apisix server +.PHONY: run +run: runtime + @$(call func_echo_status, "$@ -> [ Start ]") + 
$(ENV_APISIX) start + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### quit : Stop the apisix server, exit gracefully +.PHONY: quit +quit: runtime + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_APISIX) quit + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### stop : Stop the apisix server, exit immediately +.PHONY: stop +stop: runtime + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_APISIX) stop + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### verify : Verify the configuration of apisix server +.PHONY: verify +verify: runtime + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_NGINX) -t + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### clean : Remove generated files +.PHONY: clean +clean: + @$(call func_echo_status, "$@ -> [ Start ]") + rm -rf logs/ + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### reload : Reload the apisix server +.PHONY: reload +reload: runtime + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_APISIX) reload + @$(call func_echo_success_status, "$@ -> [ Done ]") + +.PHONY: install-runtime +install-runtime: +ifneq ($(ENV_RUNTIME_VER), $(APISIX_RUNTIME)) + ./utils/install-dependencies.sh + @sudo $(ENV_INSTALL) /usr/local/openresty/bin/openresty $(ENV_INST_BINDIR)/openresty +endif + +.PHONY: uninstall-runtime +uninstall-runtime: + ./utils/install-dependencies.sh uninstall + rm -rf /usr/local/openresty + rm -f $(ENV_INST_BINDIR)/openresty + +### install : Install the apisix (only for luarocks) +.PHONY: install +install: runtime + $(ENV_INSTALL) -d /usr/local/apisix/ + $(ENV_INSTALL) -d /usr/local/apisix/logs/ + $(ENV_INSTALL) -d /usr/local/apisix/conf/cert + $(ENV_INSTALL) conf/mime.types /usr/local/apisix/conf/mime.types + $(ENV_INSTALL) conf/config.yaml /usr/local/apisix/conf/config.yaml + $(ENV_INSTALL) conf/debug.yaml /usr/local/apisix/conf/debug.yaml + $(ENV_INSTALL) conf/cert/* /usr/local/apisix/conf/cert/ + + # directories listed in alphabetical order + 
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix + $(ENV_INSTALL) apisix/*.lua $(ENV_INST_LUADIR)/apisix/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/admin + $(ENV_INSTALL) apisix/admin/*.lua $(ENV_INST_LUADIR)/apisix/admin/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/balancer + $(ENV_INSTALL) apisix/balancer/*.lua $(ENV_INST_LUADIR)/apisix/balancer/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/cli + $(ENV_INSTALL) apisix/cli/*.lua $(ENV_INST_LUADIR)/apisix/cli/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/control + $(ENV_INSTALL) apisix/control/*.lua $(ENV_INST_LUADIR)/apisix/control/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/core + $(ENV_INSTALL) apisix/core/*.lua $(ENV_INST_LUADIR)/apisix/core/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/core/dns + $(ENV_INSTALL) apisix/core/dns/*.lua $(ENV_INST_LUADIR)/apisix/core/dns + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/discovery + $(ENV_INSTALL) apisix/discovery/*.lua $(ENV_INST_LUADIR)/apisix/discovery/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/discovery/{consul,consul_kv,dns,eureka,nacos,kubernetes,tars} + $(ENV_INSTALL) apisix/discovery/consul/*.lua $(ENV_INST_LUADIR)/apisix/discovery/consul + $(ENV_INSTALL) apisix/discovery/consul_kv/*.lua $(ENV_INST_LUADIR)/apisix/discovery/consul_kv + $(ENV_INSTALL) apisix/discovery/dns/*.lua $(ENV_INST_LUADIR)/apisix/discovery/dns + $(ENV_INSTALL) apisix/discovery/eureka/*.lua $(ENV_INST_LUADIR)/apisix/discovery/eureka + $(ENV_INSTALL) apisix/discovery/kubernetes/*.lua $(ENV_INST_LUADIR)/apisix/discovery/kubernetes + $(ENV_INSTALL) apisix/discovery/nacos/*.lua $(ENV_INST_LUADIR)/apisix/discovery/nacos + $(ENV_INSTALL) apisix/discovery/tars/*.lua $(ENV_INST_LUADIR)/apisix/discovery/tars + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/http + $(ENV_INSTALL) apisix/http/*.lua $(ENV_INST_LUADIR)/apisix/http/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/http/router + $(ENV_INSTALL) apisix/http/router/*.lua $(ENV_INST_LUADIR)/apisix/http/router/ + + 
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/include/apisix/model + $(ENV_INSTALL) apisix/include/apisix/model/*.proto $(ENV_INST_LUADIR)/apisix/include/apisix/model/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/inspect + $(ENV_INSTALL) apisix/inspect/*.lua $(ENV_INST_LUADIR)/apisix/inspect/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins + $(ENV_INSTALL) apisix/plugins/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ext-plugin + $(ENV_INSTALL) apisix/plugins/ext-plugin/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ext-plugin/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/grpc-transcode + $(ENV_INSTALL) apisix/plugins/grpc-transcode/*.lua $(ENV_INST_LUADIR)/apisix/plugins/grpc-transcode/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ip-restriction + $(ENV_INSTALL) apisix/plugins/ip-restriction/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ip-restriction/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/limit-conn + $(ENV_INSTALL) apisix/plugins/limit-conn/*.lua $(ENV_INST_LUADIR)/apisix/plugins/limit-conn/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/limit-req + $(ENV_INSTALL) apisix/plugins/limit-req/*.lua $(ENV_INST_LUADIR)/apisix/plugins/limit-req/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/limit-count + $(ENV_INSTALL) apisix/plugins/limit-count/*.lua $(ENV_INST_LUADIR)/apisix/plugins/limit-count/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/opa + $(ENV_INSTALL) apisix/plugins/opa/*.lua $(ENV_INST_LUADIR)/apisix/plugins/opa/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/prometheus + $(ENV_INSTALL) apisix/plugins/prometheus/*.lua $(ENV_INST_LUADIR)/apisix/plugins/prometheus/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/proxy-cache + $(ENV_INSTALL) apisix/plugins/proxy-cache/*.lua $(ENV_INST_LUADIR)/apisix/plugins/proxy-cache/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/serverless + $(ENV_INSTALL) 
apisix/plugins/serverless/*.lua $(ENV_INST_LUADIR)/apisix/plugins/serverless/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/syslog + $(ENV_INSTALL) apisix/plugins/syslog/*.lua $(ENV_INST_LUADIR)/apisix/plugins/syslog/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/tencent-cloud-cls + $(ENV_INSTALL) apisix/plugins/tencent-cloud-cls/*.lua $(ENV_INST_LUADIR)/apisix/plugins/tencent-cloud-cls/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/pubsub + $(ENV_INSTALL) apisix/pubsub/*.lua $(ENV_INST_LUADIR)/apisix/pubsub/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/secret + $(ENV_INSTALL) apisix/secret/*.lua $(ENV_INST_LUADIR)/apisix/secret/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/zipkin + $(ENV_INSTALL) apisix/plugins/zipkin/*.lua $(ENV_INST_LUADIR)/apisix/plugins/zipkin/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/ssl/router + $(ENV_INSTALL) apisix/ssl/router/*.lua $(ENV_INST_LUADIR)/apisix/ssl/router/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream + $(ENV_INSTALL) apisix/stream/*.lua $(ENV_INST_LUADIR)/apisix/stream/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/plugins + $(ENV_INSTALL) apisix/stream/plugins/*.lua $(ENV_INST_LUADIR)/apisix/stream/plugins/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/router + $(ENV_INSTALL) apisix/stream/router/*.lua $(ENV_INST_LUADIR)/apisix/stream/router/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc + $(ENV_INSTALL) apisix/stream/xrpc/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis + $(ENV_INSTALL) apisix/stream/xrpc/protocols/redis/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo + $(ENV_INSTALL) apisix/stream/xrpc/protocols/dubbo/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/utils + $(ENV_INSTALL) apisix/utils/*.lua 
$(ENV_INST_LUADIR)/apisix/utils/ + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-proxy + $(ENV_INSTALL) apisix/plugins/ai-proxy/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-proxy + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-drivers + $(ENV_INSTALL) apisix/plugins/ai-drivers/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-drivers + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/embeddings + $(ENV_INSTALL) apisix/plugins/ai-rag/embeddings/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/embeddings + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/vector-search + $(ENV_INSTALL) apisix/plugins/ai-rag/vector-search/*.lua $(ENV_INST_LUADIR)/apisix/plugins/ai-rag/vector-search + + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/mcp/broker + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/mcp/transport + $(ENV_INSTALL) apisix/plugins/mcp/*.lua $(ENV_INST_LUADIR)/apisix/plugins/mcp + $(ENV_INSTALL) apisix/plugins/mcp/broker/*.lua $(ENV_INST_LUADIR)/apisix/plugins/mcp/broker + $(ENV_INSTALL) apisix/plugins/mcp/transport/*.lua $(ENV_INST_LUADIR)/apisix/plugins/mcp/transport + + $(ENV_INSTALL) bin/apisix $(ENV_INST_BINDIR)/apisix + + +### uninstall : Uninstall the apisix +.PHONY: uninstall +uninstall: + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_RM) -r /usr/local/apisix + $(ENV_RM) -r $(ENV_INST_LUADIR)/apisix + $(ENV_RM) $(ENV_INST_BINDIR)/apisix + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### test : Run the test case +.PHONY: test +test: runtime + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_GIT) submodule update --init --recursive + prove -I../test-nginx/lib -I./ -r -s t/ + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### license-check : Check project source code for Apache License +.PHONY: license-check +license-check: + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_DOCKER) run -it --rm -v $(CURDIR):/github/workspace apache/skywalking-eyes header check + @$(call 
func_echo_success_status, "$@ -> [ Done ]") + + +.PHONY: release-src +release-src: compress-tar + @$(call func_echo_status, "$@ -> [ Start ]") + gpg --batch --yes --armor --detach-sig $(project_release_name).tgz + shasum -a 512 $(project_release_name).tgz > $(project_release_name).tgz.sha512 + + $(call func_check_folder,release) + mv $(project_release_name).tgz release/$(project_release_name).tgz + mv $(project_release_name).tgz.asc release/$(project_release_name).tgz.asc + mv $(project_release_name).tgz.sha512 release/$(project_release_name).tgz.sha512 + ./utils/gen-vote-contents.sh $(VERSION) + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +.PHONY: compress-tar +compress-tar: + # The $VERSION can be major.minor.patch (from developer) + # or major.minor (from the branch name in the CI) + $(ENV_TAR) -zcvf $(project_release_name).tgz \ + ./apisix \ + ./bin \ + ./conf \ + ./apisix-master-0.rockspec \ + LICENSE \ + Makefile \ + NOTICE \ + *.md + + +### container +### ci-env-up : CI env launch +.PHONY: ci-env-up +ci-env-up: + @$(call func_echo_status, "$@ -> [ Start ]") + touch $(OTEL_CONFIG) + chmod 777 $(OTEL_CONFIG) + $(ENV_DOCKER_COMPOSE) up -d + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### ci-env-ps : CI env ps +.PHONY: ci-env-ps +ci-env-ps: + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_DOCKER_COMPOSE) ps + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### ci-env-rebuild : CI env image rebuild +.PHONY: ci-env-rebuild +ci-env-rebuild: + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_DOCKER_COMPOSE) build + @$(call func_echo_success_status, "$@ -> [ Done ]") + + +### ci-env-down : CI env destroy +.PHONY: ci-env-down +ci-env-down: + @$(call func_echo_status, "$@ -> [ Start ]") + rm $(OTEL_CONFIG) + $(ENV_DOCKER_COMPOSE) down + @$(call func_echo_success_status, "$@ -> [ Done ]") + +### ci-env-stop : CI env temporary stop +.PHONY: ci-env-stop +ci-env-stop: + @$(call func_echo_status, "$@ -> [ Start ]") + 
$(ENV_DOCKER_COMPOSE) stop + @$(call func_echo_success_status, "$@ -> [ Done ]") + +### build-on-debian-dev : Build apache/apisix:xx-debian-dev image +.PHONY: build-on-debian-dev +build-on-debian-dev: + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_DOCKER) build -t $(ENV_APISIX_IMAGE_TAG_NAME)-debian-dev \ + --build-arg TARGETARCH=$(ENV_OS_ARCH) \ + --build-arg CODE_PATH=. \ + --build-arg ENTRYPOINT_PATH=./docker/debian-dev/docker-entrypoint.sh \ + --build-arg INSTALL_BROTLI=./docker/debian-dev/install-brotli.sh \ + --build-arg CHECK_STANDALONE_CONFIG=./docker/utils/check_standalone_config.sh \ + -f ./docker/debian-dev/Dockerfile . + @$(call func_echo_success_status, "$@ -> [ Done ]") + +.PHONY: push-on-debian-dev +push-on-debian-dev: + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_DOCKER) tag $(ENV_APISIX_IMAGE_TAG_NAME)-debian-dev $(IMAGE_NAME):dev-$(ENV_OS_ARCH) + $(ENV_DOCKER) push $(IMAGE_NAME):dev-$(ENV_OS_ARCH) + @$(call func_echo_success_status, "$@ -> [ Done ]") + +### merge-dev-tags : Merge architecture-specific dev tags into a single dev tag +.PHONY: merge-dev-tags +merge-dev-tags: + @$(call func_echo_status, "$@ -> [ Start ]") + $(ENV_DOCKER) manifest create $(IMAGE_NAME):dev \ + $(IMAGE_NAME):dev-amd64 \ + $(IMAGE_NAME):dev-arm64 + $(ENV_DOCKER) manifest push $(IMAGE_NAME):dev + @$(call func_echo_success_status, "$@ -> [ Done ]") diff --git a/CloudronPackages/APISIX/apisix-source/NOTICE b/CloudronPackages/APISIX/apisix-source/NOTICE new file mode 100644 index 0000000..fdab115 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/NOTICE @@ -0,0 +1,5 @@ +Apache APISIX +Copyright 2019-2025 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
diff --git a/CloudronPackages/APISIX/apisix-source/README.md b/CloudronPackages/APISIX/apisix-source/README.md new file mode 100644 index 0000000..a61b52d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/README.md @@ -0,0 +1,241 @@ + + +# Apache APISIX API Gateway | AI Gateway + +APISIX logo + +[![Build Status](https://github.com/apache/apisix/actions/workflows/build.yml/badge.svg?branch=master)](https://github.com/apache/apisix/actions/workflows/build.yml) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/apache/apisix/blob/master/LICENSE) +[![Commit activity](https://img.shields.io/github/commit-activity/m/apache/apisix)](https://github.com/apache/apisix/graphs/commit-activity) +[![Average time to resolve an issue](http://isitmaintained.com/badge/resolution/apache/apisix.svg)](http://isitmaintained.com/project/apache/apisix "Average time to resolve an issue") +[![Percentage of issues still open](http://isitmaintained.com/badge/open/apache/apisix.svg)](http://isitmaintained.com/project/apache/apisix "Percentage of issues still open") +[![Slack](https://badgen.net/badge/Slack/Join%20Apache%20APISIX?icon=slack)](https://apisix.apache.org/slack) + +**Apache APISIX** is a dynamic, real-time, high-performance API Gateway. + +APISIX API Gateway provides rich traffic management features such as load balancing, dynamic upstream, canary release, circuit breaking, authentication, observability, and more. + +APISIX can serve as an **[AI Gateway](https://apisix.apache.org/ai-gateway/)** through its flexible plugin system, providing AI proxying, load balancing for LLMs, retries and fallbacks, token-based rate limiting, and robust security to ensure the efficiency and reliability of AI agents. APISIX also provides the [`mcp-bridge`](https://apisix.apache.org/blog/2025/04/21/host-mcp-server-with-api-gateway/) plugin to seamlessly convert stdio-based MCP servers to scalable HTTP SSE services. 
+ +You can use APISIX API Gateway to handle traditional north-south traffic, as well as east-west traffic between services. It can also be used as a [k8s ingress controller](https://github.com/apache/apisix-ingress-controller). + +The technical architecture of Apache APISIX: + +![Technical architecture of Apache APISIX](docs/assets/images/apisix.png) + +## Community + +- [Kindly Write a Review](https://www.g2.com/products/apache-apisix/reviews) for APISIX in G2. +- Mailing List: Mail to dev-subscribe@apisix.apache.org, follow the reply to subscribe to the mailing list. +- Slack Workspace - [invitation link](https://apisix.apache.org/slack) (Please open an [issue](https://apisix.apache.org/docs/general/submit-issue) if this link is expired), and then join the #apisix channel (Channels -> Browse channels -> search for "apisix"). +- ![Twitter Follow](https://img.shields.io/twitter/follow/ApacheAPISIX?style=social) - follow and interact with us using hashtag `#ApacheAPISIX` +- [Documentation](https://apisix.apache.org/docs/) +- [Discussions](https://github.com/apache/apisix/discussions) +- [Blog](https://apisix.apache.org/blog) + +## Features + +You can use APISIX API Gateway as a traffic entrance to process all business data, including dynamic routing, dynamic upstream, dynamic certificates, +A/B testing, canary release, blue-green deployment, limit rate, defense against malicious attacks, metrics, monitoring alarms, service observability, service governance, etc. + +- **All platforms** + + - Cloud-Native: Platform agnostic, No vendor lock-in, APISIX API Gateway can run from bare-metal to Kubernetes. + - Supports ARM64: Don't worry about the lock-in of the infra technology. + +- **Multi protocols** + + - [TCP/UDP Proxy](docs/en/latest/stream-proxy.md): Dynamic TCP/UDP proxy. + - [Dubbo Proxy](docs/en/latest/plugins/dubbo-proxy.md): Dynamic HTTP to Dubbo proxy. 
+ - [Dynamic MQTT Proxy](docs/en/latest/plugins/mqtt-proxy.md): Supports to load balance MQTT by `client_id`, both support MQTT [3.1.\*](http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html), [5.0](https://docs.oasis-open.org/mqtt/mqtt/v5.0/mqtt-v5.0.html). + - [gRPC proxy](docs/en/latest/grpc-proxy.md): Proxying gRPC traffic. + - [gRPC Web Proxy](docs/en/latest/plugins/grpc-web.md): Proxying gRPC Web traffic to gRPC Service. + - [gRPC transcoding](docs/en/latest/plugins/grpc-transcode.md): Supports protocol transcoding so that clients can access your gRPC API by using HTTP/JSON. + - Proxy Websocket + - Proxy Protocol + - HTTP(S) Forward Proxy + - [SSL](docs/en/latest/certificate.md): Dynamically load an SSL certificate + - [HTTP/3 with QUIC](docs/en/latest/http3.md) + +- **Full Dynamic** + + - [Hot Updates And Hot Plugins](docs/en/latest/terminology/plugin.md): Continuously updates its configurations and plugins without restarts! + - [Proxy Rewrite](docs/en/latest/plugins/proxy-rewrite.md): Support rewrite the `host`, `uri`, `schema`, `method`, `headers` of the request before send to upstream. + - [Response Rewrite](docs/en/latest/plugins/response-rewrite.md): Set customized response status code, body and header to the client. + - Dynamic Load Balancing: Round-robin load balancing with weight. + - Hash-based Load Balancing: Load balance with consistent hashing sessions. + - [Health Checks](docs/en/latest/tutorials/health-check.md): Enable health check on the upstream node and will automatically filter unhealthy nodes during load balancing to ensure system stability. + - Circuit-Breaker: Intelligent tracking of unhealthy upstream services. + - [Proxy Mirror](docs/en/latest/plugins/proxy-mirror.md): Provides the ability to mirror client requests. + - [Traffic Split](docs/en/latest/plugins/traffic-split.md): Allows users to incrementally direct percentages of traffic between various upstreams. 
+ +- **Fine-grained routing** + + - [Supports full path matching and prefix matching](docs/en/latest/router-radixtree.md#how-to-use-libradixtree-in-apisix) + - [Support all Nginx built-in variables as conditions for routing](docs/en/latest/router-radixtree.md#how-to-filter-route-by-nginx-builtin-variable), so you can use `cookie`, `args`, etc. as routing conditions to implement canary release, A/B testing, etc. + - Support [various operators as judgment conditions for routing](https://github.com/iresty/lua-resty-radixtree#operator-list), for example `{"arg_age", ">", 24}` + - Support [custom route matching function](https://github.com/iresty/lua-resty-radixtree/blob/master/t/filter-fun.t#L10) + - IPv6: Use IPv6 to match the route. + - Support [TTL](docs/en/latest/admin-api.md#route) + - [Support priority](docs/en/latest/router-radixtree.md#3-match-priority) + - [Support Batch Http Requests](docs/en/latest/plugins/batch-requests.md) + - [Support filtering route by GraphQL attributes](docs/en/latest/router-radixtree.md#how-to-filter-route-by-graphql-attributes) + +- **Security** + + - Rich authentication & authorization support: + * [key-auth](docs/en/latest/plugins/key-auth.md) + * [JWT](docs/en/latest/plugins/jwt-auth.md) + * [basic-auth](docs/en/latest/plugins/basic-auth.md) + * [wolf-rbac](docs/en/latest/plugins/wolf-rbac.md) + * [casbin](docs/en/latest/plugins/authz-casbin.md) + * [keycloak](docs/en/latest/plugins/authz-keycloak.md) + * [casdoor](docs/en/latest/plugins/authz-casdoor.md) + - [IP Whitelist/Blacklist](docs/en/latest/plugins/ip-restriction.md) + - [Referer Whitelist/Blacklist](docs/en/latest/plugins/referer-restriction.md) + - [IdP](docs/en/latest/plugins/openid-connect.md): Support external Identity platforms, such as Auth0, okta, etc.. 
+ - [Limit-req](docs/en/latest/plugins/limit-req.md) + - [Limit-count](docs/en/latest/plugins/limit-count.md) + - [Limit-concurrency](docs/en/latest/plugins/limit-conn.md) + - Anti-ReDoS(Regular expression Denial of Service): Built-in policies to Anti ReDoS without configuration. + - [CORS](docs/en/latest/plugins/cors.md) Enable CORS(Cross-origin resource sharing) for your API. + - [URI Blocker](docs/en/latest/plugins/uri-blocker.md): Block client request by URI. + - [Request Validator](docs/en/latest/plugins/request-validation.md) + - [CSRF](docs/en/latest/plugins/csrf.md) Based on the [`Double Submit Cookie`](https://en.wikipedia.org/wiki/Cross-site_request_forgery#Double_Submit_Cookie) way, protect your API from CSRF attacks. + +- **OPS friendly** + + - Zipkin tracing: [Zipkin](docs/en/latest/plugins/zipkin.md) + - Open source APM: support [Apache SkyWalking](docs/en/latest/plugins/skywalking.md) + - Works with external service discovery: In addition to the built-in etcd, it also supports [Consul](docs/en/latest/discovery/consul.md), [Consul_kv](docs/en/latest/discovery/consul_kv.md), [Nacos](docs/en/latest/discovery/nacos.md), [Eureka](docs/en/latest/discovery/eureka.md) and [Zookeeper (CP)](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md). + - Monitoring And Metrics: [Prometheus](docs/en/latest/plugins/prometheus.md) + - Clustering: APISIX nodes are stateless, creates clustering of the configuration center, please refer to [etcd Clustering Guide](https://etcd.io/docs/v3.5/op-guide/clustering/). + - High availability: Support to configure multiple etcd addresses in the same cluster. + - [Dashboard](https://github.com/apache/apisix-dashboard) + - Version Control: Supports rollbacks of operations. + - CLI: start\stop\reload APISIX through the command line. + - [Standalone](docs/en/latest/deployment-modes.md#standalone): Supports to load route rules from local YAML file, it is more friendly such as under the kubernetes(k8s). 
+ - [Global Rule](docs/en/latest/terminology/global-rule.md): Allows to run any plugin for all request, eg: limit rate, IP filter etc. + - High performance: The single-core QPS reaches 18k with an average delay of fewer than 0.2 milliseconds. + - [Fault Injection](docs/en/latest/plugins/fault-injection.md) + - [REST Admin API](docs/en/latest/admin-api.md): Using the REST Admin API to control Apache APISIX, which only allows 127.0.0.1 access by default, you can modify the `allow_admin` field in `conf/config.yaml` to specify a list of IPs that are allowed to call the Admin API. Also, note that the Admin API uses key auth to verify the identity of the caller. + - External Loggers: Export access logs to external log management tools. ([HTTP Logger](docs/en/latest/plugins/http-logger.md), [TCP Logger](docs/en/latest/plugins/tcp-logger.md), [Kafka Logger](docs/en/latest/plugins/kafka-logger.md), [UDP Logger](docs/en/latest/plugins/udp-logger.md), [RocketMQ Logger](docs/en/latest/plugins/rocketmq-logger.md), [SkyWalking Logger](docs/en/latest/plugins/skywalking-logger.md), [Alibaba Cloud Logging(SLS)](docs/en/latest/plugins/sls-logger.md), [Google Cloud Logging](docs/en/latest/plugins/google-cloud-logging.md), [Splunk HEC Logging](docs/en/latest/plugins/splunk-hec-logging.md), [File Logger](docs/en/latest/plugins/file-logger.md), [SolarWinds Loggly Logging](docs/en/latest/plugins/loggly.md), [TencentCloud CLS](docs/en/latest/plugins/tencent-cloud-cls.md)). + - [ClickHouse](docs/en/latest/plugins/clickhouse-logger.md): push logs to ClickHouse. + - [Elasticsearch](docs/en/latest/plugins/elasticsearch-logger.md): push logs to Elasticsearch. + - [Datadog](docs/en/latest/plugins/datadog.md): push custom metrics to the DogStatsD server, comes bundled with [Datadog agent](https://docs.datadoghq.com/agent/), over the UDP protocol. 
DogStatsD basically is an implementation of StatsD protocol which collects the custom metrics for Apache APISIX agent, aggregates it into a single data point and sends it to the configured Datadog server. + - [Helm charts](https://github.com/apache/apisix-helm-chart) + - [HashiCorp Vault](https://www.vaultproject.io/): Support secret management solution for accessing secrets from Vault secure storage backed in a low trust environment. Currently, RS256 keys (public-private key pairs) or secret keys can be linked from vault in jwt-auth authentication plugin using [APISIX Secret](docs/en/latest/terminology/secret.md) resource. + +- **Highly scalable** + - [Custom plugins](docs/en/latest/plugin-develop.md): Allows hooking of common phases, such as `rewrite`, `access`, `header filter`, `body filter` and `log`, also allows to hook the `balancer` stage. + - [Plugin can be written in Java/Go/Python](docs/en/latest/external-plugin.md) + - [Plugin can be written with Proxy Wasm SDK](docs/en/latest/wasm.md) + - Custom load balancing algorithms: You can use custom load balancing algorithms during the `balancer` phase. + - Custom routing: Support users to implement routing algorithms themselves. + +- **Multi-Language support** + - Apache APISIX is a multi-language gateway for plugin development and provides support via `RPC` and `Wasm`. + ![Multi Language Support into Apache APISIX](docs/assets/images/external-plugin.png) + - The RPC way, is the current way. Developers can choose the language according to their needs and after starting an independent process with the RPC, it exchanges data with APISIX through local RPC communication. Till this moment, APISIX has support for [Java](https://github.com/apache/apisix-java-plugin-runner), [Golang](https://github.com/apache/apisix-go-plugin-runner), [Python](https://github.com/apache/apisix-python-plugin-runner) and Node.js. + - The Wasm or WebAssembly, is an experimental way. 
APISIX can load and run Wasm bytecode via APISIX [wasm plugin](https://github.com/apache/apisix/blob/master/docs/en/latest/wasm.md) written with the [Proxy Wasm SDK](https://github.com/proxy-wasm/spec#sdks). Developers only need to write the code according to the SDK and then compile it into a Wasm bytecode that runs on Wasm VM with APISIX. + +- **Serverless** + - [Lua functions](docs/en/latest/plugins/serverless.md): Invoke functions in each phase in APISIX. + - [AWS Lambda](docs/en/latest/plugins/aws-lambda.md): Integration with AWS Lambda function as a dynamic upstream to proxy all requests for a particular URI to the AWS API gateway endpoint. Supports authorization via api key and AWS IAM access secret. + - [Azure Functions](docs/en/latest/plugins/azure-functions.md): Seamless integration with Azure Serverless Function as a dynamic upstream to proxy all requests for a particular URI to the Microsoft Azure cloud. + - [Apache OpenWhisk](docs/en/latest/plugins/openwhisk.md): Seamless integration with Apache OpenWhisk as a dynamic upstream to proxy all requests for a particular URI to your own OpenWhisk cluster. + +## Get Started + +1. Installation + + Please refer to [install documentation](https://apisix.apache.org/docs/apisix/installation-guide/). + +2. Getting started + + The getting started guide is a great way to learn the basics of APISIX. Just follow the steps in [Getting Started](https://apisix.apache.org/docs/apisix/getting-started/). + + Further, you can follow the documentation to try more [plugins](docs/en/latest/plugins). + +3. Admin API + + Apache APISIX provides [REST Admin API](docs/en/latest/admin-api.md) to dynamically control the Apache APISIX cluster. + +4. Plugin development + + You can refer to [plugin development guide](docs/en/latest/plugin-develop.md), and sample plugin `example-plugin`'s code implementation. + Reading [plugin concept](docs/en/latest/terminology/plugin.md) would help you learn more about the plugin. 
+ +For more documents, please refer to [Apache APISIX Documentation site](https://apisix.apache.org/docs/apisix/getting-started/) + +## Benchmark + +Using AWS's eight-core server, APISIX's QPS reaches 140,000 with a latency of only 0.2 ms. + +[Benchmark script](benchmark/run.sh) has been open sourced, welcome to try and contribute. + +[APISIX also works perfectly in AWS graviton3 C7g.](https://apisix.apache.org/blog/2022/06/07/installation-performance-test-of-apigateway-apisix-on-aws-graviton3) + +## User Stories + +- [European eFactory Platform: API Security Gateway – Using APISIX in the eFactory Platform](https://www.efactory-project.eu/post/api-security-gateway-using-apisix-in-the-efactory-platform) +- [Copernicus Reference System Software](https://github.com/COPRS/infrastructure/wiki/Networking-trade-off) +- [More Stories](https://apisix.apache.org/blog/tags/case-studies/) + +## Who Uses APISIX API Gateway? + +A wide variety of companies and organizations use APISIX API Gateway for research, production and commercial product, below are some of them: + +- Airwallex +- Bilibili +- CVTE +- European eFactory Platform +- European Copernicus Reference System +- Geely +- HONOR +- Horizon Robotics +- iQIYI +- Lenovo +- NASA JPL +- Nayuki +- OPPO +- QingCloud +- Swisscom +- Tencent Game +- Travelsky +- vivo +- Sina Weibo +- WeCity +- WPS +- XPENG +- Zoom + +## Logos + +- [Apache APISIX logo(PNG)](https://github.com/apache/apisix/tree/master/logos/apache-apisix.png) +- [Apache APISIX logo source](https://apache.org/logos/#apisix) + +## Acknowledgments + +Inspired by Kong and Orange. 
+ +## License + +[Apache 2.0 License](https://github.com/apache/apisix/tree/master/LICENSE) diff --git a/CloudronPackages/APISIX/apisix-source/THREAT_MODEL.md b/CloudronPackages/APISIX/apisix-source/THREAT_MODEL.md new file mode 100644 index 0000000..c10560c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/THREAT_MODEL.md @@ -0,0 +1,60 @@ + + +## Threat Model + +Here is the threat model of Apache APISIX, which is relative to our developers and operators. + +### Where the system might be attacked + +As a proxy, Apache APISIX needs to be able to run in front of untrusted downstream traffic. + +However, some features need to assume the downstream traffic is trusted. They should be either +not exposed to the internet by default (for example, listening to 127.0.0.1), or disclaim in +the doc explicitly. + +As Apache APISIX is evolving rapidly, some newly added features may not be strong enough to defend against potential attacks. +Therefore, we need to divide the features into two groups: premature and mature ones. +Features that are just merged in half a year or are declared as experimental are premature. +Premature features are not fully tested on the battlefield and are not covered by the security policy normally. + +Additionally, we require the components below are trustable: + +1. the upstream +2. the configuration +3. the way we relay the configuration +4. the 3rd party components involved in the Apache APISIX, for example, the authorization server + +### How can we reduce the likelihood or impact of a potential threat + +As the user: +First of all, don't expose the components which are required to be trustable to the internet, including the control plane (Dashboard or something else) and the configuration relay mechanism (etcd or etcd adapter or something else). + +Then, harden the trusted components. For example, + +1. if possible, enable authentication or use https for the etcd +2. 
read the doc and disable plugins that are not needed, so that we can reduce the attack vector +3. restrict and audit the change of configuration + +As the developer: +We should keep security in mind, and validate the input from the client before use. + +As the maintainer: +We should keep security in mind, and review the code line by line. +We are open to discussion from the security researchers. diff --git a/CloudronPackages/APISIX/apisix-source/Vision-and-Milestones.md b/CloudronPackages/APISIX/apisix-source/Vision-and-Milestones.md new file mode 100644 index 0000000..333d991 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/Vision-and-Milestones.md @@ -0,0 +1,40 @@ + + +### Vision + +Apache APISIX is an open source API gateway designed to help developers connect any APIs securely and efficiently in any environment. + +Managing thousands or tens of thousands of APIs and microservices in a multi-cloud and hybrid cloud environment is not an easy task. +There will be many challenges such as authentication, observability, security, etc. + +Apache APISIX, a community-driven project, hopes to help everyone better manage and use APIs through the power of developers. +Every developer's contribution will be used by thousands of companies and served by billions of users. 
+ +### Milestones + +Apache APISIX has relatively complete features for north-south traffic, +and will be iterated around the following directions in the next 6 months (if you have any ideas, feel free to create an issue to discuss): + +- More complete support for Gateway API on APISIX ingress controller +- Add support for service mesh +- User-friendly documentation +- More plugins for public cloud and SaaS services +- Java/Go plugins and Wasm production-ready +- Add dynamic debugging tools for Apache APISIX diff --git a/CloudronPackages/APISIX/apisix-source/apisix-master-0.rockspec b/CloudronPackages/APISIX/apisix-source/apisix-master-0.rockspec new file mode 100644 index 0000000..82ca9d8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix-master-0.rockspec @@ -0,0 +1,108 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "master-0" +supported_platforms = {"linux"} + +source = { + url = "git://github.com/apache/apisix", + branch = "master", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "lyaml = 6.2.8", + "api7-lua-resty-dns-client = 7.0.1", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.10.5", + "api7-lua-resty-http = 0.2.2-0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-worker-events = 1.0.0", + "lua-resty-healthcheck-api7 = 3.2.0", + "api7-lua-resty-jwt = 0.2.5", + "lua-resty-hmac-ffi = 0.06-1", + "lua-resty-cookie = 0.2.0-1", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.9.2", + "lua-protobuf = 0.5.2-1", + "lua-resty-openidc = 1.7.6-3", + "luafilesystem = 1.7.0-2", + "nginx-lua-prometheus-api7 = 0.20240201-1", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.23-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 1.0.1", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.05-1", + "lua-resty-expr = 1.3.2", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.1.0-1", + "luasec = 1.3.2-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.13.1", + "ext-plugin-proto = 0.6.1", + "casbin = 1.41.9-1", + "inspect == 3.1.1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.2-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.1.0-0", + "lua-resty-t1k = 1.1.5", + "brotli-ffi = 0.3-1", + "lua-ffi-zlib = 0.6-0", + "jsonpath = 1.0-1", + "api7-lua-resty-aws == 2.0.2-1", + 
"multipart = 0.5.9-1", +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/consumer_group.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/consumer_group.lua new file mode 100644 index 0000000..9aed58f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/consumer_group.lua @@ -0,0 +1,66 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local consumers = require("apisix.consumer").consumers +local resource = require("apisix.admin.resource") +local schema_plugin = require("apisix.admin.plugins").check_schema +local type = type +local tostring = tostring +local ipairs = ipairs + + +local function check_conf(id, conf, need_id, schema) + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, {error_msg = "invalid configuration: " .. err} + end + + local ok, err = schema_plugin(conf.plugins) + if not ok then + return nil, {error_msg = err} + end + + return true +end + + +local function delete_checker(id) + local consumers, consumers_ver = consumers() + if consumers_ver and consumers then + for _, consumer in ipairs(consumers) do + if type(consumer) == "table" and consumer.value + and consumer.value.group_id + and tostring(consumer.value.group_id) == id then + return 400, {error_msg = "can not delete this consumer group," + .. " consumer [" .. consumer.value.id + .. "] is still using it now"} + end + end + end + + return nil, nil +end + + +return resource.new({ + name = "consumer_groups", + kind = "consumer group", + schema = core.schema.consumer_group, + checker = check_conf, + unsupported_methods = {"post"}, + delete_checker = delete_checker +}) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/consumers.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/consumers.lua new file mode 100644 index 0000000..e027890 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/consumers.lua @@ -0,0 +1,65 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local plugins = require("apisix.admin.plugins") +local resource = require("apisix.admin.resource") + + +local function check_conf(username, conf, need_username, schema) + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, {error_msg = "invalid configuration: " .. err} + end + + if username and username ~= conf.username then + return nil, {error_msg = "wrong username" } + end + + if conf.plugins then + ok, err = plugins.check_schema(conf.plugins, core.schema.TYPE_CONSUMER) + if not ok then + return nil, {error_msg = "invalid plugins configuration: " .. err} + end + end + + if conf.group_id then + local key = "/consumer_groups/" .. conf.group_id + local res, err = core.etcd.get(key) + if not res then + return nil, {error_msg = "failed to fetch consumer group info by " + .. "consumer group id [" .. conf.group_id .. "]: " + .. err} + end + + if res.status ~= 200 then + return nil, {error_msg = "failed to fetch consumer group info by " + .. "consumer group id [" .. conf.group_id .. "], " + .. "response code: " .. 
res.status} + end + end + + return conf.username +end + + +return resource.new({ + name = "consumers", + kind = "consumer", + schema = core.schema.consumer, + checker = check_conf, + unsupported_methods = {"post", "patch"} +}) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/credentials.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/credentials.lua new file mode 100644 index 0000000..3622867 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/credentials.lua @@ -0,0 +1,74 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local plugins = require("apisix.admin.plugins") +local plugin = require("apisix.plugin") +local resource = require("apisix.admin.resource") +local pairs = pairs + +local function check_conf(_id, conf, _need_id, schema) + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, {error_msg = "invalid configuration: " .. err} + end + + if conf.plugins then + ok, err = plugins.check_schema(conf.plugins, core.schema.TYPE_CONSUMER) + if not ok then + return nil, {error_msg = "invalid plugins configuration: " .. 
err} + end + + for name, _ in pairs(conf.plugins) do + local plugin_obj = plugin.get(name) + if not plugin_obj then + return nil, {error_msg = "unknown plugin " .. name} + end + if plugin_obj.type ~= "auth" then + return nil, {error_msg = "only supports auth type plugins in consumer credential"} + end + end + end + + return true, nil +end + +-- get_credential_etcd_key is used to splice the credential's etcd key (without prefix) +-- from credential_id and sub_path. +-- Parameter credential_id is from the uri or payload; sub_path is in the form of +-- {consumer_name}/credentials or {consumer_name}/credentials/{credential_id}. +-- Only if GET credentials list, credential_id is nil, sub_path is like {consumer_name}/credentials, +-- so return value is /consumers/{consumer_name}/credentials. +-- In the other methods, credential_id is not nil, return value is +-- /consumers/{consumer_name}/credentials/{credential_id}. +local function get_credential_etcd_key(credential_id, _conf, sub_path, _args) + if credential_id then + local uri_segs = core.utils.split_uri(sub_path) + local consumer_name = uri_segs[1] + return "/consumers/" .. consumer_name .. "/credentials/" .. credential_id + end + + return "/consumers/" .. sub_path +end + +return resource.new({ + name = "credentials", + kind = "credential", + schema = core.schema.credential, + checker = check_conf, + get_resource_etcd_key = get_credential_etcd_key, + unsupported_methods = {"post", "patch"} +}) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/global_rules.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/global_rules.lua new file mode 100644 index 0000000..81db850 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/global_rules.lua @@ -0,0 +1,43 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local resource = require("apisix.admin.resource") +local schema_plugin = require("apisix.admin.plugins").check_schema + + +local function check_conf(id, conf, need_id, schema) + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, {error_msg = "invalid configuration: " .. err} + end + + local ok, err = schema_plugin(conf.plugins) + if not ok then + return nil, {error_msg = err} + end + + return true +end + + +return resource.new({ + name = "global_rules", + kind = "global rule", + schema = core.schema.global_rule, + checker = check_conf, + unsupported_methods = {"post"} +}) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/init.lua new file mode 100644 index 0000000..7bcb806 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/init.lua @@ -0,0 +1,526 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local require = require +local core = require("apisix.core") +local get_uri_args = ngx.req.get_uri_args +local route = require("apisix.utils.router") +local plugin = require("apisix.plugin") +local standalone = require("apisix.admin.standalone") +local v3_adapter = require("apisix.admin.v3_adapter") +local utils = require("apisix.admin.utils") +local ngx = ngx +local get_method = ngx.req.get_method +local ngx_time = ngx.time +local ngx_timer_at = ngx.timer.at +local ngx_worker_id = ngx.worker.id +local tonumber = tonumber +local tostring = tostring +local str_lower = string.lower +local reload_event = "/apisix/admin/plugins/reload" +local ipairs = ipairs +local error = error +local type = type + + +local events +local MAX_REQ_BODY = 1024 * 1024 * 1.5 -- 1.5 MiB + + +local viewer_methods = { + get = true, +} + + +local resources = { + routes = require("apisix.admin.routes"), + services = require("apisix.admin.services"), + upstreams = require("apisix.admin.upstreams"), + consumers = require("apisix.admin.consumers"), + credentials = require("apisix.admin.credentials"), + schema = require("apisix.admin.schema"), + ssls = require("apisix.admin.ssl"), + plugins = require("apisix.admin.plugins"), + protos = require("apisix.admin.proto"), + global_rules = require("apisix.admin.global_rules"), + stream_routes = require("apisix.admin.stream_routes"), + plugin_metadata = require("apisix.admin.plugin_metadata"), + plugin_configs = require("apisix.admin.plugin_config"), + consumer_groups = require("apisix.admin.consumer_group"), + secrets = 
require("apisix.admin.secrets"), +} + + +local _M = {version = 0.4} +local router + + +local function check_token(ctx) + local local_conf = core.config.local_conf() + + -- check if admin_key is required + if local_conf.deployment.admin.admin_key_required == false then + return true + end + + local admin_key = core.table.try_read_attr(local_conf, "deployment", "admin", "admin_key") + if not admin_key then + return true + end + + local req_token = ctx.var.arg_api_key or ctx.var.http_x_api_key + or ctx.var.cookie_x_api_key + if not req_token then + return false, "missing apikey" + end + + local admin + for i, row in ipairs(admin_key) do + if req_token == row.key then + admin = row + break + end + end + + if not admin then + return false, "wrong apikey" + end + + if admin.role == "viewer" and + not viewer_methods[str_lower(get_method())] then + return false, "invalid method for role viewer" + end + + return true +end + +-- Set the `apictx` variable and check admin api token, if the check fails, the current +-- request will be interrupted and an error response will be returned. +-- +-- NOTE: This is a higher wrapper for `check_token` function. +local function set_ctx_and_check_token() + local api_ctx = {} + core.ctx.set_vars_meta(api_ctx) + ngx.ctx.api_ctx = api_ctx + + local ok, err = check_token(api_ctx) + if not ok then + core.log.warn("failed to check token: ", err) + core.response.exit(401, { error_msg = "failed to check token", description = err }) + end +end + + +local function strip_etcd_resp(data) + if type(data) == "table" + and data.header ~= nil + and data.header.revision ~= nil + and data.header.raft_term ~= nil + then + -- strip etcd data + data.header = nil + data.responses = nil + data.succeeded = nil + + if data.node then + data.node.createdIndex = nil + data.node.modifiedIndex = nil + end + + data.count = nil + data.more = nil + data.prev_kvs = nil + + if data.deleted then + -- We used to treat the type incorrectly. 
But for compatibility we follow + -- the existing type. + data.deleted = tostring(data.deleted) + end + end + + return data +end + + +local function head() + core.response.exit(200) +end + + +local function run() + set_ctx_and_check_token() + + local uri_segs = core.utils.split_uri(ngx.var.uri) + core.log.info("uri: ", core.json.delay_encode(uri_segs)) + + -- /apisix/admin/schema/route + local seg_res, seg_id = uri_segs[4], uri_segs[5] + local seg_sub_path = core.table.concat(uri_segs, "/", 6) + if seg_res == "schema" and seg_id == "plugins" then + -- /apisix/admin/schema/plugins/limit-count + seg_res, seg_id = uri_segs[5], uri_segs[6] + seg_sub_path = core.table.concat(uri_segs, "/", 7) + end + + if seg_res == "stream_routes" then + local local_conf = core.config.local_conf() + if local_conf.apisix.proxy_mode ~= "stream" and + local_conf.apisix.proxy_mode ~= "http&stream" then + core.log.warn("stream mode is disabled, can not add any stream ", + "routes") + core.response.exit(400, {error_msg = "stream mode is disabled, " .. + "can not add stream routes"}) + end + end + + if seg_res == "consumers" and #uri_segs >= 6 and uri_segs[6] == "credentials" then + seg_sub_path = seg_id .. "/" .. seg_sub_path + seg_res = uri_segs[6] + seg_id = uri_segs[7] + end + + local resource = resources[seg_res] + if not resource then + core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res}) + end + + local method = str_lower(get_method()) + if not resource[method] then + core.response.exit(404, {error_msg = "not found"}) + end + + local req_body, err = core.request.get_body(MAX_REQ_BODY) + if err then + core.log.error("failed to read request body: ", err) + core.response.exit(400, {error_msg = "invalid request body: " .. err}) + end + + if req_body then + local data, err = core.json.decode(req_body) + if err then + core.log.error("invalid request body: ", req_body, " err: ", err) + core.response.exit(400, {error_msg = "invalid request body: " .. 
err, + req_body = req_body}) + end + + req_body = data + end + + local uri_args = ngx.req.get_uri_args() or {} + if uri_args.ttl then + if not tonumber(uri_args.ttl) then + core.response.exit(400, {error_msg = "invalid argument ttl: " + .. "should be a number"}) + end + end + + local code, data + if seg_res == "schema" or seg_res == "plugins" then + code, data = resource[method](seg_id, req_body, seg_sub_path, uri_args) + else + code, data = resource[method](resource, seg_id, req_body, seg_sub_path, uri_args) + end + + if code then + if method == "get" and plugin.enable_data_encryption then + if seg_res == "consumers" or seg_res == "credentials" then + utils.decrypt_params(plugin.decrypt_conf, data, core.schema.TYPE_CONSUMER) + elseif seg_res == "plugin_metadata" then + utils.decrypt_params(plugin.decrypt_conf, data, core.schema.TYPE_METADATA) + else + utils.decrypt_params(plugin.decrypt_conf, data) + end + end + + if v3_adapter.enable_v3() then + core.response.set_header("X-API-VERSION", "v3") + else + core.response.set_header("X-API-VERSION", "v2") + end + + data = v3_adapter.filter(data, resource) + data = strip_etcd_resp(data) + + core.response.exit(code, data) + end +end + + +local function get_plugins_list() + set_ctx_and_check_token() + local args = get_uri_args() + local subsystem = args["subsystem"] + -- If subsystem is passed then it should be either http or stream. + -- If it is not passed/nil then http will be default. + subsystem = subsystem or "http" + if subsystem == "http" or subsystem == "stream" then + local plugins = resources.plugins.get_plugins_list(subsystem) + core.response.exit(200, plugins) + end + core.response.exit(400,"invalid subsystem passed") +end + +-- Handle unsupported request methods for the virtual "reload" plugin +local function unsupported_methods_reload_plugin() + set_ctx_and_check_token() + + core.response.exit(405, { + error_msg = "please use PUT method to reload the plugins, " + .. get_method() .. " method is not allowed." 
+ }) +end + + +local function post_reload_plugins() + set_ctx_and_check_token() + + local success, err = events:post(reload_event, get_method(), ngx_time()) + if not success then + core.response.exit(503, err) + end + + core.response.exit(200, "done") +end + + +local function plugins_eq(old, new) + local old_set = {} + for _, p in ipairs(old) do + old_set[p.name] = p + end + + local new_set = {} + for _, p in ipairs(new) do + new_set[p.name] = p + end + + return core.table.set_eq(old_set, new_set) +end + + +local function sync_local_conf_to_etcd(reset) + local local_conf = core.config.local_conf() + + local plugins = {} + for _, name in ipairs(local_conf.plugins) do + core.table.insert(plugins, { + name = name, + }) + end + + for _, name in ipairs(local_conf.stream_plugins) do + core.table.insert(plugins, { + name = name, + stream = true, + }) + end + + if reset then + local res, err = core.etcd.get("/plugins") + if not res then + core.log.error("failed to get current plugins: ", err) + return + end + + if res.status == 404 then + -- nothing need to be reset + return + end + + if res.status ~= 200 then + core.log.error("failed to get current plugins, status: ", res.status) + return + end + + local stored_plugins = res.body.node.value + local revision = res.body.node.modifiedIndex + if plugins_eq(stored_plugins, plugins) then + core.log.info("plugins not changed, don't need to reset") + return + end + + core.log.warn("sync local conf to etcd") + + local res, err = core.etcd.atomic_set("/plugins", plugins, nil, revision) + if not res then + core.log.error("failed to set plugins: ", err) + end + + return + end + + core.log.warn("sync local conf to etcd") + + -- need to store all plugins name into one key so that it can be updated atomically + local res, err = core.etcd.set("/plugins", plugins) + if not res then + core.log.error("failed to set plugins: ", err) + end +end + + +local function reload_plugins(data, event, source, pid) + core.log.info("start to hot reload 
plugins") + plugin.load() + + if ngx_worker_id() == 0 then + sync_local_conf_to_etcd() + end +end + + +local function schema_validate() + local uri_segs = core.utils.split_uri(ngx.var.uri) + core.log.info("uri: ", core.json.delay_encode(uri_segs)) + + local seg_res = uri_segs[6] + local resource = resources[seg_res] + if not resource then + core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res}) + end + + local req_body, err = core.request.get_body(MAX_REQ_BODY) + if err then + core.log.error("failed to read request body: ", err) + core.response.exit(400, {error_msg = "invalid request body: " .. err}) + end + + if req_body then + local data, err = core.json.decode(req_body) + if err then + core.log.error("invalid request body: ", req_body, " err: ", err) + core.response.exit(400, {error_msg = "invalid request body: " .. err, + req_body = req_body}) + end + + req_body = data + end + + local ok, err = core.schema.check(resource.schema, req_body) + if ok then + core.response.exit(200) + end + core.response.exit(400, {error_msg = err}) +end + + +local function standalone_run() + set_ctx_and_check_token() + return standalone.run() +end + + +local http_head_route = { + paths = [[/apisix/admin]], + methods = {"HEAD"}, + handler = head, +} + + +local uri_route = { + http_head_route, + { + paths = [[/apisix/admin/*]], + methods = {"GET", "PUT", "POST", "DELETE", "PATCH"}, + handler = run, + }, + { + paths = [[/apisix/admin/plugins/list]], + methods = {"GET"}, + handler = get_plugins_list, + }, + { + paths = [[/apisix/admin/schema/validate/*]], + methods = {"POST"}, + handler = schema_validate, + }, + { + paths = reload_event, + methods = {"PUT"}, + handler = post_reload_plugins, + }, + -- Handle methods other than "PUT" on "/plugin/reload" to inform user + { + paths = reload_event, + methods = { "GET", "POST", "DELETE", "PATCH" }, + handler = unsupported_methods_reload_plugin, + }, +} + + +local standalone_uri_route = { + http_head_route, + { + paths 
= [[/apisix/admin/configs]], + methods = {"GET", "PUT"}, + handler = standalone_run, + }, +} + + +function _M.init_worker() + local local_conf = core.config.local_conf() + if not local_conf.apisix or not local_conf.apisix.enable_admin then + return + end + + local is_yaml_config_provider = local_conf.deployment.config_provider == "yaml" + + if is_yaml_config_provider then + router = route.new(standalone_uri_route) + standalone.init_worker() + else + router = route.new(uri_route) + end + + -- register reload plugin handler + events = require("apisix.events") + events:register(reload_plugins, reload_event, "PUT") + + if ngx_worker_id() == 0 then + -- check if admin_key is required + if local_conf.deployment.admin.admin_key_required == false then + core.log.warn("Admin key is bypassed! ", + "If you are deploying APISIX in a production environment, ", + "please enable `admin_key_required` and set a secure admin key!") + end + + if is_yaml_config_provider then -- standalone mode does not need sync to etcd + return + end + + local ok, err = ngx_timer_at(0, function(premature) + if premature then + return + end + + -- try to reset the /plugins to the current configuration in the admin + sync_local_conf_to_etcd(true) + end) + + if not ok then + error("failed to sync local configure to etcd: " .. err) + end + end +end + + +function _M.get() + return router +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/plugin_config.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/plugin_config.lua new file mode 100644 index 0000000..153a7bd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/plugin_config.lua @@ -0,0 +1,66 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local get_routes = require("apisix.router").http_routes +local resource = require("apisix.admin.resource") +local schema_plugin = require("apisix.admin.plugins").check_schema +local type = type +local tostring = tostring +local ipairs = ipairs + + +local function check_conf(id, conf, need_id, schema) + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, {error_msg = "invalid configuration: " .. err} + end + + local ok, err = schema_plugin(conf.plugins) + if not ok then + return nil, {error_msg = err} + end + + return true +end + + +local function delete_checker(id) + local routes, routes_ver = get_routes() + if routes_ver and routes then + for _, route in ipairs(routes) do + if type(route) == "table" and route.value + and route.value.plugin_config_id + and tostring(route.value.plugin_config_id) == id then + return 400, {error_msg = "can not delete this plugin config," + .. " route [" .. route.value.id + .. 
"] is still using it now"} + end + end + end + + return nil, nil +end + + +return resource.new({ + name = "plugin_configs", + kind = "plugin config", + schema = core.schema.plugin_config, + checker = check_conf, + unsupported_methods = {"post"}, + delete_checker = delete_checker +}) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/plugin_metadata.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/plugin_metadata.lua new file mode 100644 index 0000000..1387ca0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/plugin_metadata.lua @@ -0,0 +1,83 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local pcall = pcall +local require = require +local core = require("apisix.core") +local resource = require("apisix.admin.resource") +local encrypt_conf = require("apisix.plugin").encrypt_conf + +local injected_mark = "injected metadata_schema" + + +local function validate_plugin(name) + local pkg_name = "apisix.plugins." .. name + local ok, plugin_object = pcall(require, pkg_name) + if ok then + return true, plugin_object + end + + pkg_name = "apisix.stream.plugins." .. 
name + return pcall(require, pkg_name) +end + + +local function check_conf(plugin_name, conf) + if not plugin_name then + return nil, {error_msg = "missing plugin name"} + end + + local ok, plugin_object = validate_plugin(plugin_name) + if not ok then + return nil, {error_msg = "invalid plugin name"} + end + + if not plugin_object.metadata_schema then + plugin_object.metadata_schema = { + type = "object", + ['$comment'] = injected_mark, + properties = {}, + } + end + local schema = plugin_object.metadata_schema + + local ok, err + if schema['$comment'] == injected_mark + -- check_schema is not required. If missing, fallback to check schema directly + or not plugin_object.check_schema + then + ok, err = core.schema.check(schema, conf) + else + ok, err = plugin_object.check_schema(conf, core.schema.TYPE_METADATA) + end + + encrypt_conf(plugin_name, conf, core.schema.TYPE_METADATA) + + if not ok then + return nil, {error_msg = "invalid configuration: " .. err} + end + + return plugin_name +end + + +return resource.new({ + name = "plugin_metadata", + kind = "plugin_metadata", + schema = core.schema.plugin_metadata, + checker = check_conf, + unsupported_methods = {"post", "patch"} +}) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/plugins.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/plugins.lua new file mode 100644 index 0000000..201f8f3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/plugins.lua @@ -0,0 +1,139 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local require = require +local core = require("apisix.core") +local check_schema = require("apisix.plugin").check_schema +local ipairs = ipairs +local table_sort = table.sort +local table_insert = table.insert +local get_uri_args = ngx.req.get_uri_args +local plugin_get_all = require("apisix.plugin").get_all +local plugin_get_http = require("apisix.plugin").get +local plugin_get_stream = require("apisix.plugin").get_stream +local encrypt_conf = require("apisix.plugin").encrypt_conf +local pairs = pairs + +local _M = {} + + +function _M.check_schema(plugins_conf, schema_type) + local ok, err = check_schema(plugins_conf, schema_type, false) + if ok then + for name, conf in pairs(plugins_conf) do + encrypt_conf(name, conf, schema_type) + end + end + return ok, err +end + + +function _M.get(name) + local arg = get_uri_args() + -- If subsystem is passed inside args then it should be oneOf: http / stream. 
+ local subsystem = arg["subsystem"] or "http" + if subsystem ~= "http" and subsystem ~= "stream" then + return 400, {error_msg = "unsupported subsystem: "..subsystem} + end + + -- arg all to be deprecated + if (arg and arg["all"] == "true") then + core.log.warn("query parameter \"all\" will be deprecated soon.") + local http_plugins, stream_plugins = plugin_get_all({ + version = true, + priority = true, + schema = true, + metadata_schema = true, + consumer_schema = true, + type = true, + scope = true, + }) + + if arg["subsystem"] == "stream" then + return 200, stream_plugins + end + + return 200, http_plugins + end + + local plugin + + if subsystem == "http" then + plugin = plugin_get_http(name) + else + plugin = plugin_get_stream(name) + end + + if not plugin then + local err = "plugin not found in subsystem " .. subsystem + core.log.warn(err) + return 404, {error_msg = err} + end + + local json_schema = plugin.schema + if arg and arg["schema_type"] == "consumer" then + json_schema = plugin.consumer_schema + end + + if not json_schema then + return 400, {error_msg = "not found schema"} + end + + return 200, json_schema +end + + +function _M.get_plugins_list(subsystem) + local http_plugins + local stream_plugins + if subsystem == "http" then + http_plugins = core.config.local_conf().plugins + else + stream_plugins = core.config.local_conf().stream_plugins + end + + local priorities = {} + local success = {} + if http_plugins then + for i, name in ipairs(http_plugins) do + local plugin = plugin_get_http(name) + if plugin and plugin.priority then + priorities[name] = plugin.priority + table_insert(success, name) + end + end + end + + if stream_plugins then + for i, name in ipairs(stream_plugins) do + local plugin = plugin_get_stream(name) + if plugin and plugin.priority then + priorities[name] = plugin.priority + table_insert(success, name) + end + end + end + + local function cmp(x, y) + return priorities[x] > priorities[y] + end + + table_sort(success, cmp) + 
return success +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/proto.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/proto.lua new file mode 100644 index 0000000..f8133cc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/proto.lua @@ -0,0 +1,111 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local type = type +local ipairs = ipairs +local core = require("apisix.core") +local resource = require("apisix.admin.resource") +local get_routes = require("apisix.router").http_routes +local get_services = require("apisix.http.service").services +local compile_proto = require("apisix.plugins.grpc-transcode.proto").compile_proto +local tostring = tostring + + +local function check_conf(id, conf, need_id, schema) + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, {error_msg = "invalid configuration: " .. err} + end + + local ok, err = compile_proto(conf.content) + if not ok then + return nil, {error_msg = "invalid content: " .. 
err} + end + + return true +end + + +local function check_proto_used(plugins, deleting, ptype, pid) + + --core.log.info("check_proto_used plugins: ", core.json.delay_encode(plugins, true)) + --core.log.info("check_proto_used deleting: ", deleting) + --core.log.info("check_proto_used ptype: ", ptype) + --core.log.info("check_proto_used pid: ", pid) + + if plugins then + if type(plugins) == "table" and plugins["grpc-transcode"] + and plugins["grpc-transcode"].proto_id + and tostring(plugins["grpc-transcode"].proto_id) == deleting then + return false, {error_msg = "can not delete this proto, " + .. ptype .. " [" .. pid + .. "] is still using it now"} + end + end + return true +end + +local function delete_checker(id) + core.log.info("proto delete: ", id) + + local routes, routes_ver = get_routes() + + core.log.info("routes: ", core.json.delay_encode(routes, true)) + core.log.info("routes_ver: ", routes_ver) + + if routes_ver and routes then + for _, route in ipairs(routes) do + core.log.info("proto delete route item: ", core.json.delay_encode(route, true)) + if type(route) == "table" and route.value and route.value.plugins then + local ret, err = check_proto_used(route.value.plugins, id, "route",route.value.id) + if not ret then + return 400, err + end + end + end + end + core.log.info("proto delete route ref check pass: ", id) + + local services, services_ver = get_services() + + core.log.info("services: ", core.json.delay_encode(services, true)) + core.log.info("services_ver: ", services_ver) + + if services_ver and services then + for _, service in ipairs(services) do + if type(service) == "table" and service.value and service.value.plugins then + local ret, err = check_proto_used(service.value.plugins, id, + "service", service.value.id) + if not ret then + return 400, err + end + end + end + end + core.log.info("proto delete service ref check pass: ", id) + + return nil, nil +end + + +return resource.new({ + name = "protos", + kind = "proto", + schema = 
core.schema.proto, + checker = check_conf, + unsupported_methods = {"patch"}, + delete_checker = delete_checker +}) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/resource.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/resource.lua new file mode 100644 index 0000000..f3a1806 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/resource.lua @@ -0,0 +1,468 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local utils = require("apisix.admin.utils") +local apisix_ssl = require("apisix.ssl") +local apisix_consumer = require("apisix.consumer") +local setmetatable = setmetatable +local tostring = tostring +local ipairs = ipairs +local type = type + + +local _M = { + list_filter_fields = {}, +} +local mt = { + __index = _M +} + + +local no_id_res = { + consumers = true, + plugin_metadata = true +} + + +local function split_typ_and_id(id, sub_path) + local uri_segs = core.utils.split_uri(sub_path) + local typ = id + local id = nil + if #uri_segs > 0 then + id = uri_segs[1] + end + return typ, id +end + + +local function check_forbidden_properties(conf, forbidden_properties) + local not_allow_properties = "the property is forbidden: " + + if conf then + for _, v in ipairs(forbidden_properties) do + if conf[v] then + return not_allow_properties .. " " .. v + end + end + + if conf.upstream then + for _, v in ipairs(forbidden_properties) do + if conf.upstream[v] then + return not_allow_properties .. " upstream." .. v + end + end + end + + if conf.plugins then + for _, v in ipairs(forbidden_properties) do + if conf.plugins[v] then + return not_allow_properties .. " plugins." .. v + end + end + end + end + + return nil +end + + +function _M:check_conf(id, conf, need_id, typ, allow_time) + if self.name == "secrets" then + id = typ .. "/" .. id + end + -- check if missing configurations + if not conf then + return nil, {error_msg = "missing configurations"} + end + + -- check id if need id + if not no_id_res[self.name] then + id = id or conf.id + if need_id and not id then + return nil, {error_msg = "missing ".. self.kind .. " id"} + end + + if not need_id and id then + return nil, {error_msg = "wrong ".. self.kind .. " id, do not need it"} + end + + if need_id and conf.id and tostring(conf.id) ~= tostring(id) then + return nil, {error_msg = "wrong ".. self.kind .. 
" id"} + end + + conf.id = id + end + + -- check create time and update time + if not allow_time then + local forbidden_properties = {"create_time", "update_time"} + local err = check_forbidden_properties(conf, forbidden_properties) + if err then + return nil, {error_msg = err} + end + end + + core.log.info("conf : ", core.json.delay_encode(conf)) + + -- check the resource own rules + if self.name ~= "secrets" then + core.log.info("schema: ", core.json.delay_encode(self.schema)) + end + + local ok, err = self.checker(id, conf, need_id, self.schema, typ) + + if not ok then + return ok, err + else + if no_id_res[self.name] then + return ok + else + return need_id and id or true + end + end +end + + +function _M:get(id, conf, sub_path) + if core.table.array_find(self.unsupported_methods, "get") then + return 405, {error_msg = "not supported `GET` method for " .. self.kind} + end + + local key = "/" .. self.name + local typ = nil + if self.name == "secrets" then + key = key .. "/" + typ, id = split_typ_and_id(id, sub_path) + end + + if id then + if self.name == "secrets" then + key = key .. typ + end + key = key .. "/" .. 
id + end + + -- some resources(consumers) have sub resources(credentials), + -- the key format of sub resources will differ from the main resource + if self.get_resource_etcd_key then + key = self.get_resource_etcd_key(id, conf, sub_path) + end + + local res, err = core.etcd.get(key, not id) + if not res then + core.log.error("failed to get ", self.kind, "[", key, "] from etcd: ", err) + return 503, {error_msg = err} + end + + if self.name == "ssls" then + -- not return private key for security + if res.body and res.body.node and res.body.node.value then + res.body.node.value.key = nil + end + end + + -- consumers etcd range response will include credentials, so need to filter out them + if self.name == "consumers" and res.body.list then + res.body.list = apisix_consumer.filter_consumers_list(res.body.list) + res.body.total = #res.body.list + end + + utils.fix_count(res.body, id) + return res.status, res.body +end + + +function _M:post(id, conf, sub_path, args) + if core.table.array_find(self.unsupported_methods, "post") then + return 405, {error_msg = "not supported `POST` method for " .. self.kind} + end + + local id, err = self:check_conf(id, conf, false) + if not id then + return 400, err + end + + if self.name == "ssls" then + -- encrypt private key + conf.key = apisix_ssl.aes_encrypt_pkey(conf.key) + + if conf.keys then + for i = 1, #conf.keys do + conf.keys[i] = apisix_ssl.aes_encrypt_pkey(conf.keys[i]) + end + end + end + + local key = "/" .. self.name + utils.inject_timestamp(conf) + + local ttl = nil + if args then + ttl = args.ttl + end + + local res, err = core.etcd.push(key, conf, ttl) + if not res then + core.log.error("failed to post ", self.kind, "[", key, "] to etcd: ", err) + return 503, {error_msg = err} + end + + return res.status, res.body +end + + +function _M:put(id, conf, sub_path, args) + if core.table.array_find(self.unsupported_methods, "put") then + return 405, {error_msg = "not supported `PUT` method for " .. 
self.kind} + end + + local key = "/" .. self.name + local typ = nil + if self.name == "secrets" then + typ, id = split_typ_and_id(id, sub_path) + key = key .. "/" .. typ + end + + local need_id = not no_id_res[self.name] + local ok, err = self:check_conf(id, conf, need_id, typ) + if not ok then + return 400, err + end + + if self.name ~= "secrets" then + id = ok + end + + if self.name == "ssls" then + -- encrypt private key + conf.key = apisix_ssl.aes_encrypt_pkey(conf.key) + + if conf.keys then + for i = 1, #conf.keys do + conf.keys[i] = apisix_ssl.aes_encrypt_pkey(conf.keys[i]) + end + end + end + + key = key .. "/" .. id + + if self.get_resource_etcd_key then + key = self.get_resource_etcd_key(id, conf, sub_path, args) + end + + if self.name == "credentials" then + local consumer_key = apisix_consumer.get_consumer_key_from_credential_key(key) + local res, err = core.etcd.get(consumer_key, false) + if not res then + return 503, {error_msg = err} + end + if res.status == 404 then + return res.status, {error_msg = "consumer not found"} + end + if res.status ~= 200 then + core.log.debug("failed to get consumer for the credential, credential key: ", key, + ", consumer key: ", consumer_key, ", res.status: ", res.status) + return res.status, {error_msg = "failed to get the consumer"} + end + end + + if self.name ~= "plugin_metadata" then + local ok, err = utils.inject_conf_with_prev_conf(self.kind, key, conf) + if not ok then + return 503, {error_msg = err} + end + else + conf.id = id + end + + local ttl = nil + if args then + ttl = args.ttl + end + + local res, err = core.etcd.set(key, conf, ttl) + if not res then + core.log.error("failed to put ", self.kind, "[", key, "] to etcd: ", err) + return 503, {error_msg = err} + end + + return res.status, res.body +end + +-- Keep the unused conf to make the args list consistent with other methods +function _M:delete(id, conf, sub_path, uri_args) + if core.table.array_find(self.unsupported_methods, "delete") then + return 
405, {error_msg = "not supported `DELETE` method for " .. self.kind}
+    end
+
+    -- etcd key layout: /<resource name>[/<secret manager type>]/<id>
+    local key = "/" .. self.name
+    local typ = nil
+    if self.name == "secrets" then
+        typ, id = split_typ_and_id(id, sub_path)
+    end
+
+    if not id then
+        return 400, {error_msg = "missing " .. self.kind .. " id"}
+    end
+
+    -- core.log.error("failed to delete ", self.kind, "[", key, "] in etcd: ", err)
+
+    if self.name == "secrets" then
+        key = key .. "/" .. typ
+    end
+
+    key = key .. "/" .. id
+
+    -- a resource may override the etcd key derivation entirely
+    if self.get_resource_etcd_key then
+        key = self.get_resource_etcd_key(id, conf, sub_path, uri_args)
+    end
+
+    -- referential-integrity check before delete; ?force=true bypasses it
+    if self.delete_checker and uri_args.force ~= "true" then
+        local code, err = self.delete_checker(id)
+        if err then
+            return code, err
+        end
+    end
+
+    -- deleting a consumer also removes its credentials sub-tree
+    if self.name == "consumers" then
+        local res, err = core.etcd.rmdir(key .. "/credentials/")
+        if not res then
+            return 503, {error_msg = err}
+        end
+    end
+
+    local res, err = core.etcd.delete(key)
+    if not res then
+        core.log.error("failed to delete ", self.kind, "[", key, "] in etcd: ", err)
+        return 503, {error_msg = err}
+    end
+
+    return res.status, res.body
+end
+
+
+-- PATCH handler: merge `conf` into the stored value (or patch a sub path),
+-- then write back using an etcd compare-and-swap on modifiedIndex.
+function _M:patch(id, conf, sub_path, args)
+    if core.table.array_find(self.unsupported_methods, "patch") then
+        return 405, {error_msg = "not supported `PATCH` method for " .. self.kind}
+    end
+
+    local key = "/" .. self.name
+    local typ = nil
+    if self.name == "secrets" then
+        -- for secrets the `id` argument carries the manager type and the
+        -- real id is the first segment of sub_path
+        local uri_segs = core.utils.split_uri(sub_path)
+        if #uri_segs < 1 then
+            return 400, {error_msg = "no secret id"}
+        end
+        typ = id
+        id = uri_segs[1]
+        sub_path = core.table.concat(uri_segs, "/", 2)
+    end
+
+    if not id then
+        return 400, {error_msg = "missing " .. self.kind .. " id"}
+    end
+
+    if self.name == "secrets" then
+        key = key .. "/" .. typ
+    end
+
+    key = key .. "/" .. id
+
+    if conf == nil then
+        return 400, {error_msg = "missing new configuration"}
+    end
+
+    -- a whole-object PATCH (no sub path) must be a table
+    if not sub_path or sub_path == "" then
+        if type(conf) ~= "table" then
+            return 400, {error_msg = "invalid configuration"}
+        end
+    end
+
+    local res_old, err = core.etcd.get(key)
+    if not res_old then
+        core.log.error("failed to get ", self.kind, " [", key, "] in etcd: ", err)
+        return 503, {error_msg = err}
+    end
+
+    if res_old.status ~= 200 then
+        return res_old.status, res_old.body
+    end
+    core.log.info("key: ", key, " old value: ", core.json.delay_encode(res_old, true))
+
+    local node_value = res_old.body.node.value
+    local modified_index = res_old.body.node.modifiedIndex
+
+    if sub_path and sub_path ~= "" then
+        if self.name == "ssls" then
+            -- private keys are stored AES-encrypted
+            if sub_path == "key" then
+                conf = apisix_ssl.aes_encrypt_pkey(conf)
+            elseif sub_path == "keys" then
+                for i = 1, #conf do
+                    conf[i] = apisix_ssl.aes_encrypt_pkey(conf[i])
+                end
+            end
+        end
+        local code, err, node_val = core.table.patch(node_value, sub_path, conf)
+        node_value = node_val
+        if code then
+            return code, {error_msg = err}
+        end
+        utils.inject_timestamp(node_value, nil, true)
+    else
+        if self.name == "ssls" then
+            if conf.key then
+                conf.key = apisix_ssl.aes_encrypt_pkey(conf.key)
+            end
+
+            if conf.keys then
+                for i = 1, #conf.keys do
+                    conf.keys[i] = apisix_ssl.aes_encrypt_pkey(conf.keys[i])
+                end
+            end
+        end
+        node_value = core.table.merge(node_value, conf)
+        utils.inject_timestamp(node_value, nil, conf)
+    end
+
+    core.log.info("new conf: ", core.json.delay_encode(node_value, true))
+
+    local ok, err = self:check_conf(id, node_value, true, typ, true)
+    if not ok then
+        return 400, err
+    end
+
+    local ttl = nil
+    if args then
+        ttl = args.ttl
+    end
+
+    -- CAS write: fails if the key was modified since we read it
+    local res, err = core.etcd.atomic_set(key, node_value, ttl, modified_index)
+    if not res then
+        core.log.error("failed to set new ", self.kind, "[", key, "] to etcd: ", err)
+        return 503, {error_msg = err}
+    end
+
+    return res.status, res.body
+end
+
+
+function
_M.new(opt)
+    return setmetatable(opt, mt)
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/routes.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/routes.lua
new file mode 100644
index 0000000..e13bb23
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/routes.lua
@@ -0,0 +1,184 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local expr = require("resty.expr.v1")
+local core = require("apisix.core")
+local apisix_upstream = require("apisix.upstream")
+local resource = require("apisix.admin.resource")
+local schema_plugin = require("apisix.admin.plugins").check_schema
+local type = type
+local loadstring = loadstring
+local ipairs = ipairs
+local jp = require("jsonpath")
+
+-- Recursively walk a `vars` expression tree and validate any
+-- `post_arg.*` operand as a JSONPath expression.
+local function validate_post_arg(node)
+    if type(node) ~= "table" then
+        return true
+    end
+
+    -- Handle post_arg conditions
+    if #node >= 3 and type(node[1]) == "string" and node[1]:find("^post_arg%.") then
+        local key = node[1]
+        local json_path = "$." .. key:sub(11) -- Remove "post_arg." prefix
+        local _, err = jp.parse(json_path)
+        if err then
+            return false, err
+        end
+        return true
+    end
+
+    for _, child in ipairs(node) do
+        local ok, err = validate_post_arg(child)
+        if not ok then
+            return false, err
+        end
+    end
+    return true
+end
+
+
+-- Validate a route object: schema, upstream/service/plugin_config
+-- references (must exist in etcd), plugins, vars and user scripts.
+local function check_conf(id, conf, need_id, schema)
+    if conf.host and conf.hosts then
+        return nil, {error_msg = "only one of host or hosts is allowed"}
+    end
+
+    if conf.remote_addr and conf.remote_addrs then
+        return nil, {error_msg = "only one of remote_addr or remote_addrs is "
+                                 .. "allowed"}
+    end
+
+    local ok, err = core.schema.check(schema, conf)
+    if not ok then
+        return nil, {error_msg = "invalid configuration: " .. err}
+    end
+
+    local upstream_conf = conf.upstream
+    if upstream_conf then
+        local ok, err = apisix_upstream.check_upstream_conf(upstream_conf)
+        if not ok then
+            return nil, {error_msg = err}
+        end
+    end
+
+    local upstream_id = conf.upstream_id
+    if upstream_id then
+        local key = "/upstreams/" .. upstream_id
+        local res, err = core.etcd.get(key)
+        if not res then
+            return nil, {error_msg = "failed to fetch upstream info by "
+                                     .. "upstream id [" .. upstream_id .. "]: "
+                                     .. err}
+        end
+
+        if res.status ~= 200 then
+            return nil, {error_msg = "failed to fetch upstream info by "
+                                     .. "upstream id [" .. upstream_id .. "], "
+                                     .. "response code: " .. res.status}
+        end
+    end
+
+    local service_id = conf.service_id
+    if service_id then
+        local key = "/services/" .. service_id
+        local res, err = core.etcd.get(key)
+        if not res then
+            return nil, {error_msg = "failed to fetch service info by "
+                                     .. "service id [" .. service_id .. "]: "
+                                     .. err}
+        end
+
+        if res.status ~= 200 then
+            return nil, {error_msg = "failed to fetch service info by "
+                                     .. "service id [" .. service_id .. "], "
+                                     .. "response code: " .. res.status}
+        end
+    end
+
+    local plugin_config_id = conf.plugin_config_id
+    if plugin_config_id then
+        local key = "/plugin_configs/" .. plugin_config_id
+        local res, err = core.etcd.get(key)
+        if not res then
+            return nil, {error_msg = "failed to fetch plugin config info by "
+                                     .. "plugin config id [" .. plugin_config_id .. "]: "
+                                     .. err}
+        end
+
+        if res.status ~= 200 then
+            return nil, {error_msg = "failed to fetch plugin config info by "
+                                     .. "plugin config id [" .. plugin_config_id .. "], "
+                                     .. "response code: " .. res.status}
+        end
+    end
+
+    if conf.plugins then
+        local ok, err = schema_plugin(conf.plugins)
+        if not ok then
+            return nil, {error_msg = err}
+        end
+    end
+
+    if conf.vars then
+        ok, err = expr.new(conf.vars)
+        if not ok then
+            return nil, {error_msg = "failed to validate the 'vars' expression: " .. err}
+        end
+    end
+
+    ok, err = validate_post_arg(conf.vars)
+    if not ok then
+        return nil, {error_msg = "failed to validate the 'vars' expression: " ..
+                                 err}
+    end
+
+    if conf.filter_func then
+        local func, err = loadstring("return " .. conf.filter_func)
+        if not func then
+            return nil, {error_msg = "failed to load 'filter_func' string: "
+                                     .. err}
+        end
+
+        if type(func()) ~= "function" then
+            return nil, {error_msg = "'filter_func' should be a function"}
+        end
+    end
+
+    if conf.script then
+        local obj, err = loadstring(conf.script)
+        if not obj then
+            return nil, {error_msg = "failed to load 'script' string: "
+                                     .. err}
+        end
+
+        if type(obj()) ~= "table" then
+            return nil, {error_msg = "'script' should be a Lua object"}
+        end
+    end
+
+    return true
+end
+
+
+return resource.new({
+    name = "routes",
+    kind = "route",
+    schema = core.schema.route,
+    checker = check_conf,
+    list_filter_fields = {
+        service_id = true,
+        upstream_id = true,
+    },
+})
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/schema.lua
new file mode 100644
index 0000000..73d9e78
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/schema.lua
@@ -0,0 +1,35 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+
+local _M = {
+    version = 0.1,
+}
+
+
+-- Return the JSON schema registered under `name`, or 400 if unknown.
+function _M.get(name)
+    local json_schema = core.schema[name]
+    core.log.info("schema: ", core.json.delay_encode(core.schema, true))
+    if not json_schema then
+        return 400, {error_msg = "not found schema: " ..
name}
+    end
+
+    return 200, json_schema
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/secrets.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/secrets.lua
new file mode 100644
index 0000000..b149ef0
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/secrets.lua
@@ -0,0 +1,45 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local require = require
+
+local core = require("apisix.core")
+local resource = require("apisix.admin.resource")
+
+local pcall = pcall
+
+
+-- Validate a secret object against the schema of its secret manager
+-- module (apisix.secret.<typ>), loaded dynamically via pcall(require).
+local function check_conf(id, conf, need_id, schema, typ)
+    local ok, secret_manager = pcall(require, "apisix.secret." .. typ)
+    if not ok then
+        return false, {error_msg = "invalid secret manager: " .. typ}
+    end
+
+    local ok, err = core.schema.check(secret_manager.schema, conf)
+    if not ok then
+        return nil, {error_msg = "invalid configuration: " .. err}
+    end
+
+    return true
+end
+
+
+return resource.new({
+    name = "secrets",
+    kind = "secret",
+    checker = check_conf,
+    unsupported_methods = {"post"}
+})
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/services.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/services.lua
new file mode 100644
index 0000000..4218b77
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/services.lua
@@ -0,0 +1,128 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local get_routes = require("apisix.router").http_routes
+local get_stream_routes = require("apisix.router").stream_routes
+local apisix_upstream = require("apisix.upstream")
+local resource = require("apisix.admin.resource")
+local schema_plugin = require("apisix.admin.plugins").check_schema
+local tostring = tostring
+local ipairs = ipairs
+local type = type
+local loadstring = loadstring
+
+
+-- Validate a service object: schema, inline/referenced upstream,
+-- plugins and optional user script.
+local function check_conf(id, conf, need_id, schema)
+    local ok, err = core.schema.check(schema, conf)
+    if not ok then
+        return nil, {error_msg = "invalid configuration: " ..
err}
+    end
+
+    if need_id and not id then
+        return nil, {error_msg = "wrong type of service id"}
+    end
+
+    local upstream_conf = conf.upstream
+    if upstream_conf then
+        local ok, err = apisix_upstream.check_upstream_conf(upstream_conf)
+        if not ok then
+            return nil, {error_msg = err}
+        end
+    end
+
+    -- a referenced upstream must already exist in etcd
+    local upstream_id = conf.upstream_id
+    if upstream_id then
+        local key = "/upstreams/" .. upstream_id
+        local res, err = core.etcd.get(key)
+        if not res then
+            return nil, {error_msg = "failed to fetch upstream info by "
+                                     .. "upstream id [" .. upstream_id .. "]: "
+                                     .. err}
+        end
+
+        if res.status ~= 200 then
+            return nil, {error_msg = "failed to fetch upstream info by "
+                                     .. "upstream id [" .. upstream_id .. "], "
+                                     .. "response code: " .. res.status}
+        end
+    end
+
+    if conf.plugins then
+        local ok, err = schema_plugin(conf.plugins)
+        if not ok then
+            return nil, {error_msg = err}
+        end
+    end
+
+    if conf.script then
+        local obj, err = loadstring(conf.script)
+        if not obj then
+            return nil, {error_msg = "failed to load 'script' string: "
+                                     .. err}
+        end
+
+        if type(obj()) ~= "table" then
+            return nil, {error_msg = "'script' should be a Lua object"}
+        end
+    end
+
+    return true
+end
+
+
+-- Refuse to delete a service while any http or stream route references it.
+local function delete_checker(id)
+    local routes, routes_ver = get_routes()
+    core.log.info("routes: ", core.json.delay_encode(routes, true))
+    core.log.info("routes_ver: ", routes_ver)
+    if routes_ver and routes then
+        for _, route in ipairs(routes) do
+            if type(route) == "table" and route.value
+               and route.value.service_id
+               and tostring(route.value.service_id) == id then
+                return 400, {error_msg = "can not delete this service directly,"
+                                         .. " route [" .. route.value.id
+                                         .. "] is still using it now"}
+            end
+        end
+    end
+
+    local stream_routes, stream_routes_ver = get_stream_routes()
+    core.log.info("stream_routes: ", core.json.delay_encode(stream_routes, true))
+    core.log.info("stream_routes_ver: ", stream_routes_ver)
+    if stream_routes_ver and stream_routes then
+        for _, route in ipairs(stream_routes) do
+            if type(route) == "table" and route.value
+               and route.value.service_id
+               and tostring(route.value.service_id) == id then
+                return 400, {error_msg = "can not delete this service directly,"
+                                         .. " stream_route [" .. route.value.id
+                                         .. "] is still using it now"}
+            end
+        end
+    end
+
+    return nil, nil
+end
+
+
+return resource.new({
+    name = "services",
+    kind = "service",
+    schema = core.schema.service,
+    checker = check_conf,
+    delete_checker = delete_checker
+})
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/ssl.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/ssl.lua
new file mode 100644
index 0000000..d13d08f
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/ssl.lua
@@ -0,0 +1,37 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local resource = require("apisix.admin.resource")
+local apisix_ssl = require("apisix.ssl")
+
+
+-- Validate an SSL object via apisix.ssl.check_ssl_conf; returns the id
+-- (when need_id is set) so the caller can use it, or true otherwise.
+local function check_conf(id, conf, need_id, schema)
+    local ok, err = apisix_ssl.check_ssl_conf(false, conf)
+    if not ok then
+        return nil, {error_msg = err}
+    end
+
+    return need_id and id or true
+end
+
+
+return resource.new({
+    name = "ssls",
+    kind = "ssl",
+    schema = core.schema.ssl,
+    checker = check_conf
+})
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/standalone.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/standalone.lua
new file mode 100644
index 0000000..0d17b15
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/standalone.lua
@@ -0,0 +1,339 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local type = type
+local pairs = pairs
+local ipairs = ipairs
+local str_lower = string.lower
+local ngx = ngx
+local get_method = ngx.req.get_method
+local shared_dict = ngx.shared["standalone-config"]
+local table_insert = table.insert
+local table_new = require("table.new")
+local yaml = require("lyaml")
+local events = require("apisix.events")
+local core = require("apisix.core")
+local config_yaml = require("apisix.core.config_yaml")
+local check_schema = require("apisix.core.schema").check
+local tbl_deepcopy = require("apisix.core.table").deepcopy
+
+local EVENT_UPDATE = "standalone-api-configuration-update"
+
+local _M = {}
+
+-- Detect duplicate identifiers within one resource list of a request;
+-- consumers may be identified by either id (credential) or username.
+local function check_duplicate(item, key, id_set)
+    local identifier, identifier_type
+    if key == "consumers" then
+        identifier = item.id or item.username
+        identifier_type = item.id and "credential id" or "username"
+    else
+        identifier = item.id
+        identifier_type = "id"
+    end
+
+    if id_set[identifier] then
+        return true, "found duplicate " .. identifier_type .. " " .. identifier .. " in " .. key
+    end
+    id_set[identifier] = true
+    return false
+end
+
+-- Read and JSON-decode the current config from the shared dict.
+local function get_config()
+    local config = shared_dict:get("config")
+    if not config then
+        return nil, "not found"
+    end
+
+    local err
+    config, err = core.json.decode(config)
+    if not config then
+        return nil, "failed to decode json: " .. err
+    end
+    return config
+end
+
+
+-- Persist the new config in the shared dict and notify other workers.
+local function update_and_broadcast_config(apisix_yaml)
+    local raw, err = core.json.encode(apisix_yaml)
+    if not raw then
+        core.log.error("failed to encode json: ", err)
+        return nil, "failed to encode json: " .. err
+    end
+
+    if shared_dict then
+        -- the worker that handles Admin API calls is responsible for writing the shared dict
+        local ok, err = shared_dict:set("config", raw)
+        if not ok then
+            return nil, "failed to save config to shared dict: " .. err
+        end
+        core.log.info("standalone config updated: ", raw)
+    else
+        core.log.crit(config_yaml.ERR_NO_SHARED_DICT)
+    end
+    return events:post(EVENT_UPDATE, EVENT_UPDATE)
+end
+
+
+-- PUT handler: parse the JSON/YAML body, validate every resource item,
+-- bump per-resource conf versions and broadcast the new config.
+local function update(ctx)
+    local content_type = core.request.header(nil, "content-type") or "application/json"
+
+    -- read the request body
+    local req_body, err = core.request.get_body()
+    if err then
+        return core.response.exit(400, {error_msg = "invalid request body: " .. err})
+    end
+
+    if not req_body or #req_body <= 0 then
+        return core.response.exit(400, {error_msg = "invalid request body: empty request body"})
+    end
+
+    -- parse the request body
+    local data
+    if core.string.has_prefix(content_type, "application/yaml") then
+        data = yaml.load(req_body, { all = false })
+        if not data or type(data) ~= "table" then
+            err = "invalid yaml request body"
+        end
+    else
+        data, err = core.json.decode(req_body)
+    end
+    if err then
+        core.log.error("invalid request body: ", req_body, " err: ", err)
+        core.response.exit(400, {error_msg = "invalid request body: " .. err})
+    end
+    req_body = data
+
+    local config, err = get_config()
+    if not config then
+        if err ~= "not found" then
+            core.log.error("failed to get config from shared dict: ", err)
+            return core.response.exit(500, {
+                error_msg = "failed to get config from shared dict: " .. err
+            })
+        end
+    end
+
+    -- check input by jsonschema
+    local apisix_yaml = {}
+    local created_objs = config_yaml.fetch_all_created_obj()
+
+    for key, obj in pairs(created_objs) do
+        local conf_version_key = obj.conf_version_key
+        local conf_version = config and config[conf_version_key] or obj.conf_version
+        local items = req_body[key]
+        local new_conf_version = req_body[conf_version_key]
+        if not new_conf_version then
+            new_conf_version = conf_version + 1
+        else
+            if type(new_conf_version) ~= "number" then
+                return core.response.exit(400, {
+                    error_msg = conf_version_key .. " must be a number",
+                })
+            end
+            if new_conf_version < conf_version then
+                return core.response.exit(400, {
+                    error_msg = conf_version_key ..
+                        " must be greater than or equal to (" .. conf_version .. ")",
+                })
+            end
+        end
+
+        apisix_yaml[conf_version_key] = new_conf_version
+        if new_conf_version == conf_version then
+            -- unchanged version: keep the previously stored items
+            apisix_yaml[key] = config and config[key]
+        elseif items and #items > 0 then
+            apisix_yaml[key] = table_new(#items, 0)
+            local item_schema = obj.item_schema
+            local item_checker = obj.checker
+            local id_set = {}
+
+            for index, item in ipairs(items) do
+                local item_temp = tbl_deepcopy(item)
+                local valid, err
+                -- need to recover to 0-based subscript
+                local err_prefix = "invalid " .. key .. " at index " .. (index - 1) .. ", err: "
+                if item_schema then
+                    valid, err = check_schema(obj.item_schema, item_temp)
+                    if not valid then
+                        core.log.error(err_prefix, err)
+                        core.response.exit(400, {error_msg = err_prefix .. err})
+                    end
+                end
+                if item_checker then
+                    local item_checker_key
+                    if item.id then
+                        -- credential need to check key
+                        item_checker_key = "/" .. key .. "/" .. item_temp.id
+                    end
+                    valid, err = item_checker(item_temp, item_checker_key)
+                    if not valid then
+                        core.log.error(err_prefix, err)
+                        core.response.exit(400, {error_msg = err_prefix .. err})
+                    end
+                end
+                -- prevent updating resource with the same ID
+                -- (e.g., service ID or other resource IDs) in a single request
+                local duplicated, err = check_duplicate(item, key, id_set)
+                if duplicated then
+                    core.log.error(err)
+                    core.response.exit(400, { error_msg = err })
+                end
+
+                table_insert(apisix_yaml[key], item)
+            end
+        end
+    end
+
+    local ok, err = update_and_broadcast_config(apisix_yaml)
+    if not ok then
+        core.response.exit(500, err)
+    end
+
+    return core.response.exit(202)
+end
+
+
+-- GET handler: return the current config as JSON or YAML, honouring
+-- the Accept header.
+local function get(ctx)
+    local accept = core.request.header(nil, "accept") or "application/json"
+    local want_yaml_resp = core.string.has_prefix(accept, "application/yaml")
+
+    local config, err = get_config()
+    if not config then
+        if err ~= "not found" then
+            core.log.error("failed to get config from shared dict: ", err)
+            return core.response.exit(500, {
+                error_msg = "failed to get config from shared dict: " .. err
+            })
+        end
+        -- no stored config yet: report only the current conf versions
+        config = {}
+        local created_objs = config_yaml.fetch_all_created_obj()
+        for _, obj in pairs(created_objs) do
+            config[obj.conf_version_key] = obj.conf_version
+        end
+    end
+
+    local resp, err
+    if want_yaml_resp then
+        core.response.set_header("Content-Type", "application/yaml")
+        resp = yaml.dump({ config })
+        if not resp then
+            err = "failed to encode yaml"
+        end
+
+        -- remove the first line "---" and the last line "..."
+        -- because the yaml.dump() will add them for multiple documents
+        local m = ngx.re.match(resp, [[^---\s*([\s\S]*?)\s*\.\.\.\s*$]], "jo")
+        if m and m[1] then
+            resp = m[1]
+        end
+    else
+        core.response.set_header("Content-Type", "application/json")
+        resp, err = core.json.encode(config, true)
+        if not resp then
+            err = "failed to encode json: " .. err
+        end
+    end
+
+    if not resp then
+        return core.response.exit(500, {error_msg = err})
+    end
+    return core.response.exit(200, resp)
+end
+
+
+-- Entry point: dispatch PUT to update(), everything else to get().
+function _M.run()
+    local ctx = ngx.ctx.api_ctx
+    local method = str_lower(get_method())
+    if method == "put" then
+        return update(ctx)
+    else
+        return get(ctx)
+    end
+end
+
+
+local patch_schema
+do
+    local resource_schema = {
+        "proto",
+        "global_rule",
+        "route",
+        "service",
+        "upstream",
+        "consumer",
+        "consumer_group",
+        "credential",
+        "ssl",
+        "plugin_config",
+    }
+    -- allow an optional integer modifiedIndex on every resource schema
+    local function attach_modifiedIndex_schema(name)
+        local schema = core.schema[name]
+        if not schema then
+            core.log.error("schema for ", name, " not found")
+            return
+        end
+        if schema.properties and not schema.properties.modifiedIndex then
+            schema.properties.modifiedIndex = {
+                type = "integer",
+            }
+        end
+    end
+
+    local function patch_credential_schema()
+        local credential_schema = core.schema["credential"]
+        if credential_schema and credential_schema.properties then
+            credential_schema.properties.id = {
+                type = "string",
+                minLength = 15,
+                maxLength = 128,
+                pattern = [[^[a-zA-Z0-9-_]+/credentials/[a-zA-Z0-9-_.]+$]],
+            }
+        end
+    end
+
+    function patch_schema()
+        -- attach modifiedIndex schema to all resource schemas
+        for _, name in ipairs(resource_schema) do
+            attach_modifiedIndex_schema(name)
+        end
+        -- patch credential schema
+        patch_credential_schema()
+    end
+end
+
+
+-- Register the cross-worker update event and patch the schemas once.
+function _M.init_worker()
+    local function update_config()
+        local config, err = shared_dict:get("config")
+        if not config then
+            core.log.error("failed to get config from shared dict: ", err)
+            return
+        end
+
+        config, err = core.json.decode(config)
+        if not config then
+            core.log.error("failed to decode json: ", err)
+            return
+        end
+        config_yaml._update_config(config)
+    end
+    events:register(update_config, EVENT_UPDATE, EVENT_UPDATE)
+
+    patch_schema()
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/stream_routes.lua
b/CloudronPackages/APISIX/apisix-source/apisix/admin/stream_routes.lua
new file mode 100644
index 0000000..699b0aa
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/stream_routes.lua
@@ -0,0 +1,81 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local resource = require("apisix.admin.resource")
+local stream_route_checker = require("apisix.stream.router.ip_port").stream_route_checker
+
+
+-- Validate a stream route: schema, referenced upstream/service must
+-- exist in etcd, then the stream router's own checker.
+local function check_conf(id, conf, need_id, schema)
+    local ok, err = core.schema.check(schema, conf)
+    if not ok then
+        return nil, {error_msg = "invalid configuration: " .. err}
+    end
+
+    local upstream_id = conf.upstream_id
+    if upstream_id then
+        local key = "/upstreams/" .. upstream_id
+        local res, err = core.etcd.get(key)
+        if not res then
+            return nil, {error_msg = "failed to fetch upstream info by "
+                                     .. "upstream id [" .. upstream_id .. "]: "
+                                     .. err}
+        end
+
+        if res.status ~= 200 then
+            return nil, {error_msg = "failed to fetch upstream info by "
+                                     .. "upstream id [" .. upstream_id .. "], "
+                                     .. "response code: " .. res.status}
+        end
+    end
+
+    local service_id = conf.service_id
+    if service_id then
+        local key = "/services/" .. service_id
+        local res, err = core.etcd.get(key)
+        if not res then
+            return nil, {error_msg = "failed to fetch service info by "
+                                     .. "service id [" .. service_id .. "]: "
+                                     .. err}
+        end
+
+        if res.status ~= 200 then
+            return nil, {error_msg = "failed to fetch service info by "
+                                     .. "service id [" .. service_id .. "], "
+                                     .. "response code: " .. res.status}
+        end
+    end
+
+    local ok, err = stream_route_checker(conf, true)
+    if not ok then
+        return nil, {error_msg = err}
+    end
+
+    return true
+end
+
+
+return resource.new({
+    name = "stream_routes",
+    kind = "stream route",
+    schema = core.schema.stream_route,
+    checker = check_conf,
+    unsupported_methods = { "patch" },
+    list_filter_fields = {
+        service_id = true,
+        upstream_id = true,
+    },
+})
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/upstreams.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/upstreams.lua
new file mode 100644
index 0000000..6c04d93
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/upstreams.lua
@@ -0,0 +1,134 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local config_util = require("apisix.core.config_util")
+local get_routes = require("apisix.router").http_routes
+local get_services = require("apisix.http.service").services
+local get_plugin_configs = require("apisix.plugin_config").plugin_configs
+local get_consumers = require("apisix.consumer").consumers
+local get_consumer_groups = require("apisix.consumer_group").consumer_groups
+local get_global_rules = require("apisix.global_rules").global_rules
+local apisix_upstream = require("apisix.upstream")
+local resource = require("apisix.admin.resource")
+local tostring = tostring
+local ipairs = ipairs
+
+
+-- Validate an upstream object via the shared upstream checker.
+local function check_conf(id, conf, need_id)
+    local ok, err = apisix_upstream.check_upstream_conf(conf)
+    if not ok then
+        return nil, {error_msg = err}
+    end
+
+    return true
+end
+
+
+-- True when `up_id` appears in a traffic-split plugin's weighted_upstreams.
+local function up_id_in_plugins(plugins, up_id)
+    if plugins and plugins["traffic-split"]
+       and plugins["traffic-split"].rules then
+
+        for _, rule in ipairs(plugins["traffic-split"].rules) do
+            local plugin_upstreams = rule.weighted_upstreams
+            for _, plugin_upstream in ipairs(plugin_upstreams) do
+                if plugin_upstream.upstream_id
+                   and tostring(plugin_upstream.upstream_id) == up_id then
+                    return true
+                end
+            end
+        end
+
+        return false
+    end
+end
+
+
+-- Return an error table if any resource in `resources` still references
+-- the upstream `up_id` (via plugins, or via upstream_id unless
+-- only_check_plugin is set); nil otherwise.
+local function check_resources_reference(resources, up_id,
+                                         only_check_plugin, resources_name)
+    if resources then
+        for _, resource in config_util.iterate_values(resources) do
+            if resource and resource.value then
+                if up_id_in_plugins(resource.value.plugins, up_id) then
+                    return {error_msg = "can not delete this upstream,"
+                                        .. " plugin in "
+                                        .. resources_name .. " ["
+                                        .. resource.value.id
+                                        .. "] is still using it now"}
+                end
+
+                if not only_check_plugin and resource.value.upstream_id
+                   and tostring(resource.value.upstream_id) == up_id then
+                    return {error_msg = "can not delete this upstream, "
+                                        .. resources_name .. " [" .. resource.value.id
+                                        .. "] is still using it now"}
+                end
+            end
+        end
+    end
+end
+
+
+-- Refuse to delete an upstream while any route/service/plugin_config/
+-- consumer/consumer_group/global_rule still references it.
+local function delete_checker(id)
+    local routes = get_routes()
+    local err_msg = check_resources_reference(routes, id, false, "route")
+    if err_msg then
+        return 400, err_msg
+    end
+
+    local services, services_ver = get_services()
+    core.log.info("services: ", core.json.delay_encode(services, true))
+    core.log.info("services_ver: ", services_ver)
+    local err_msg = check_resources_reference(services, id, false, "service")
+    if err_msg then
+        return 400, err_msg
+    end
+
+    local plugin_configs = get_plugin_configs()
+    local err_msg = check_resources_reference(plugin_configs, id, true, "plugin_config")
+    if err_msg then
+        return 400, err_msg
+    end
+
+    local consumers = get_consumers()
+    local err_msg = check_resources_reference(consumers, id, true, "consumer")
+    if err_msg then
+        return 400, err_msg
+    end
+
+    local consumer_groups = get_consumer_groups()
+    local err_msg = check_resources_reference(consumer_groups, id, true, "consumer_group")
+    if err_msg then
+        return 400, err_msg
+    end
+
+    local global_rules = get_global_rules()
+    err_msg = check_resources_reference(global_rules, id, true, "global_rules")
+    if err_msg then
+        return 400, err_msg
+    end
+
+    return nil, nil
+end
+
+
+return resource.new({
+    name = "upstreams",
+    kind = "upstream",
+    schema = core.schema.upstream,
+    checker = check_conf,
+    delete_checker = delete_checker
+})
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/utils.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/utils.lua
new file mode 100644
index 0000000..eee2787
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/utils.lua
@@ -0,0 +1,113 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local ngx_time = ngx.time +local tonumber = tonumber +local ipairs = ipairs +local pairs = pairs + + +local _M = {} + + +local function inject_timestamp(conf, prev_conf, patch_conf) + if not conf.create_time then + if prev_conf and (prev_conf.node or prev_conf.list).value.create_time then + conf.create_time = (prev_conf.node or prev_conf.list).value.create_time + else + -- As we don't know existent data's create_time, we have to pretend + -- they are created now. + conf.create_time = ngx_time() + end + end + + if not conf.update_time or + -- For PATCH request, the modification is passed as 'patch_conf' + -- If the sub path is used, the 'patch_conf' will be a placeholder `true` + (patch_conf and (patch_conf == true or patch_conf.update_time == nil)) + then + -- reset the update_time if: + -- 1. PATCH request, with sub path + -- 2. PATCH request, update_time not given + -- 3. Other request, update_time not given + conf.update_time = ngx_time() + end +end +_M.inject_timestamp = inject_timestamp + + +function _M.inject_conf_with_prev_conf(kind, key, conf) + local res, err = core.etcd.get(key) + if not res or (res.status ~= 200 and res.status ~= 404) then + core.log.error("failed to get " .. kind .. 
"[", key, "] from etcd: ", err or res.status) + return nil, err + end + + if res.status == 404 then + inject_timestamp(conf) + else + inject_timestamp(conf, res.body) + end + + return true +end + + +-- fix_count makes the "count" field returned by etcd reasonable +function _M.fix_count(body, id) + if body.count then + if not id then + -- remove the count of placeholder (init_dir) + body.count = tonumber(body.count) - 1 + else + body.count = tonumber(body.count) + end + end +end + + +function _M.decrypt_params(decrypt_func, body, schema_type) + -- list + if body.list then + for _, route in ipairs(body.list) do + if route.value and route.value.plugins then + for name, conf in pairs(route.value.plugins) do + decrypt_func(name, conf, schema_type) + end + end + end + return + end + + -- node + local plugins = body.node and body.node.value + and body.node.value.plugins + + if plugins then + for name, conf in pairs(plugins) do + decrypt_func(name, conf, schema_type) + end + end + + -- metadata + if schema_type == core.schema.TYPE_METADATA then + local conf = body.node and body.node.value + decrypt_func(conf.name, conf, schema_type) + end +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/admin/v3_adapter.lua b/CloudronPackages/APISIX/apisix-source/apisix/admin/v3_adapter.lua new file mode 100644 index 0000000..8941eef --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/admin/v3_adapter.lua @@ -0,0 +1,249 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local type = type +local pairs = pairs +local tonumber = tonumber +local ngx = ngx +local re_find = ngx.re.find +local fetch_local_conf = require("apisix.core.config_local").local_conf +local try_read_attr = require("apisix.core.table").try_read_attr +local deepcopy = require("apisix.core.table").deepcopy +local log = require("apisix.core.log") +local request = require("apisix.core.request") +local response = require("apisix.core.response") +local table = require("apisix.core.table") + +local _M = {} + + +local admin_api_version +local function enable_v3() + if admin_api_version then + if admin_api_version == "v3" then + return true + end + + if admin_api_version == "default" then + return false + end + end + + local local_conf, err = fetch_local_conf() + if not local_conf then + admin_api_version = "default" + log.error("failed to fetch local conf: ", err) + return false + end + + local api_ver = try_read_attr(local_conf, "deployment", "admin", "admin_api_version") + if api_ver ~= "v3" then + admin_api_version = "default" + return false + end + + admin_api_version = api_ver + return true +end +_M.enable_v3 = enable_v3 + + +function _M.to_v3(body, action) + if not enable_v3() then + body.action = action + end +end + + +function _M.to_v3_list(body) + if not enable_v3() then + return + end + + if body.node.dir then + body.list = body.node.nodes + body.node = nil + end +end + + +local function sort(l, r) + return l.createdIndex < r.createdIndex +end + + +local function pagination(body, args) + args.page = tonumber(args.page) + args.page_size = 
tonumber(args.page_size) + if not args.page or not args.page_size then + return + end + + if args.page_size < 10 or args.page_size > 500 then + return response.exit(400, "page_size must be between 10 and 500") + end + + if not args.page or args.page < 1 then + -- default page is 1 + args.page = 1 + end + + local list = body.list + + -- sort nodes by there createdIndex + table.sort(list, sort) + + local to = args.page * args.page_size + local from = to - args.page_size + 1 + + local res = table.new(20, 0) + + for i = from, to do + if list[i] then + res[i - from + 1] = list[i] + end + end + + body.list = res +end + + +local function _filter(item, args, resource) + if not args.filter then + return true + end + + local filters, err = ngx.decode_args(args.filter or "", 100) + if not filters then + log.error("failed to decode filter args: ", err) + return false + end + + for key, value in pairs(filters) do + if not resource.list_filter_fields[key] then + log.warn("filter field '", key, "' is not supported by resource: ", resource.name) + goto CONTINUE + end + + if not item[key] then + return false + end + + if type(value) == "table" then + value = value[#value] -- get the last value in the table + end + + if item[key] ~= value then + return false + end + + ::CONTINUE:: + end + + return true +end + + +local function filter(body, args, resource) + for i = #body.list, 1, -1 do + local name_matched = true + local label_matched = true + local uri_matched = true + if args.name then + name_matched = false + local matched = re_find(body.list[i].value.name, args.name, "jo") + if matched then + name_matched = true + end + end + + if args.label then + label_matched = false + if body.list[i].value.labels then + for k, _ in pairs(body.list[i].value.labels) do + if k == args.label then + label_matched = true + break + end + end + end + end + + if args.uri then + uri_matched = false + if body.list[i].value.uri then + local matched = re_find(body.list[i].value.uri, args.uri, "jo") + if 
matched then + uri_matched = true + end + end + + if body.list[i].value.uris then + for _, uri in pairs(body.list[i].value.uris) do + if re_find(uri, args.uri, "jo") then + uri_matched = true + break + end + end + end + end + + if not name_matched or not label_matched or not uri_matched + or not _filter(body.list[i].value, args, resource) then + table.remove(body.list, i) + end + end +end + + +function _M.filter(body, resource) + if not enable_v3() then + return body + end + + local args = request.get_uri_args() + local processed_body = deepcopy(body) + + if processed_body.deleted then + processed_body.node = nil + end + + -- strip node wrapping for single query, create, and update scenarios. + if processed_body.node then + processed_body = processed_body.node + end + + -- filter and paging logic for list query only + if processed_body.list then + filter(processed_body, args, resource) + + -- calculate the total amount of filtered data + processed_body.total = processed_body.list and #processed_body.list or 0 + + pagination(processed_body, args) + + -- remove the count field returned by etcd + -- we don't need a field that reflects the length of the currently returned data, + -- it doesn't make sense + processed_body.count = nil + end + + return processed_body +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/api_router.lua b/CloudronPackages/APISIX/apisix-source/apisix/api_router.lua new file mode 100644 index 0000000..9fbf328 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/api_router.lua @@ -0,0 +1,116 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local require = require +local router = require("apisix.utils.router") +local plugin_mod = require("apisix.plugin") +local core = require("apisix.core") +local ipairs = ipairs +local ngx_header = ngx.header +local type = type + + +local _M = {} +local match_opts = {} +local has_route_not_under_apisix + + +local fetch_api_router +do + local routes = {} +function fetch_api_router() + core.table.clear(routes) + + has_route_not_under_apisix = false + + for _, plugin in ipairs(plugin_mod.plugins) do + local api_fun = plugin.api + if api_fun then + local api_routes = api_fun() + core.log.debug("fetched api routes: ", + core.json.delay_encode(api_routes, true)) + for _, route in ipairs(api_routes) do + if route.uri == nil then + core.log.error("got nil uri in api route: ", + core.json.delay_encode(route, true)) + break + end + + local typ_uri = type(route.uri) + if not has_route_not_under_apisix then + if typ_uri == "string" then + if not core.string.has_prefix(route.uri, "/apisix/") then + has_route_not_under_apisix = true + end + else + for _, uri in ipairs(route.uri) do + if not core.string.has_prefix(uri, "/apisix/") then + has_route_not_under_apisix = true + break + end + end + end + end + + core.table.insert(routes, { + methods = route.methods, + paths = route.uri, + handler = function (api_ctx) + local code, body = route.handler(api_ctx) + if code or body then + if type(body) == "table" and ngx_header["Content-Type"] == nil then + core.response.set_header("Content-Type", "application/json") + end + + core.response.exit(code, body) + end + end + }) 
+ end + end + end + + return router.new(routes) +end + +end -- do + + +function _M.has_route_not_under_apisix() + if has_route_not_under_apisix == nil then + return true + end + + return has_route_not_under_apisix +end + + +function _M.match(api_ctx) + local api_router = core.lrucache.global("api_router", plugin_mod.load_times, fetch_api_router) + if not api_router then + core.log.error("failed to fetch valid api router") + return false + end + + core.table.clear(match_opts) + match_opts.method = api_ctx.var.request_method + + local ok = api_router:dispatch(api_ctx.var.uri, match_opts, api_ctx) + return ok +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/balancer.lua b/CloudronPackages/APISIX/apisix-source/apisix/balancer.lua new file mode 100644 index 0000000..0fe2e65 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/balancer.lua @@ -0,0 +1,400 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local require = require +local balancer = require("ngx.balancer") +local core = require("apisix.core") +local priority_balancer = require("apisix.balancer.priority") +local apisix_upstream = require("apisix.upstream") +local ipairs = ipairs +local is_http = ngx.config.subsystem == "http" +local enable_keepalive = balancer.enable_keepalive and is_http +local set_more_tries = balancer.set_more_tries +local get_last_failure = balancer.get_last_failure +local set_timeouts = balancer.set_timeouts +local ngx_now = ngx.now +local str_byte = string.byte + + +local module_name = "balancer" +local pickers = {} + +local lrucache_server_picker = core.lrucache.new({ + ttl = 300, count = 256 +}) +local lrucache_addr = core.lrucache.new({ + ttl = 300, count = 1024 * 4 +}) + + +local _M = { + version = 0.2, + name = module_name, +} + + +local function transform_node(new_nodes, node) + if not new_nodes._priority_index then + new_nodes._priority_index = {} + end + + if not new_nodes[node.priority] then + new_nodes[node.priority] = {} + core.table.insert(new_nodes._priority_index, node.priority) + end + + new_nodes[node.priority][node.host .. ":" .. 
node.port] = node.weight + return new_nodes +end + + +local function fetch_health_nodes(upstream, checker) + local nodes = upstream.nodes + if not checker then + local new_nodes = core.table.new(0, #nodes) + for _, node in ipairs(nodes) do + new_nodes = transform_node(new_nodes, node) + end + return new_nodes + end + + local host = upstream.checks and upstream.checks.active and upstream.checks.active.host + local port = upstream.checks and upstream.checks.active and upstream.checks.active.port + local up_nodes = core.table.new(0, #nodes) + for _, node in ipairs(nodes) do + local ok, err = checker:get_target_status(node.host, port or node.port, host) + if ok then + up_nodes = transform_node(up_nodes, node) + elseif err then + core.log.warn("failed to get health check target status, addr: ", + node.host, ":", port or node.port, ", host: ", host, ", err: ", err) + end + end + + if core.table.nkeys(up_nodes) == 0 then + core.log.warn("all upstream nodes is unhealthy, use default") + for _, node in ipairs(nodes) do + up_nodes = transform_node(up_nodes, node) + end + end + + return up_nodes +end + + +local function create_server_picker(upstream, checker) + local picker = pickers[upstream.type] + if not picker then + pickers[upstream.type] = require("apisix.balancer." .. upstream.type) + picker = pickers[upstream.type] + end + + if picker then + local nodes = upstream.nodes + local addr_to_domain = {} + for _, node in ipairs(nodes) do + if node.domain then + local addr = node.host .. ":" .. 
node.port + addr_to_domain[addr] = node.domain + end + end + + local up_nodes = fetch_health_nodes(upstream, checker) + + if #up_nodes._priority_index > 1 then + core.log.info("upstream nodes: ", core.json.delay_encode(up_nodes)) + local server_picker = priority_balancer.new(up_nodes, upstream, picker) + server_picker.addr_to_domain = addr_to_domain + return server_picker + end + + core.log.info("upstream nodes: ", + core.json.delay_encode(up_nodes[up_nodes._priority_index[1]])) + local server_picker = picker.new(up_nodes[up_nodes._priority_index[1]], upstream) + server_picker.addr_to_domain = addr_to_domain + return server_picker + end + + return nil, "invalid balancer type: " .. upstream.type, 0 +end + + +local function parse_addr(addr) + local host, port, err = core.utils.parse_addr(addr) + return {host = host, port = port}, err +end + + +-- set_balancer_opts will be called in balancer phase and before any tries +local function set_balancer_opts(route, ctx) + local up_conf = ctx.upstream_conf + + -- If the matched route has timeout config, prefer to use the route config. 
+ local timeout = nil + if route and route.value and route.value.timeout then + timeout = route.value.timeout + else + if up_conf.timeout then + timeout = up_conf.timeout + end + end + if timeout then + local ok, err = set_timeouts(timeout.connect, timeout.send, + timeout.read) + if not ok then + core.log.error("could not set upstream timeouts: ", err) + end + end + + local retries = up_conf.retries + if not retries or retries < 0 then + retries = #up_conf.nodes - 1 + end + + if retries > 0 then + if up_conf.retry_timeout and up_conf.retry_timeout > 0 then + ctx.proxy_retry_deadline = ngx_now() + up_conf.retry_timeout + end + local ok, err = set_more_tries(retries) + if not ok then + core.log.error("could not set upstream retries: ", err) + elseif err then + core.log.warn("could not set upstream retries: ", err) + end + end +end + + +local function parse_server_for_upstream_host(picked_server, upstream_scheme) + local standard_port = apisix_upstream.scheme_to_port[upstream_scheme] + local host = picked_server.domain or picked_server.host + if upstream_scheme and (not standard_port or standard_port ~= picked_server.port) then + host = host .. ":" .. picked_server.port + end + return host +end + + +-- pick_server will be called: +-- 1. in the access phase so that we can set headers according to the picked server +-- 2. each time we need to retry upstream +local function pick_server(route, ctx) + core.log.info("route: ", core.json.delay_encode(route, true)) + core.log.info("ctx: ", core.json.delay_encode(ctx, true)) + local up_conf = ctx.upstream_conf + + for _, node in ipairs(up_conf.nodes) do + if core.utils.parse_ipv6(node.host) and str_byte(node.host, 1) ~= str_byte("[") then + node.host = '[' .. node.host .. 
']' + end + end + + local nodes_count = #up_conf.nodes + if nodes_count == 1 then + local node = up_conf.nodes[1] + ctx.balancer_ip = node.host + ctx.balancer_port = node.port + node.upstream_host = parse_server_for_upstream_host(node, ctx.upstream_scheme) + return node + end + + local version = ctx.upstream_version + local key = ctx.upstream_key + local checker = ctx.up_checker + + ctx.balancer_try_count = (ctx.balancer_try_count or 0) + 1 + if ctx.balancer_try_count > 1 then + if ctx.server_picker and ctx.server_picker.after_balance then + ctx.server_picker.after_balance(ctx, true) + end + + if checker then + local state, code = get_last_failure() + local host = up_conf.checks and up_conf.checks.active and up_conf.checks.active.host + local port = up_conf.checks and up_conf.checks.active and up_conf.checks.active.port + if state == "failed" then + if code == 504 then + checker:report_timeout(ctx.balancer_ip, port or ctx.balancer_port, host) + else + checker:report_tcp_failure(ctx.balancer_ip, port or ctx.balancer_port, host) + end + else + checker:report_http_status(ctx.balancer_ip, port or ctx.balancer_port, host, code) + end + end + end + + if checker then + version = version .. "#" .. checker.status_ver + end + + -- the same picker will be used in the whole request, especially during the retry + local server_picker = ctx.server_picker + if not server_picker then + server_picker = lrucache_server_picker(key, version, + create_server_picker, up_conf, checker) + end + if not server_picker then + return nil, "failed to fetch server picker" + end + + local server, err = server_picker.get(ctx) + if not server then + err = err or "no valid upstream node" + return nil, "failed to find valid upstream server, " .. 
err + end + ctx.balancer_server = server + + local domain = server_picker.addr_to_domain[server] + local res, err = lrucache_addr(server, nil, parse_addr, server) + if err then + core.log.error("failed to parse server addr: ", server, " err: ", err) + return core.response.exit(502) + end + + res.domain = domain + ctx.balancer_ip = res.host + ctx.balancer_port = res.port + ctx.server_picker = server_picker + res.upstream_host = parse_server_for_upstream_host(res, ctx.upstream_scheme) + + return res +end + + +-- for test +_M.pick_server = pick_server + + +local set_current_peer +do + local pool_opt = {} + local default_keepalive_pool + + function set_current_peer(server, ctx) + local up_conf = ctx.upstream_conf + local keepalive_pool = up_conf.keepalive_pool + + if enable_keepalive then + if not keepalive_pool then + if not default_keepalive_pool then + local local_conf = core.config.local_conf() + local up_keepalive_conf = + core.table.try_read_attr(local_conf, "nginx_config", + "http", "upstream") + default_keepalive_pool = {} + default_keepalive_pool.idle_timeout = + core.config_util.parse_time_unit(up_keepalive_conf.keepalive_timeout) + default_keepalive_pool.size = up_keepalive_conf.keepalive + default_keepalive_pool.requests = up_keepalive_conf.keepalive_requests + end + + keepalive_pool = default_keepalive_pool + end + + local idle_timeout = keepalive_pool.idle_timeout + local size = keepalive_pool.size + local requests = keepalive_pool.requests + + core.table.clear(pool_opt) + pool_opt.pool_size = size + + local scheme = up_conf.scheme + local pool = scheme .. "#" .. server.host .. "#" .. server.port + -- other TLS schemes don't use http balancer keepalive + if (scheme == "https" or scheme == "grpcs") then + local sni = ctx.var.upstream_host + pool = pool .. "#" .. sni + + if up_conf.tls and up_conf.tls.client_cert then + pool = pool .. "#" .. 
up_conf.tls.client_cert + end + end + pool_opt.pool = pool + + local ok, err = balancer.set_current_peer(server.host, server.port, + pool_opt) + if not ok then + return ok, err + end + + return balancer.enable_keepalive(idle_timeout, requests) + end + + return balancer.set_current_peer(server.host, server.port) + end +end + + +function _M.run(route, ctx, plugin_funcs) + local server, err + + if ctx.picked_server then + -- use the server picked in the access phase + server = ctx.picked_server + ctx.picked_server = nil + + set_balancer_opts(route, ctx) + + else + if ctx.proxy_retry_deadline and ctx.proxy_retry_deadline < ngx_now() then + -- retry count is (try count - 1) + core.log.error("proxy retry timeout, retry count: ", (ctx.balancer_try_count or 1) - 1, + ", deadline: ", ctx.proxy_retry_deadline, " now: ", ngx_now()) + return core.response.exit(502) + end + -- retry + server, err = pick_server(route, ctx) + if not server then + core.log.error("failed to pick server: ", err) + return core.response.exit(502) + end + + local header_changed + local pass_host = ctx.pass_host + if pass_host == "node" then + local host = server.upstream_host + if host ~= ctx.var.upstream_host then + -- retried node has a different host + ctx.var.upstream_host = host + header_changed = true + end + end + + local _, run = plugin_funcs("before_proxy") + -- always recreate request as the request may be changed by plugins + if run or header_changed then + balancer.recreate_request() + end + end + + core.log.info("proxy request to ", server.host, ":", server.port) + + local ok, err = set_current_peer(server, ctx) + if not ok then + core.log.error("failed to set server peer [", server.host, ":", + server.port, "] err: ", err) + return core.response.exit(502) + end + + ctx.proxy_passed = true +end + + +function _M.init_worker() +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/balancer/chash.lua b/CloudronPackages/APISIX/apisix-source/apisix/balancer/chash.lua new 
file mode 100644 index 0000000..f0e971a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/balancer/chash.lua @@ -0,0 +1,154 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local resty_chash = require("resty.chash") +local str_char = string.char +local str_gsub = string.gsub +local pairs = pairs + + +local CONSISTENT_POINTS = 160 -- points per server, taken from `resty.chash` + + +local _M = {} + + +local function fetch_chash_hash_key(ctx, upstream) + local key = upstream.key + local hash_on = upstream.hash_on or "vars" + local chash_key + + if hash_on == "consumer" then + chash_key = ctx.consumer_name + elseif hash_on == "vars" then + chash_key = ctx.var[key] + elseif hash_on == "header" then + chash_key = ctx.var["http_" .. key] + elseif hash_on == "cookie" then + chash_key = ctx.var["cookie_" .. 
key] + elseif hash_on == "vars_combinations" then + local err, n_resolved + chash_key, err, n_resolved = core.utils.resolve_var(key, ctx.var) + if err then + core.log.error("could not resolve vars in ", key, " error: ", err) + end + + if n_resolved == 0 then + chash_key = nil + end + end + + if not chash_key then + chash_key = ctx.var["remote_addr"] + core.log.warn("chash_key fetch is nil, use default chash_key ", + "remote_addr: ", chash_key) + end + core.log.info("upstream key: ", key) + core.log.info("hash_on: ", hash_on) + core.log.info("chash_key: ", core.json.delay_encode(chash_key)) + + return chash_key +end + + +function _M.new(up_nodes, upstream) + local str_null = str_char(0) + + local nodes_count = 0 + local safe_limit = 0 + local gcd = 0 + local servers, nodes = {}, {} + + for serv, weight in pairs(up_nodes) do + if gcd == 0 then + gcd = weight + else + gcd = core.math.gcd(gcd, weight) + end + end + + if gcd == 0 then + -- all nodes' weight are 0 + gcd = 1 + end + + for serv, weight in pairs(up_nodes) do + local id = str_gsub(serv, ":", str_null) + + nodes_count = nodes_count + 1 + weight = weight / gcd + safe_limit = safe_limit + weight + servers[id] = serv + nodes[id] = weight + end + safe_limit = safe_limit * CONSISTENT_POINTS + + local picker = resty_chash:new(nodes) + return { + upstream = upstream, + get = function (ctx) + local id + if ctx.balancer_tried_servers then + if ctx.balancer_tried_servers_count == nodes_count then + return nil, "all upstream servers tried" + end + + -- the 'safe_limit' is a best effort limit to prevent infinite loop caused by bug + for i = 1, safe_limit do + id, ctx.chash_last_server_index = picker:next(ctx.chash_last_server_index) + if not ctx.balancer_tried_servers[servers[id]] then + break + end + end + else + local chash_key = fetch_chash_hash_key(ctx, upstream) + id, ctx.chash_last_server_index = picker:find(chash_key) + end + -- core.log.warn("chash id: ", id, " val: ", servers[id]) + return servers[id] + end, + 
after_balance = function (ctx, before_retry) + if not before_retry then + if ctx.balancer_tried_servers then + core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers) + ctx.balancer_tried_servers = nil + end + + return nil + end + + if not ctx.balancer_tried_servers then + ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2) + end + + ctx.balancer_tried_servers[ctx.balancer_server] = true + ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1 + end, + before_retry_next_priority = function (ctx) + if ctx.balancer_tried_servers then + core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers) + ctx.balancer_tried_servers = nil + end + + ctx.balancer_tried_servers_count = 0 + end, + } +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/balancer/ewma.lua b/CloudronPackages/APISIX/apisix-source/apisix/balancer/ewma.lua new file mode 100644 index 0000000..c533a01 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/balancer/ewma.lua @@ -0,0 +1,243 @@ +-- Original Authors: Shiv Nagarajan & Scott Francis +-- Accessed: March 12, 2018 +-- Inspiration drawn from: +-- https://github.com/twitter/finagle/blob/1bc837c4feafc0096e43c0e98516a8e1c50c4421 +-- /finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/PeakEwma.scala +local core = require("apisix.core") +local resty_lock = require("resty.lock") + +local nkeys = core.table.nkeys +local table_insert = core.table.insert +local ngx = ngx +local ngx_shared = ngx.shared +local ngx_now = ngx.now +local math = math +local pairs = pairs +local ipairs = ipairs +local next = next +local error = error + +local DECAY_TIME = 10 -- this value is in seconds +local LOCK_KEY = ":ewma_key" + +local shm_ewma = ngx_shared["balancer-ewma"] +local shm_last_touched_at = ngx_shared["balancer-ewma-last-touched-at"] + +local lrucache_addr = core.lrucache.new({ttl = 300, count = 1024}) +local lrucache_trans_format 
= core.lrucache.new({ttl = 300, count = 256}) + +local ewma_lock, ewma_lock_err = resty_lock:new("balancer-ewma-locks", {timeout = 0, exptime = 0.1}) + +local _M = {name = "ewma"} + +local function lock(upstream) + local _, err = ewma_lock:lock(upstream .. LOCK_KEY) + if err and err ~= "timeout" then + core.log.error("EWMA Balancer failed to lock: ", err) + end + + return err +end + +local function unlock() + local ok, err = ewma_lock:unlock() + if not ok then + core.log.error("EWMA Balancer failed to unlock: ", err) + end + + return err +end + +local function decay_ewma(ewma, last_touched_at, rtt, now) + local td = now - last_touched_at + td = math.max(td, 0) + local weight = math.exp(-td / DECAY_TIME) + + ewma = ewma * weight + rtt * (1.0 - weight) + return ewma +end + +local function store_stats(upstream, ewma, now) + local success, err, forcible = shm_last_touched_at:set(upstream, now) + if not success then + core.log.error("shm_last_touched_at:set failed: ", err) + end + if forcible then + core.log.warn("shm_last_touched_at:set valid items forcibly overwritten") + end + + success, err, forcible = shm_ewma:set(upstream, ewma) + if not success then + core.log.error("shm_ewma:set failed: ", err) + end + if forcible then + core.log.warn("shm_ewma:set valid items forcibly overwritten") + end +end + +local function get_or_update_ewma(upstream, rtt, update) + if update then + local lock_err = lock(upstream) + if lock_err ~= nil then + return 0, lock_err + end + end + + local ewma = shm_ewma:get(upstream) or 0 + + local now = ngx_now() + local last_touched_at = shm_last_touched_at:get(upstream) or 0 + ewma = decay_ewma(ewma, last_touched_at, rtt, now) + + if not update then + return ewma, nil + end + + store_stats(upstream, ewma, now) + + unlock() + + return ewma, nil +end + +local function get_upstream_name(upstream) + return upstream.host .. ":" .. 
upstream.port +end + +local function score(upstream) + -- Original implementation used names + -- Endpoints don't have names, so passing in IP:Port as key instead + local upstream_name = get_upstream_name(upstream) + return get_or_update_ewma(upstream_name, 0, false) +end + +local function parse_addr(addr) + local host, port, err = core.utils.parse_addr(addr) + return {host = host, port = port}, err +end + +local function _trans_format(up_nodes) + -- trans + -- {"1.2.3.4:80":100,"5.6.7.8:8080":100} + -- into + -- [{"host":"1.2.3.4","port":"80"},{"host":"5.6.7.8","port":"8080"}] + local peers = {} + local res, err + + for addr, _ in pairs(up_nodes) do + res, err = lrucache_addr(addr, nil, parse_addr, addr) + if not err then + core.table.insert(peers, res) + else + core.log.error('parse_addr error: ', addr, err) + end + end + + return next(peers) and peers or nil +end + +local function _ewma_find(ctx, up_nodes) + local peers + + if not up_nodes or nkeys(up_nodes) == 0 then + return nil, 'up_nodes empty' + end + + if ctx.balancer_tried_servers and ctx.balancer_tried_servers_count == nkeys(up_nodes) then + return nil, "all upstream servers tried" + end + + peers = lrucache_trans_format(up_nodes, ctx.upstream_version, _trans_format, up_nodes) + if not peers then + return nil, 'up_nodes trans error' + end + + local filtered_peers + if ctx.balancer_tried_servers then + for _, peer in ipairs(peers) do + if not ctx.balancer_tried_servers[get_upstream_name(peer)] then + if not filtered_peers then + filtered_peers = {} + end + + table_insert(filtered_peers, peer) + end + end + else + filtered_peers = peers + end + + local endpoint = filtered_peers[1] + + if #filtered_peers > 1 then + local a, b = math.random(1, #filtered_peers), math.random(1, #filtered_peers - 1) + if b >= a then + b = b + 1 + end + + local backendpoint + endpoint, backendpoint = filtered_peers[a], filtered_peers[b] + if score(endpoint) > score(backendpoint) then + endpoint = backendpoint + end + end + + 
return get_upstream_name(endpoint) +end + +local function _ewma_after_balance(ctx, before_retry) + if before_retry then + if not ctx.balancer_tried_servers then + ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2) + end + + ctx.balancer_tried_servers[ctx.balancer_server] = true + ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1 + + return nil + end + + if ctx.balancer_tried_servers then + core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers) + ctx.balancer_tried_servers = nil + end + + local response_time = ctx.var.upstream_response_time or 0 + local connect_time = ctx.var.upstream_connect_time or 0 + local rtt = connect_time + response_time + local upstream = ctx.var.upstream_addr + + if not upstream then + return nil, "no upstream addr found" + end + + return get_or_update_ewma(upstream, rtt, true) +end + +function _M.new(up_nodes, upstream) + if not shm_ewma or not shm_last_touched_at then + return nil, "dictionary not find" + end + + if not ewma_lock then + error(ewma_lock_err) + end + + return { + upstream = upstream, + get = function(ctx) + return _ewma_find(ctx, up_nodes) + end, + after_balance = _ewma_after_balance, + before_retry_next_priority = function (ctx) + if ctx.balancer_tried_servers then + core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers) + ctx.balancer_tried_servers = nil + end + + ctx.balancer_tried_servers_count = 0 + end, + } +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/balancer/least_conn.lua b/CloudronPackages/APISIX/apisix-source/apisix/balancer/least_conn.lua new file mode 100644 index 0000000..8923d17 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/balancer/least_conn.lua @@ -0,0 +1,113 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. 
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local binaryHeap = require("binaryheap") +local ipairs = ipairs +local pairs = pairs + + +local _M = {} + + +local function least_score(a, b) + return a.score < b.score +end + + +function _M.new(up_nodes, upstream) + local servers_heap = binaryHeap.minUnique(least_score) + for server, weight in pairs(up_nodes) do + local score = 1 / weight + -- Note: the argument order of insert is different from others + servers_heap:insert({ + server = server, + effect_weight = 1 / weight, + score = score, + }, server) + end + + return { + upstream = upstream, + get = function (ctx) + local server, info, err + if ctx.balancer_tried_servers then + local tried_server_list = {} + while true do + server, info = servers_heap:peek() + -- we need to let the retry > #nodes so this branch can be hit and + -- the request will retry next priority of nodes + if server == nil then + err = "all upstream servers tried" + break + end + + if not ctx.balancer_tried_servers[server] then + break + end + + servers_heap:pop() + core.table.insert(tried_server_list, info) + end + + for _, info in ipairs(tried_server_list) do + servers_heap:insert(info, info.server) + end + else + server, info = servers_heap:peek() + end + + if not server then + return nil, err + end + + 
info.score = info.score + info.effect_weight + servers_heap:update(server, info) + return server + end, + after_balance = function (ctx, before_retry) + local server = ctx.balancer_server + local info = servers_heap:valueByPayload(server) + info.score = info.score - info.effect_weight + servers_heap:update(server, info) + + if not before_retry then + if ctx.balancer_tried_servers then + core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers) + ctx.balancer_tried_servers = nil + end + + return nil + end + + if not ctx.balancer_tried_servers then + ctx.balancer_tried_servers = core.tablepool.fetch("balancer_tried_servers", 0, 2) + end + + ctx.balancer_tried_servers[server] = true + end, + before_retry_next_priority = function (ctx) + if ctx.balancer_tried_servers then + core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers) + ctx.balancer_tried_servers = nil + end + end, + } +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/balancer/priority.lua b/CloudronPackages/APISIX/apisix-source/apisix/balancer/priority.lua new file mode 100644 index 0000000..af5d60c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/balancer/priority.lua @@ -0,0 +1,81 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local ipairs = ipairs + + +local _M = {} + + +local function max_priority(a, b) + return a > b +end + + +function _M.new(up_nodes, upstream, picker_mod) + local priority_index = up_nodes._priority_index + core.table.sort(priority_index, max_priority) + + local pickers = core.table.new(#priority_index, 0) + for i, priority in ipairs(priority_index) do + local picker, err = picker_mod.new(up_nodes[priority], upstream) + if not picker then + return nil, "failed to create picker with priority " .. priority .. ": " .. err + end + if not picker.before_retry_next_priority then + return nil, "picker should define 'before_retry_next_priority' to reset ctx" + end + + pickers[i] = picker + end + + return { + upstream = upstream, + get = function (ctx) + for i = ctx.priority_balancer_picker_idx or 1, #pickers do + local picker = pickers[i] + local server, err = picker.get(ctx) + if server then + ctx.priority_balancer_picker_idx = i + return server + end + + core.log.notice("failed to get server from current priority ", + priority_index[i], + ", try next one, err: ", err) + + picker.before_retry_next_priority(ctx) + end + + return nil, "all servers tried" + end, + after_balance = function (ctx, before_retry) + local priority_balancer_picker = pickers[ctx.priority_balancer_picker_idx] + if not priority_balancer_picker or + not priority_balancer_picker.after_balance + then + return + end + + priority_balancer_picker.after_balance(ctx, before_retry) + end + } +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/balancer/roundrobin.lua b/CloudronPackages/APISIX/apisix-source/apisix/balancer/roundrobin.lua new file mode 100644 index 0000000..7090f52 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/balancer/roundrobin.lua @@ -0,0 +1,89 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) 
under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local roundrobin = require("resty.roundrobin") +local core = require("apisix.core") +local nkeys = core.table.nkeys +local pairs = pairs + + +local _M = {} + + +function _M.new(up_nodes, upstream) + local safe_limit = 0 + for _, weight in pairs(up_nodes) do + -- the weight can be zero + safe_limit = safe_limit + weight + 1 + end + + local picker = roundrobin:new(up_nodes) + local nodes_count = nkeys(up_nodes) + return { + upstream = upstream, + get = function (ctx) + if ctx.balancer_tried_servers and ctx.balancer_tried_servers_count == nodes_count then + return nil, "all upstream servers tried" + end + + local server, err + for i = 1, safe_limit do + server, err = picker:find() + if not server then + return nil, err + end + if ctx.balancer_tried_servers then + if not ctx.balancer_tried_servers[server] then + break + end + else + break + end + end + + return server + end, + after_balance = function (ctx, before_retry) + if not before_retry then + if ctx.balancer_tried_servers then + core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers) + ctx.balancer_tried_servers = nil + end + + return nil + end + + if not ctx.balancer_tried_servers then + ctx.balancer_tried_servers = 
core.tablepool.fetch("balancer_tried_servers", 0, 2) + end + + ctx.balancer_tried_servers[ctx.balancer_server] = true + ctx.balancer_tried_servers_count = (ctx.balancer_tried_servers_count or 0) + 1 + end, + before_retry_next_priority = function (ctx) + if ctx.balancer_tried_servers then + core.tablepool.release("balancer_tried_servers", ctx.balancer_tried_servers) + ctx.balancer_tried_servers = nil + end + + ctx.balancer_tried_servers_count = 0 + end, + } +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/apisix.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/apisix.lua new file mode 100755 index 0000000..079691f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/apisix.lua @@ -0,0 +1,40 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local pkg_cpath_org = package.cpath +local pkg_path_org = package.path + +local _, find_pos_end = string.find(pkg_path_org, ";", -1, true) +if not find_pos_end then + pkg_path_org = pkg_path_org .. ";" +end + +local apisix_home = "/usr/local/apisix" +local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;" + .. apisix_home .. "/deps/lib/lua/5.1/?.so;" +local pkg_path_deps = apisix_home .. 
"/deps/share/lua/5.1/?.lua;" +local pkg_path_env = apisix_home .. "/?.lua;" + +-- modify the load path to load our dependencies +package.cpath = pkg_cpath .. pkg_cpath_org +package.path = pkg_path_deps .. pkg_path_org .. pkg_path_env + +-- pass path to construct the final result +local env = require("apisix.cli.env")(apisix_home, pkg_cpath_org, pkg_path_org) +local ops = require("apisix.cli.ops") + +ops.execute(env, arg) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/config.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/config.lua new file mode 100644 index 0000000..20f0e04 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/config.lua @@ -0,0 +1,385 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local table_conact = table.concat + +local _M = { + apisix = { + node_listen = { 9080 }, + enable_admin = true, + enable_dev_mode = false, + enable_reuseport = true, + show_upstream_status_in_response_header = false, + enable_ipv6 = true, + enable_http2 = true, + enable_server_tokens = true, + extra_lua_path = "", + extra_lua_cpath = "", + proxy_cache = { + cache_ttl = "10s", + zones = { + { + name = "disk_cache_one", + memory_size = "50m", + disk_size = "1G", + disk_path = "/tmp/disk_cache_one", + cache_levels = "1:2" + }, + { + name = "memory_cache", + memory_size = "50m" + } + } + }, + delete_uri_tail_slash = false, + normalize_uri_like_servlet = false, + router = { + http = "radixtree_host_uri", + ssl = "radixtree_sni" + }, + proxy_mode = "http", + resolver_timeout = 5, + enable_resolv_search_opt = true, + ssl = { + enable = true, + listen = { { + port = 9443, + enable_http3 = false + } }, + ssl_protocols = "TLSv1.2 TLSv1.3", + ssl_ciphers = table_conact({ + "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-CHACHA20-POLY1305", + "DHE-RSA-AES128-GCM-SHA256", "DHE-RSA-AES256-GCM-SHA384", + }, ":"), + ssl_session_tickets = false, + ssl_trusted_certificate = "system" + }, + enable_control = true, + disable_sync_configuration_during_start = false, + data_encryption = { + enable_encrypt_fields = true, + keyring = { "qeddd145sfvddff3", "edd1c9f0985e76a2" } + }, + events = { + module = "lua-resty-events" + } + }, + nginx_config = { + error_log = "logs/error.log", + error_log_level = "warn", + worker_processes = "auto", + enable_cpu_affinity = false, + worker_rlimit_nofile = 20480, + worker_shutdown_timeout = "240s", + max_pending_timers = 16384, + max_running_timers = 4096, + event = { + worker_connections = 10620 + }, + meta = { + lua_shared_dict = { + ["prometheus-metrics"] = "15m", + ["standalone-config"] = "10m", + ["status-report"] = 
"1m", + } + }, + stream = { + enable_access_log = false, + access_log = "logs/access_stream.log", + -- luacheck: push max code line length 300 + access_log_format = "$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time", + -- luacheck: pop + access_log_format_escape = "default", + lua_shared_dict = { + ["etcd-cluster-health-check-stream"] = "10m", + ["lrucache-lock-stream"] = "10m", + ["plugin-limit-conn-stream"] = "10m", + ["worker-events-stream"] = "10m", + ["tars-stream"] = "1m", + ["upstream-healthcheck-stream"] = "10m", + } + }, + main_configuration_snippet = "", + http_configuration_snippet = "", + http_server_configuration_snippet = "", + http_server_location_configuration_snippet = "", + http_admin_configuration_snippet = "", + http_end_configuration_snippet = "", + stream_configuration_snippet = "", + http = { + enable_access_log = true, + access_log = "logs/access.log", + access_log_buffer = 16384, + -- luacheck: push max code line length 300 + access_log_format = + '$remote_addr - $remote_user [$time_local] $http_host "$request" $status $body_bytes_sent $request_time "$http_referer" "$http_user_agent" $upstream_addr $upstream_status $upstream_response_time "$upstream_scheme://$upstream_host$upstream_uri"', + -- luacheck: pop + access_log_format_escape = "default", + keepalive_timeout = "60s", + client_header_timeout = "60s", + client_body_timeout = "60s", + client_max_body_size = 0, + send_timeout = "10s", + underscores_in_headers = "on", + real_ip_header = "X-Real-IP", + real_ip_recursive = "off", + real_ip_from = { "127.0.0.1", "unix:" }, + proxy_ssl_server_name = true, + upstream = { + keepalive = 320, + keepalive_requests = 1000, + keepalive_timeout = "60s" + }, + charset = "utf-8", + variables_hash_max_size = 2048, + lua_shared_dict = { + ["internal-status"] = "10m", + ["plugin-limit-req"] = "10m", + ["plugin-limit-count"] = "10m", + ["prometheus-metrics"] = "10m", + ["plugin-limit-conn"] = "10m", + 
["upstream-healthcheck"] = "10m", + ["worker-events"] = "10m", + ["lrucache-lock"] = "10m", + ["balancer-ewma"] = "10m", + ["balancer-ewma-locks"] = "10m", + ["balancer-ewma-last-touched-at"] = "10m", + ["plugin-limit-req-redis-cluster-slot-lock"] = "1m", + ["plugin-limit-count-redis-cluster-slot-lock"] = "1m", + ["plugin-limit-conn-redis-cluster-slot-lock"] = "1m", + ["plugin-ai-rate-limiting"] = "10m", + ["plugin-ai-rate-limiting-reset-header"] = "10m", + tracing_buffer = "10m", + ["plugin-api-breaker"] = "10m", + ["etcd-cluster-health-check"] = "10m", + discovery = "1m", + jwks = "1m", + introspection = "10m", + ["access-tokens"] = "1m", + ["ext-plugin"] = "1m", + tars = "1m", + ["cas-auth"] = "10m", + ["ocsp-stapling"] = "10m", + ["mcp-session"] = "10m", + } + } + }, + graphql = { + max_size = 1048576 + }, + plugins = { + "real-ip", + "ai", + "client-control", + "proxy-control", + "request-id", + "zipkin", + "ext-plugin-pre-req", + "fault-injection", + "mocking", + "serverless-pre-function", + "cors", + "ip-restriction", + "ua-restriction", + "referer-restriction", + "csrf", + "uri-blocker", + "request-validation", + "chaitin-waf", + "multi-auth", + "openid-connect", + "cas-auth", + "authz-casbin", + "authz-casdoor", + "wolf-rbac", + "ldap-auth", + "hmac-auth", + "basic-auth", + "jwt-auth", + "jwe-decrypt", + "key-auth", + "consumer-restriction", + "attach-consumer-label", + "forward-auth", + "opa", + "authz-keycloak", + "proxy-cache", + "body-transformer", + "ai-prompt-template", + "ai-prompt-decorator", + "ai-prompt-guard", + "ai-rag", + "ai-rate-limiting", + "ai-proxy-multi", + "ai-proxy", + "ai-aws-content-moderation", + "proxy-mirror", + "proxy-rewrite", + "workflow", + "api-breaker", + "limit-conn", + "limit-count", + "limit-req", + "gzip", + -- deprecated and will be removed in a future release + -- "server-info", + "traffic-split", + "redirect", + "response-rewrite", + "mcp-bridge", + "degraphql", + "kafka-proxy", + "grpc-transcode", + "grpc-web", + 
"http-dubbo", + "public-api", + "prometheus", + "datadog", + "lago", + "loki-logger", + "elasticsearch-logger", + "echo", + "loggly", + "http-logger", + "splunk-hec-logging", + "skywalking-logger", + "google-cloud-logging", + "sls-logger", + "tcp-logger", + "kafka-logger", + "rocketmq-logger", + "syslog", + "udp-logger", + "file-logger", + "clickhouse-logger", + "tencent-cloud-cls", + "inspect", + "example-plugin", + "aws-lambda", + "azure-functions", + "openwhisk", + "openfunction", + "serverless-post-function", + "ext-plugin-post-req", + "ext-plugin-post-resp", + "ai-request-rewrite", + }, + stream_plugins = { "ip-restriction", "limit-conn", "mqtt-proxy", "syslog" }, + plugin_attr = { + ["log-rotate"] = { + timeout = 10000, + interval = 3600, + max_kept = 168, + max_size = -1, + enable_compression = false + }, + skywalking = { + service_name = "APISIX", + service_instance_name = "APISIX Instance Name", + endpoint_addr = "http://127.0.0.1:12800", + report_interval = 3 + }, + opentelemetry = { + trace_id_source = "x-request-id", + resource = { + ["service.name"] = "APISIX" + }, + collector = { + address = "127.0.0.1:4318", + request_timeout = 3, + request_headers = { + Authorization = "token" + } + }, + batch_span_processor = { + drop_on_queue_full = false, + max_queue_size = 1024, + batch_timeout = 2, + inactive_timeout = 1, + max_export_batch_size = tonumber(os.getenv("OTEL_BSP_MAX_EXPORT_BATCH_SIZE")) or 16 + }, + set_ngx_var = false + }, + prometheus = { + export_uri = "/apisix/prometheus/metrics", + metric_prefix = "apisix_", + enable_export_server = true, + export_addr = { + ip = "127.0.0.1", + port = 9091 + } + }, + ["server-info"] = { + report_ttl = 60 + }, + ["dubbo-proxy"] = { + upstream_multiplex_count = 32 + }, + ["proxy-mirror"] = { + timeout = { + connect = "60s", + read = "60s", + send = "60s" + } + }, + inspect = { + delay = 3, + hooks_file = "/usr/local/apisix/plugin_inspect_hooks.lua" + }, + zipkin = { + set_ngx_var = false + } + }, + deployment = 
{ + role = "traditional", + role_traditional = { + config_provider = "etcd" + }, + admin = { + admin_key_required = true, + admin_key = { + { + name = "admin", + key = "", + role = "admin" + } + }, + enable_admin_cors = true, + enable_admin_ui = true, + allow_admin = { "127.0.0.0/24" }, + admin_listen = { + ip = "0.0.0.0", + port = 9180 + }, + admin_api_version = "v3" + }, + etcd = { + host = { "http://127.0.0.1:2379" }, + prefix = "/apisix", + timeout = 30, + watch_timeout = 50, + startup_retry = 2, + tls = { + verify = true + } + } + } +} + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/env.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/env.lua new file mode 100644 index 0000000..3631483 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/env.lua @@ -0,0 +1,115 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local require = require +local util = require("apisix.cli.util") + +local pcall = pcall +local error = error +local exit = os.exit +local stderr = io.stderr +local str_find = string.find +local arg = arg +local package = package +local tonumber = tonumber + +return function (apisix_home, pkg_cpath_org, pkg_path_org) + -- ulimit setting should be checked when APISIX starts + local res, err = util.execute_cmd("ulimit -n") + if not res then + error("failed to exec ulimit cmd \'ulimit -n \', err: " .. err) + end + local trimed_res = util.trim(res) + local ulimit = trimed_res == "unlimited" and trimed_res or tonumber(trimed_res) + if not ulimit then + error("failed to fetch current maximum number of open file descriptors") + end + + -- only for developer, use current folder as working space + local is_root_path = false + local script_path = arg[0] + if script_path:sub(1, 2) == './' then + apisix_home = util.trim(util.execute_cmd("pwd")) + if not apisix_home then + error("failed to fetch current path") + end + + -- determine whether the current path is under the "/root" folder. + -- "/root/" is the root folder flag. + if str_find(apisix_home .. "/", '/root/', nil, true) == 1 then + is_root_path = true + end + + local pkg_cpath = apisix_home .. "/deps/lib64/lua/5.1/?.so;" + .. apisix_home .. "/deps/lib/lua/5.1/?.so;" + + local pkg_path = apisix_home .. "/?/init.lua;" + .. apisix_home .. "/deps/share/lua/5.1/?/init.lua;" + .. apisix_home .. "/deps/share/lua/5.1/?.lua;;" + + package.cpath = pkg_cpath .. package.cpath + package.path = pkg_path .. package.path + end + + do + -- skip luajit environment + local ok = pcall(require, "table.new") + if not ok then + local ok, json = pcall(require, "cjson") + if ok and json then + stderr:write("please remove the cjson library in Lua, it may " + .. "conflict with the cjson library in openresty. " + .. 
"\n luarocks remove lua-cjson\n") + exit(1) + end + end + end + + -- pre-transform openresty path + res, err = util.execute_cmd("command -v openresty") + if not res then + error("failed to exec cmd \'command -v openresty\', err: " .. err) + end + local openresty_path_abs = util.trim(res) + + local openresty_args = openresty_path_abs .. [[ -p ]] .. apisix_home .. [[ -c ]] + .. apisix_home .. [[/conf/nginx.conf]] + + local or_info, err = util.execute_cmd("openresty -V 2>&1") + if not or_info then + error("failed to exec cmd \'openresty -V 2>&1\', err: " .. err) + end + + local use_apisix_base = true + if not or_info:find("apisix-nginx-module", 1, true) then + use_apisix_base = false + end + + local min_etcd_version = "3.4.0" + + return { + apisix_home = apisix_home, + is_root_path = is_root_path, + openresty_args = openresty_args, + openresty_info = or_info, + use_apisix_base = use_apisix_base, + pkg_cpath_org = pkg_cpath_org, + pkg_path_org = pkg_path_org, + min_etcd_version = min_etcd_version, + ulimit = ulimit, + } +end diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/etcd.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/etcd.lua new file mode 100644 index 0000000..548a5d2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/etcd.lua @@ -0,0 +1,405 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local base64_encode = require("base64").encode +local dkjson = require("dkjson") +local constants = require("apisix.constants") +local util = require("apisix.cli.util") +local file = require("apisix.cli.file") +local http = require("socket.http") +local https = require("ssl.https") +local ltn12 = require("ltn12") + +local type = type +local ipairs = ipairs +local pairs = pairs +local print = print +local tonumber = tonumber +local str_format = string.format +local str_sub = string.sub +local table_concat = table.concat +local table_insert = table.insert +local io_stderr = io.stderr + +local _M = {} + +-- Timeout for all I/O operations +http.TIMEOUT = 3 + +local function parse_semantic_version(ver) + local errmsg = "invalid semantic version: " .. 
ver + + local parts = util.split(ver, "-") + if #parts > 2 then + return nil, errmsg + end + + if #parts == 2 then + ver = parts[1] + end + + local fields = util.split(ver, ".") + if #fields ~= 3 then + return nil, errmsg + end + + local major = tonumber(fields[1]) + local minor = tonumber(fields[2]) + local patch = tonumber(fields[3]) + + if not (major and minor and patch) then + return nil, errmsg + end + + return { + major = major, + minor = minor, + patch = patch, + } +end + + +local function compare_semantic_version(v1, v2) + local ver1, err = parse_semantic_version(v1) + if not ver1 then + return nil, err + end + + local ver2, err = parse_semantic_version(v2) + if not ver2 then + return nil, err + end + + if ver1.major ~= ver2.major then + return ver1.major < ver2.major + end + + if ver1.minor ~= ver2.minor then + return ver1.minor < ver2.minor + end + + return ver1.patch < ver2.patch +end + + +local function request(url, yaml_conf) + local response_body = {} + local single_request = false + if type(url) == "string" then + url = { + url = url, + method = "GET", + sink = ltn12.sink.table(response_body), + } + single_request = true + end + + local res, code + + if str_sub(url.url, 1, 8) == "https://" then + local verify = "peer" + if yaml_conf.etcd.tls then + local cfg = yaml_conf.etcd.tls + + if cfg.verify == false then + verify = "none" + end + + url.certificate = cfg.cert + url.key = cfg.key + + local apisix_ssl = yaml_conf.apisix.ssl + if apisix_ssl and apisix_ssl.ssl_trusted_certificate then + url.cafile = apisix_ssl.ssl_trusted_certificate + end + end + + url.verify = verify + res, code = https.request(url) + else + + res, code = http.request(url) + end + + -- In case of failure, request returns nil followed by an error message. + -- Else the first return value is the response body + -- and followed by the response status code. 
+    if single_request and res ~= nil then
+        return table_concat(response_body), code
+    end
+
+    return res, code
+end
+
+
+-- Bootstrap the etcd directory layout over etcd's gRPC-gateway (HTTP /v3 JSON API).
+-- When etcd user/password are configured, authenticate first via
+-- /v3/auth/authenticate and reuse the returned token for the PUTs.
+-- Each directory prefix gets a placeholder value ("init_dir") written via /v3/kv/put.
+-- Returns true on success, false when this host failed but others remain;
+-- calls util.die() (process exit) on unrecoverable errors.
+local function prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
+    local is_success = true
+
+    local errmsg
+    local auth_token
+    local user = yaml_conf.etcd.user
+    local password = yaml_conf.etcd.password
+    if user and password then
+        local auth_url = host .. "/v3/auth/authenticate"
+        local json_auth = {
+            name = user,
+            password = password
+        }
+
+        local post_json_auth = dkjson.encode(json_auth)
+        local response_body = {}
+
+        local res, err
+        local retry_time = 0
+        -- at most 2 attempts per host; failures fall through to util.die below
+        while retry_time < 2 do
+            res, err = request({
+                url = auth_url,
+                method = "POST",
+                source = ltn12.source.string(post_json_auth),
+                sink = ltn12.sink.table(response_body),
+                headers = {
+                    ["Content-Length"] = #post_json_auth
+                }
+            }, yaml_conf)
+            -- In case of failure, request returns nil followed by an error message.
+            -- Else the first return value is just the number 1
+            -- and followed by the response status code.
+            if res then
+                break
+            end
+            retry_time = retry_time + 1
+            print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
+                             auth_url, err, retry_time))
+        end
+
+        if not res then
+            errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", auth_url, err)
+            util.die(errmsg)
+        end
+
+        local res_auth = table_concat(response_body)
+        local body_auth, _, err_auth = dkjson.decode(res_auth)
+        if err_auth or (body_auth and not body_auth["token"]) then
+            errmsg = str_format("got malformed auth message: \"%s\" from etcd \"%s\"\n",
+                                res_auth, auth_url)
+            util.die(errmsg)
+        end
+
+        auth_token = body_auth.token
+    end
+
+
+    -- merge the HTTP and stream directory sets so each prefix is created once
+    local dirs = {}
+    for name in pairs(constants.HTTP_ETCD_DIRECTORY) do
+        dirs[name] = true
+    end
+    for name in pairs(constants.STREAM_ETCD_DIRECTORY) do
+        dirs[name] = true
+    end
+
+    for dir_name in pairs(dirs) do
+        local key = (yaml_conf.etcd.prefix or "") .. dir_name .. "/"
+
+        local put_url = host .. "/v3/kv/put"
+        -- /v3/kv/put expects base64-encoded key and value (gRPC-gateway JSON mapping)
+        local post_json = '{"value":"' .. base64_encode("init_dir")
+                          .. '", "key":"' .. base64_encode(key) .. '"}'
+        local response_body = {}
+        local headers = {["Content-Length"] = #post_json}
+        if auth_token then
+            headers["Authorization"] = auth_token
+        end
+
+        local res, err
+        local retry_time = 0
+        while retry_time < 2 do
+            res, err = request({
+                url = put_url,
+                method = "POST",
+                source = ltn12.source.string(post_json),
+                sink = ltn12.sink.table(response_body),
+                headers = headers
+            }, yaml_conf)
+            retry_time = retry_time + 1
+            if res then
+                break
+            end
+            print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
+                             put_url, err, retry_time))
+        end
+
+        if not res then
+            errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", put_url, err)
+            util.die(errmsg)
+        end
+
+        local res_put = table_concat(response_body)
+        -- a 404 body here means etcd answered but the gRPC gateway is disabled
+        if res_put:find("404 page not found", 1, true) then
+            -- NOTE(review): the second string is substituted into %s, producing a
+            -- garbled message; these two strings look like they were meant to be
+            -- concatenated instead — verify against upstream.
+            errmsg = str_format("gRPC gateway is not enabled in etcd cluster \"%s\",",
+                                "which is required by Apache APISIX\n")
+            util.die(errmsg)
+        end
+
+        if res_put:find("CommonName of client sending a request against gateway", 1, true) then
+            errmsg = str_format("etcd \"client-cert-auth\" cannot be used with gRPC-gateway, "
+                                .. "please configure the etcd username and password "
+                                .. "in configuration file\n")
+            util.die(errmsg)
+        end
+
+        -- any other "error" in the body: give the next healthy host a chance,
+        -- but die if this was the last one
+        if res_put:find("error", 1, true) then
+            is_success = false
+            if (index == host_count) then
+                errmsg = str_format("got malformed key-put message: \"%s\" from etcd \"%s\"\n",
+                                    res_put, put_url)
+                util.die(errmsg)
+            end
+
+            break
+        end
+
+        if args and args["verbose"] then
+            print(res_put)
+        end
+    end
+
+    return is_success
+end
+
+
+-- Dispatch wrapper: only the HTTP (gRPC-gateway) transport is implemented here.
+local function prepare_dirs(yaml_conf, args, index, host, host_count)
+    return prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
+end
+
+
+-- `apisix init_etcd` entry point: read and validate the local YAML config,
+-- probe every configured etcd endpoint's /version for health and minimum
+-- version, then initialize the etcd directory layout on the first healthy
+-- host that accepts it. Data-plane deployments skip initialization
+-- (their etcd access is expected to be read-only).
+function _M.init(env, args)
+    -- read_yaml_conf
+    local yaml_conf, err = file.read_yaml_conf(env.apisix_home)
+    if not yaml_conf then
+        util.die("failed to read local yaml config of apisix: ", err)
+    end
+
+    if not yaml_conf.apisix then
+        util.die("failed to read `apisix` field from yaml file when init etcd")
+    end
+
+    -- nothing to do when config is not provided by etcd (e.g. standalone yaml)
+    if yaml_conf.deployment.config_provider ~= "etcd" then
+        return true
+    end
+
+    if not yaml_conf.etcd then
+        util.die("failed to read `etcd` field from yaml file when init etcd")
+    end
+
+    -- convert old single etcd config to multiple etcd config
+    if type(yaml_conf.etcd.host) == "string" then
+        yaml_conf.etcd.host = {yaml_conf.etcd.host}
+    end
+
+    local host_count = #(yaml_conf.etcd.host)
+    local scheme
+    for i = 1, host_count do
+        local host = yaml_conf.etcd.host[i]
+        local fields = util.split(host, "://")
+        if not fields then
+            util.die("malformed etcd endpoint: ", host, "\n")
+        end
+
+        -- warn (but proceed) when endpoints mix http:// and https://
+        if not scheme then
+            scheme = fields[1]
+        elseif scheme ~= fields[1] then
+            print([[WARNING: mixed protocols among etcd endpoints]])
+        end
+    end
+
+    -- check the etcd cluster version
+    local etcd_healthy_hosts = {}
+    for index, host in ipairs(yaml_conf.etcd.host) do
+        local version_url = host .. "/version"
+        local errmsg
+
+        local res, err
+        local retry_time = 0
+
+        local etcd = yaml_conf.etcd
+        -- etcd.startup_retry overrides the default of 2 attempts per host
+        local max_retry = tonumber(etcd.startup_retry) or 2
+        while retry_time < max_retry do
+            res, err = request(version_url, yaml_conf)
+            -- In case of failure, request returns nil followed by an error message.
+            -- Else the first return value is the response body
+            -- and followed by the response status code.
+            if res then
+                break
+            end
+            retry_time = retry_time + 1
+            print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
+                             version_url, err, retry_time))
+        end
+
+        if res then
+            local body, _, err = dkjson.decode(res)
+            if err or (body and not body["etcdcluster"]) then
+                errmsg = str_format("got malformed version message: \"%s\" from etcd \"%s\"\n", res,
+                                    version_url)
+                util.die(errmsg)
+            end
+
+            local cluster_version = body["etcdcluster"]
+            if compare_semantic_version(cluster_version, env.min_etcd_version) then
+                util.die("etcd cluster version ", cluster_version,
+                         " is less than the required version ", env.min_etcd_version,
+                         ", please upgrade your etcd cluster\n")
+            end
+
+            table_insert(etcd_healthy_hosts, host)
+        else
+            -- unreachable host: log to stderr and keep probing the rest
+            io_stderr:write(str_format("request etcd endpoint \'%s\' error, %s\n", version_url,
+                                       err))
+        end
+    end
+
+    if #etcd_healthy_hosts <= 0 then
+        util.die("all etcd nodes are unavailable\n")
+    end
+
+    -- require a strict majority of healthy nodes before touching the cluster
+    if (#etcd_healthy_hosts / host_count * 100) <= 50 then
+        util.die("the etcd cluster needs at least 50% and above healthy nodes\n")
+    end
+
+    -- access from the data plane to etcd should be read-only.
+    -- data plane writes to etcd may cause security issues.
+    if yaml_conf.deployment.role == "data_plane" then
+        print("access from the data plane to etcd should be read-only, "
+              .."skip initializing the data of etcd")
+        return true
+    end
+
+    print("trying to initialize the data of etcd")
+    local etcd_ok = false
+    for index, host in ipairs(etcd_healthy_hosts) do
+        if prepare_dirs(yaml_conf, args, index, host, host_count) then
+            etcd_ok = true
+            break
+        end
+    end
+
+    if not etcd_ok then
+        util.die("none of the configured etcd works well\n")
+    end
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/file.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/file.lua
new file mode 100644
index 0000000..3687363
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/file.lua
@@ -0,0 +1,343 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ngx = ngx
+local yaml = require("lyaml")
+local profile = require("apisix.core.profile")
+local util = require("apisix.cli.util")
+local schema = require("apisix.cli.schema")
+local default_conf = require("apisix.cli.config")
+local dkjson = require("dkjson")
+local pl_path = require("pl.path")
+
+local pairs = pairs
+local type = type
+local tonumber = tonumber
+local getenv = os.getenv
+local str_gmatch = string.gmatch
+local str_find = string.find
+local str_sub = string.sub
+local print = print
+
+local _M = {}
+-- lazily-created map of env var name -> resolved value, filled by var_sub()
+local exported_vars
+
+
+-- Return the environment variables that were substituted into the config
+-- (nil when no substitution happened yet).
+function _M.get_exported_vars()
+    return exported_vars
+end
+
+
+-- True for blank lines and comment-only (#) lines in a YAML document.
+local function is_empty_yaml_line(line)
+    return line == '' or str_find(line, '^%s*$') or str_find(line, '^%s*#')
+end
+
+
+-- Heuristic: a table is an "array" when every key is covered by the
+-- sequence part (#t equals the total number of entries).
+local function tab_is_array(t)
+    local count = 0
+    for k, v in pairs(t) do
+        count = count + 1
+    end
+
+    return #t == count
+end
+
+
+-- Substitute `${{VAR}}` / `${{VAR:=default}}` placeholders in a config string
+-- with environment-variable values. Returns (new_value, var_used, err):
+-- var_used is true when at least one variable was substituted; err is set
+-- when a referenced variable is missing and has no default.
+local function var_sub(val)
+    local err
+    local var_used = false
+    -- we use '${{var}}' because '$var' and '${var}' are taken
+    -- by Nginx
+    local new_val = val:gsub("%$%{%{%s*([%w_]+[%:%=]?.-)%s*%}%}", function(var)
+        -- split "NAME:=default" into the name and its trimmed default value
+        local i, j = var:find("%:%=")
+        local default
+        if i and j then
+            default = var:sub(i + 2, #var)
+            default = default:gsub('^%s*(.-)%s*$', '%1')
+            var = var:sub(1, i - 1)
+        end
+
+        local v = getenv(var) or default
+        if v then
+            if not exported_vars then
+                exported_vars = {}
+            end
+
+            exported_vars[var] = v
+            var_used = true
+            return v
+        end
+
+        err = "failed to handle configuration: " ..
+              "can't find environment variable " .. 
var
+        return ""
+    end)
+    return new_val, var_used, err
+end
+
+
+-- Recursively substitute `${{VAR}}` placeholders in both the keys and the
+-- string values of a config table (in place). String values that resolve to
+-- numbers/booleans are coerced to the matching Lua type. Returns true on
+-- success, or nil plus an error message when a variable cannot be resolved.
+local function resolve_conf_var(conf)
+    local new_keys = {}
+    for key, val in pairs(conf) do
+        -- avoid re-iterating the table for already iterated key
+        if new_keys[key] then
+            goto continue
+        end
+        -- substitute environment variables from conf keys
+        if type(key) == "string" then
+            local new_key, _, err = var_sub(key)
+            if err then
+                return nil, err
+            end
+            if new_key ~= key then
+                new_keys[new_key] = "dummy" -- we only care about checking the key
+                -- BUGFIX: remove the old, pre-substitution key. The previous
+                -- `conf.key = nil` deleted the literal field "key" and left the
+                -- stale `${{VAR}}` entry in the table.
+                conf[key] = nil
+                conf[new_key] = val
+                key = new_key
+            end
+        end
+        if type(val) == "table" then
+            local ok, err = resolve_conf_var(val)
+            if not ok then
+                return nil, err
+            end
+
+        elseif type(val) == "string" then
+            local new_val, var_used, err = var_sub(val)
+
+            if err then
+                return nil, err
+            end
+
+            -- only coerce types when a substitution actually happened, so
+            -- literal strings like "123" in the YAML keep their string type
+            if var_used then
+                if tonumber(new_val) ~= nil then
+                    new_val = tonumber(new_val)
+                elseif new_val == "true" then
+                    new_val = true
+                elseif new_val == "false" then
+                    new_val = false
+                end
+            end
+
+            conf[key] = new_val
+        end
+        ::continue::
+    end
+
+    return true
+end
+
+
+_M.resolve_conf_var = resolve_conf_var
+
+
+-- Apply reserved environment-variable overrides to the parsed config.
+-- APISIX_DEPLOYMENT_ETCD_HOST (a JSON array) replaces deployment.etcd.host.
+local function replace_by_reserved_env_vars(conf)
+    -- TODO: support more reserved environment variables
+    local v = getenv("APISIX_DEPLOYMENT_ETCD_HOST")
+    if v and conf["deployment"] and conf["deployment"]["etcd"] then
+        local val, _, err = dkjson.decode(v)
+        if err or not val then
+            -- best-effort: warn and keep the config value from the YAML file
+            print("parse ${APISIX_DEPLOYMENT_ETCD_HOST} failed, error:", err)
+            return
+        end
+
+        conf["deployment"]["etcd"]["host"] = val
+    end
+end
+
+
+-- Whitelist of config paths whose value may legally change type when the
+-- user config is merged over the defaults (e.g. number vs string).
+local function path_is_multi_type(path, type_val)
+    if str_sub(path, 1, 14) == "nginx_config->" and
+            (type_val == "number" or type_val == "string") then
+        return true
+    end
+
+    if path == "apisix->node_listen" and type_val == "number" then
+        return true
+    end
+
+    if path == "apisix->data_encryption->keyring" then
+        return true
+    end
+
+    return false
+end
+
+
+-- Deep-merge the user config (new_tab) over the defaults (base), tracking the
+-- current path (ppath, "a->b->c") for error reporting.
+local function merge_conf(base, new_tab, ppath)
+    ppath = ppath or 
"" + + for key, val in pairs(new_tab) do + if type(val) == "table" then + if val == yaml.null then + base[key] = nil + + elseif tab_is_array(val) then + base[key] = val + + else + if base[key] == nil then + base[key] = {} + end + + local ok, err = merge_conf( + base[key], + val, + ppath == "" and key or ppath .. "->" .. key + ) + if not ok then + return nil, err + end + end + else + local type_val = type(val) + + if base[key] == nil then + base[key] = val + elseif type(base[key]) ~= type_val then + local path = ppath == "" and key or ppath .. "->" .. key + + if path_is_multi_type(path, type_val) then + base[key] = val + else + return nil, "failed to merge, path[" .. path .. "] expect: " .. + type(base[key]) .. ", but got: " .. type_val + end + else + base[key] = val + end + end + end + + return base +end + + +function _M.read_yaml_conf(apisix_home) + if apisix_home then + profile.apisix_home = apisix_home .. "/" + end + + local local_conf_path = profile:customized_yaml_path() + if not local_conf_path then + local_conf_path = profile:yaml_path("config") + end + local user_conf_yaml, err = util.read_file(local_conf_path) + if not user_conf_yaml then + return nil, err + end + + local is_empty_file = true + for line in str_gmatch(user_conf_yaml .. 
'\n', '(.-)\r?\n') do + if not is_empty_yaml_line(line) then + is_empty_file = false + break + end + end + + if not is_empty_file then + local user_conf = yaml.load(user_conf_yaml) + if not user_conf then + return nil, "invalid config.yaml file" + end + + local ok, err = resolve_conf_var(user_conf) + if not ok then + return nil, err + end + + ok, err = merge_conf(default_conf, user_conf) + if not ok then + return nil, err + end + end + + -- fill the default value by the schema + local ok, err = schema.validate(default_conf) + if not ok then + return nil, err + end + if default_conf.deployment then + default_conf.deployment.config_provider = "etcd" + if default_conf.deployment.role == "traditional" then + default_conf.etcd = default_conf.deployment.etcd + if default_conf.deployment.role_traditional.config_provider == "yaml" then + default_conf.deployment.config_provider = "yaml" + end + + elseif default_conf.deployment.role == "control_plane" then + default_conf.etcd = default_conf.deployment.etcd + default_conf.apisix.enable_admin = true + + elseif default_conf.deployment.role == "data_plane" then + default_conf.etcd = default_conf.deployment.etcd + if default_conf.deployment.role_data_plane.config_provider == "yaml" then + default_conf.deployment.config_provider = "yaml" + elseif default_conf.deployment.role_data_plane.config_provider == "json" then + default_conf.deployment.config_provider = "json" + elseif default_conf.deployment.role_data_plane.config_provider == "xds" then + default_conf.deployment.config_provider = "xds" + end + default_conf.apisix.enable_admin = false + end + end + + --- using `not ngx` to check whether the current execution environment is apisix cli module, + --- because it is only necessary to parse and validate `apisix.yaml` in apisix cli. 
+ if default_conf.deployment.config_provider == "yaml" and not ngx then + local apisix_conf_path = profile:yaml_path("apisix") + local apisix_conf_yaml, _ = util.read_file(apisix_conf_path) + if apisix_conf_yaml then + local apisix_conf = yaml.load(apisix_conf_yaml) + if apisix_conf then + local ok, err = resolve_conf_var(apisix_conf) + if not ok then + return nil, err + end + end + end + end + + local apisix_ssl = default_conf.apisix.ssl + if apisix_ssl and apisix_ssl.ssl_trusted_certificate then + -- default value is set to "system" during schema validation + if apisix_ssl.ssl_trusted_certificate == "system" then + local trusted_certs_path, err = util.get_system_trusted_certs_filepath() + if not trusted_certs_path then + util.die(err) + end + + apisix_ssl.ssl_trusted_certificate = trusted_certs_path + else + -- During validation, the path is relative to PWD + -- When Nginx starts, the path is relative to conf + -- Therefore we need to check the absolute version instead + local cert_path = pl_path.abspath(apisix_ssl.ssl_trusted_certificate) + if not pl_path.exists(cert_path) then + util.die("certificate path", cert_path, "doesn't exist\n") + end + apisix_ssl.ssl_trusted_certificate = cert_path + end + end + + replace_by_reserved_env_vars(default_conf) + + return default_conf +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/ip.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/ip.lua new file mode 100644 index 0000000..182b824 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/ip.lua @@ -0,0 +1,66 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+--- IP match and verify module.
+--
+-- @module cli.ip
+
+local mediador_ip = require("resty.mediador.ip")
+local setmetatable = setmetatable
+
+
+local _M = {}
+local mt = { __index = _M }
+
+
+---
+-- Create an instance of module cli.ip.
+--
+-- @function cli.ip:new
+-- @tparam string ip IP or CIDR.
+-- @treturn instance of the module if the given ip is valid; nil and an error message otherwise.
+function _M.new(self, ip)
+    if not mediador_ip.valid(ip) then
+        return nil, "invalid ip"
+    end
+
+    local _ip = mediador_ip.parse(ip)
+
+    return setmetatable({ _ip = _ip }, mt)
+end
+
+
+---
+-- Is the given ip a loopback address?
+--
+-- @function cli.ip:is_loopback
+-- @treturn boolean True if the given ip is a loopback address, false otherwise.
+function _M.is_loopback(self)
+    return self._ip and "loopback" == self._ip:range()
+end
+
+---
+-- Is the given ip unspecified?
+--
+-- @function cli.ip:is_unspecified
+-- @treturn boolean True if the given ip is the unspecified address, false otherwise.
+function _M.is_unspecified(self)
+    return self._ip and "unspecified" == self._ip:range()
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/ngx_tpl.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/ngx_tpl.lua
new file mode 100644
index 0000000..5dd739b
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/ngx_tpl.lua
@@ -0,0 +1,998 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. 
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +return [=[ +# Configuration File - Nginx Server Configs +# This is a read-only file, do not try to modify it. +{% if user and user ~= '' then %} +user {* user *}; +{% end %} +master_process on; + +worker_processes {* worker_processes *}; +{% if os_name == "Linux" and enable_cpu_affinity == true then %} +worker_cpu_affinity auto; +{% end %} + +# main configuration snippet starts +{% if main_configuration_snippet then %} +{* main_configuration_snippet *} +{% end %} +# main configuration snippet ends + +error_log {* error_log *} {* error_log_level or "warn" *}; +pid logs/nginx.pid; + +worker_rlimit_nofile {* worker_rlimit_nofile *}; + +events { + accept_mutex off; + worker_connections {* event.worker_connections *}; +} + +worker_rlimit_core {* worker_rlimit_core *}; + +worker_shutdown_timeout {* worker_shutdown_timeout *}; + +env APISIX_PROFILE; +env PATH; # for searching external plugin runner's binary + +# reserved environment variables for configuration +env APISIX_DEPLOYMENT_ETCD_HOST; + +{% if envs then %} +{% for _, name in ipairs(envs) do %} +env {*name*}; +{% end %} +{% end %} + +{% if use_apisix_base then %} +thread_pool grpc-client-nginx-module threads=1; + +lua { + {% if enabled_stream_plugins["prometheus"] then %} + lua_shared_dict prometheus-metrics {* 
meta.lua_shared_dict["prometheus-metrics"] *}; + {% end %} + {% if standalone_with_admin_api then %} + lua_shared_dict standalone-config {* meta.lua_shared_dict["standalone-config"] *}; + {% end %} + {% if status then %} + lua_shared_dict status-report {* meta.lua_shared_dict["status-report"] *}; + {% end %} + lua_shared_dict nacos 10m; +} + +{% if enabled_stream_plugins["prometheus"] and not enable_http then %} +http { + lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=] + .. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};"; + lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=] + .. [=[$prefix/deps/lib/lua/5.1/?.so;;]=] + .. [=[{*lua_cpath*};"; + + {% if enabled_stream_plugins["prometheus"] then %} + + init_by_lua_block { + require "resty.core" + local process = require("ngx.process") + local ok, err = process.enable_privileged_agent() + if not ok then + ngx.log(ngx.ERR, "failed to enable privileged_agent: ", err) + end + } + + init_worker_by_lua_block { + require("apisix.plugins.prometheus.exporter").http_init(true) + } + + server { + {% if use_apisix_base then %} + listen {* prometheus_server_addr *} enable_process=privileged_agent; + {% else %} + listen {* prometheus_server_addr *}; + {% end %} + + access_log off; + + location / { + content_by_lua_block { + local prometheus = require("apisix.plugins.prometheus.exporter") + prometheus.export_metrics(true) + } + } + + location = /apisix/nginx_status { + allow 127.0.0.0/24; + deny all; + stub_status; + } + } + {% end %} +} +{% end %} + +{% end %} + +{% if enable_stream then %} +stream { + lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=] + .. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};"; + lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=] + .. [=[$prefix/deps/lib/lua/5.1/?.so;;]=] + .. 
[=[{*lua_cpath*};"; + lua_socket_log_errors off; + + {% if max_pending_timers then %} + lua_max_pending_timers {* max_pending_timers *}; + {% end %} + {% if max_running_timers then %} + lua_max_running_timers {* max_running_timers *}; + {% end %} + + lua_shared_dict lrucache-lock-stream {* stream.lua_shared_dict["lrucache-lock-stream"] *}; + lua_shared_dict etcd-cluster-health-check-stream {* stream.lua_shared_dict["etcd-cluster-health-check-stream"] *}; + lua_shared_dict worker-events-stream {* stream.lua_shared_dict["worker-events-stream"] *}; + + {% if stream.lua_shared_dict["upstream-healthcheck-stream"] then %} + lua_shared_dict upstream-healthcheck-stream {* stream.lua_shared_dict["upstream-healthcheck-stream"] *}; + {% end %} + + {% if enabled_discoveries["tars"] then %} + lua_shared_dict tars-stream {* stream.lua_shared_dict["tars-stream"] *}; + {% end %} + + {% if enabled_stream_plugins["limit-conn"] then %} + lua_shared_dict plugin-limit-conn-stream {* stream.lua_shared_dict["plugin-limit-conn-stream"] *}; + {% end %} + + # for discovery shared dict + {% if discovery_shared_dicts then %} + {% for key, size in pairs(discovery_shared_dicts) do %} + lua_shared_dict {*key*}-stream {*size*}; + {% end %} + {% end %} + + resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} {% if dns_resolver_valid then %} valid={*dns_resolver_valid*}{% end %} ipv6={% if enable_ipv6 then %}on{% else %}off{% end %}; + resolver_timeout {*resolver_timeout*}; + + {% if ssl.ssl_trusted_certificate ~= nil then %} + lua_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *}; + {% end %} + + # for stream logs, off by default + {% if stream.enable_access_log == true then %} + log_format main escape={* stream.access_log_format_escape *} '{* stream.access_log_format *}'; + + access_log {* stream.access_log *} main buffer=16384 flush=3; + {% end %} + + # stream configuration snippet starts + {% if stream_configuration_snippet then %} + {* 
stream_configuration_snippet *} + {% end %} + # stream configuration snippet ends + + upstream apisix_backend { + server 127.0.0.1:80; + balancer_by_lua_block { + apisix.stream_balancer_phase() + } + } + + init_by_lua_block { + require "resty.core" + {% if lua_module_hook then %} + require "{* lua_module_hook *}" + {% end %} + apisix = require("apisix") + local dns_resolver = { {% for _, dns_addr in ipairs(dns_resolver or {}) do %} "{*dns_addr*}", {% end %} } + local args = { + dns_resolver = dns_resolver, + } + apisix.stream_init(args) + } + + init_worker_by_lua_block { + apisix.stream_init_worker() + } + + {% if (events.module or "") == "lua-resty-events" then %} + # the server block for lua-resty-events + server { + listen unix:{*apisix_lua_home*}/logs/stream_worker_events.sock; + access_log off; + content_by_lua_block { + require("resty.events.compat").run() + } + } + {% end %} + + server { + {% for _, item in ipairs(stream_proxy.tcp or {}) do %} + listen {*item.addr*} {% if item.tls then %} ssl {% end %} {% if enable_reuseport then %} reuseport {% end %} {% if proxy_protocol and proxy_protocol.enable_tcp_pp then %} proxy_protocol {% end %}; + {% end %} + {% for _, addr in ipairs(stream_proxy.udp or {}) do %} + listen {*addr*} udp {% if enable_reuseport then %} reuseport {% end %}; + {% end %} + + {% if tcp_enable_ssl then %} + ssl_certificate {* ssl.ssl_cert *}; + ssl_certificate_key {* ssl.ssl_cert_key *}; + + ssl_client_hello_by_lua_block { + apisix.ssl_client_hello_phase() + } + + ssl_certificate_by_lua_block { + apisix.ssl_phase() + } + {% end %} + + {% if proxy_protocol and proxy_protocol.enable_tcp_pp_to_upstream then %} + proxy_protocol on; + {% end %} + + preread_by_lua_block { + apisix.stream_preread_phase() + } + + proxy_pass apisix_backend; + + {% if use_apisix_base then %} + set $upstream_sni "apisix_backend"; + proxy_ssl_server_name on; + proxy_ssl_name $upstream_sni; + {% end %} + + log_by_lua_block { + apisix.stream_log_phase() + } + } +} +{% 
end %} + +{% if enable_http then %} +http { + # put extra_lua_path in front of the builtin path + # so user can override the source code + lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=] + .. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};"; + lua_package_cpath "{*extra_lua_cpath*}$prefix/deps/lib64/lua/5.1/?.so;]=] + .. [=[$prefix/deps/lib/lua/5.1/?.so;;]=] + .. [=[{*lua_cpath*};"; + + {% if max_pending_timers then %} + lua_max_pending_timers {* max_pending_timers *}; + {% end %} + {% if max_running_timers then %} + lua_max_running_timers {* max_running_timers *}; + {% end %} + + lua_shared_dict internal-status {* http.lua_shared_dict["internal-status"] *}; + lua_shared_dict upstream-healthcheck {* http.lua_shared_dict["upstream-healthcheck"] *}; + lua_shared_dict worker-events {* http.lua_shared_dict["worker-events"] *}; + lua_shared_dict lrucache-lock {* http.lua_shared_dict["lrucache-lock"] *}; + lua_shared_dict balancer-ewma {* http.lua_shared_dict["balancer-ewma"] *}; + lua_shared_dict balancer-ewma-locks {* http.lua_shared_dict["balancer-ewma-locks"] *}; + lua_shared_dict balancer-ewma-last-touched-at {* http.lua_shared_dict["balancer-ewma-last-touched-at"] *}; + lua_shared_dict etcd-cluster-health-check {* http.lua_shared_dict["etcd-cluster-health-check"] *}; # etcd health check + + # for discovery shared dict + {% if discovery_shared_dicts then %} + {% for key, size in pairs(discovery_shared_dicts) do %} + lua_shared_dict {*key*} {*size*}; + {% end %} + {% end %} + + {% if enabled_discoveries["tars"] then %} + lua_shared_dict tars {* http.lua_shared_dict["tars"] *}; + {% end %} + + + {% if http.lua_shared_dict["plugin-ai-rate-limiting"] then %} + lua_shared_dict plugin-ai-rate-limiting {* http.lua_shared_dict["plugin-ai-rate-limiting"] *}; + {% else %} + lua_shared_dict plugin-ai-rate-limiting 10m; + {% end %} + + {% if http.lua_shared_dict["plugin-ai-rate-limiting"] 
then %} + lua_shared_dict plugin-ai-rate-limiting-reset-header {* http.lua_shared_dict["plugin-ai-rate-limiting-reset-header"] *}; + {% else %} + lua_shared_dict plugin-ai-rate-limiting-reset-header 10m; + {% end %} + + {% if enabled_plugins["limit-conn"] then %} + lua_shared_dict plugin-limit-conn {* http.lua_shared_dict["plugin-limit-conn"] *}; + lua_shared_dict plugin-limit-conn-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-conn-redis-cluster-slot-lock"] *}; + {% end %} + + {% if enabled_plugins["limit-req"] then %} + lua_shared_dict plugin-limit-req-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-req-redis-cluster-slot-lock"] *}; + lua_shared_dict plugin-limit-req {* http.lua_shared_dict["plugin-limit-req"] *}; + {% end %} + + {% if enabled_plugins["limit-count"] then %} + lua_shared_dict plugin-limit-count {* http.lua_shared_dict["plugin-limit-count"] *}; + lua_shared_dict plugin-limit-count-redis-cluster-slot-lock {* http.lua_shared_dict["plugin-limit-count-redis-cluster-slot-lock"] *}; + lua_shared_dict plugin-limit-count-reset-header {* http.lua_shared_dict["plugin-limit-count"] *}; + {% end %} + + {% if enabled_plugins["prometheus"] and not enabled_stream_plugins["prometheus"] then %} + lua_shared_dict prometheus-metrics {* http.lua_shared_dict["prometheus-metrics"] *}; + {% end %} + + {% if enabled_plugins["skywalking"] then %} + lua_shared_dict tracing_buffer {* http.lua_shared_dict.tracing_buffer *}; # plugin: skywalking + {% end %} + + {% if enabled_plugins["api-breaker"] then %} + lua_shared_dict plugin-api-breaker {* http.lua_shared_dict["plugin-api-breaker"] *}; + {% end %} + + {% if enabled_plugins["openid-connect"] or enabled_plugins["authz-keycloak"] then %} + # for openid-connect and authz-keycloak plugin + lua_shared_dict discovery {* http.lua_shared_dict["discovery"] *}; # cache for discovery metadata documents + {% end %} + + {% if enabled_plugins["openid-connect"] then %} + # for openid-connect plugin + 
lua_shared_dict jwks {* http.lua_shared_dict["jwks"] *}; # cache for JWKs + lua_shared_dict introspection {* http.lua_shared_dict["introspection"] *}; # cache for JWT verification results + {% end %} + + {% if enabled_plugins["cas-auth"] then %} + lua_shared_dict cas_sessions {* http.lua_shared_dict["cas-auth"] *}; + {% end %} + + {% if enabled_plugins["authz-keycloak"] then %} + # for authz-keycloak + lua_shared_dict access-tokens {* http.lua_shared_dict["access-tokens"] *}; # cache for service account access tokens + {% end %} + + {% if enabled_plugins["ocsp-stapling"] then %} + lua_shared_dict ocsp-stapling {* http.lua_shared_dict["ocsp-stapling"] *}; # cache for ocsp-stapling + {% end %} + + {% if enabled_plugins["ext-plugin-pre-req"] or enabled_plugins["ext-plugin-post-req"] then %} + lua_shared_dict ext-plugin {* http.lua_shared_dict["ext-plugin"] *}; # cache for ext-plugin + {% end %} + + {% if enabled_plugins["mcp-bridge"] then %} + lua_shared_dict mcp-session {* http.lua_shared_dict["mcp-session"] *}; # cache for mcp-session + {% end %} + + {% if config_center == "xds" then %} + lua_shared_dict xds-config 10m; + lua_shared_dict xds-config-version 1m; + {% end %} + + # for custom shared dict + {% if http.custom_lua_shared_dict then %} + {% for cache_key, cache_size in pairs(http.custom_lua_shared_dict) do %} + lua_shared_dict {*cache_key*} {*cache_size*}; + {% end %} + {% end %} + + {% if enabled_plugins["error-log-logger"] then %} + lua_capture_error_log 10m; + {% end %} + + lua_ssl_verify_depth 5; + ssl_session_timeout 86400; + + {% if http.underscores_in_headers then %} + underscores_in_headers {* http.underscores_in_headers *}; + {%end%} + + lua_socket_log_errors off; + + resolver {% for _, dns_addr in ipairs(dns_resolver or {}) do %} {*dns_addr*} {% end %} {% if dns_resolver_valid then %} valid={*dns_resolver_valid*}{% end %} ipv6={% if enable_ipv6 then %}on{% else %}off{% end %}; + resolver_timeout {*resolver_timeout*}; + + lua_http10_buffering off; + 
+ lua_regex_match_limit 100000; + lua_regex_cache_max_entries 8192; + + {% if http.enable_access_log == false then %} + access_log off; + {% else %} + log_format main escape={* http.access_log_format_escape *} '{* http.access_log_format *}'; + uninitialized_variable_warn off; + + {% if http.access_log_buffer then %} + access_log {* http.access_log *} main buffer={* http.access_log_buffer *} flush=3; + {% else %} + access_log {* http.access_log *} main buffer=16384 flush=3; + {% end %} + {% end %} + open_file_cache max=1000 inactive=60; + client_max_body_size {* http.client_max_body_size *}; + keepalive_timeout {* http.keepalive_timeout *}; + client_header_timeout {* http.client_header_timeout *}; + client_body_timeout {* http.client_body_timeout *}; + send_timeout {* http.send_timeout *}; + variables_hash_max_size {* http.variables_hash_max_size *}; + + server_tokens off; + + include mime.types; + charset {* http.charset *}; + + {% if http.real_ip_header then %} + real_ip_header {* http.real_ip_header *}; + {% end %} + + {% if http.real_ip_recursive then %} + real_ip_recursive {* http.real_ip_recursive *}; + {% end %} + + {% if http.real_ip_from then %} + {% for _, real_ip in ipairs(http.real_ip_from) do %} + set_real_ip_from {*real_ip*}; + {% end %} + {% end %} + + {% if ssl.ssl_trusted_certificate ~= nil then %} + lua_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *}; + {% end %} + # http configuration snippet starts + {% if http_configuration_snippet then %} + {* http_configuration_snippet *} + {% end %} + # http configuration snippet ends + + upstream apisix_backend { + server 0.0.0.1; + + {% if use_apisix_base then %} + keepalive {* http.upstream.keepalive *}; + keepalive_requests {* http.upstream.keepalive_requests *}; + keepalive_timeout {* http.upstream.keepalive_timeout *}; + # we put the static configuration above so that we can override it in the Lua code + + balancer_by_lua_block { + apisix.http_balancer_phase() + } + {% else %} + 
balancer_by_lua_block { + apisix.http_balancer_phase() + } + + keepalive {* http.upstream.keepalive *}; + keepalive_requests {* http.upstream.keepalive_requests *}; + keepalive_timeout {* http.upstream.keepalive_timeout *}; + {% end %} + } + + {% if enabled_plugins["dubbo-proxy"] then %} + upstream apisix_dubbo_backend { + server 0.0.0.1; + balancer_by_lua_block { + apisix.http_balancer_phase() + } + + # dynamical keepalive doesn't work with dubbo as the connection here + # is managed by ngx_multi_upstream_module + multi {* dubbo_upstream_multiplex_count *}; + keepalive {* http.upstream.keepalive *}; + keepalive_requests {* http.upstream.keepalive_requests *}; + keepalive_timeout {* http.upstream.keepalive_timeout *}; + } + {% end %} + + {% if use_apisix_base then %} + apisix_delay_client_max_body_check on; + apisix_mirror_on_demand on; + {% end %} + + {% if wasm then %} + wasm_vm wasmtime; + {% end %} + + init_by_lua_block { + require "resty.core" + {% if lua_module_hook then %} + require "{* lua_module_hook *}" + {% end %} + apisix = require("apisix") + + local dns_resolver = { {% for _, dns_addr in ipairs(dns_resolver or {}) do %} "{*dns_addr*}", {% end %} } + local args = { + dns_resolver = dns_resolver, + } + apisix.http_init(args) + + -- set apisix_lua_home into constants module + -- it may be used by plugins to determine the work path of apisix + local constants = require("apisix.constants") + constants.apisix_lua_home = "{*apisix_lua_home*}" + } + + init_worker_by_lua_block { + apisix.http_init_worker() + } + + exit_worker_by_lua_block { + apisix.http_exit_worker() + } + + {% if (events.module or "") == "lua-resty-events" then %} + # the server block for lua-resty-events + server { + listen unix:{*apisix_lua_home*}/logs/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } + {% end %} + + {% if enable_control then %} + server { + listen {* control_server_addr *}; + + access_log off; 
+ + location / { + content_by_lua_block { + apisix.http_control() + } + } + } + {% end %} + + {% if status then %} + server { + listen {* status_server_addr *} enable_process=privileged_agent; + access_log off; + location /status { + content_by_lua_block { + apisix.status() + } + } + location /status/ready { + content_by_lua_block { + apisix.status_ready() + } + } + } + {% end %} + + {% if enabled_plugins["prometheus"] and prometheus_server_addr then %} + server { + {% if use_apisix_base then %} + listen {* prometheus_server_addr *} enable_process=privileged_agent; + {% else %} + listen {* prometheus_server_addr *}; + {% end %} + + access_log off; + + location / { + content_by_lua_block { + local prometheus = require("apisix.plugins.prometheus.exporter") + prometheus.export_metrics() + } + } + + location = /apisix/nginx_status { + allow 127.0.0.0/24; + deny all; + stub_status; + } + } + {% end %} + + {% if enable_admin then %} + server { + {%if https_admin then%} + listen {* admin_server_addr *} ssl; + + ssl_certificate {* admin_api_mtls.admin_ssl_cert *}; + ssl_certificate_key {* admin_api_mtls.admin_ssl_cert_key *}; + {%if admin_api_mtls.admin_ssl_ca_cert and admin_api_mtls.admin_ssl_ca_cert ~= "" then%} + ssl_verify_client on; + ssl_client_certificate {* admin_api_mtls.admin_ssl_ca_cert *}; + {% end %} + + ssl_session_cache shared:SSL:20m; + ssl_protocols {* ssl.ssl_protocols *}; + ssl_ciphers {* ssl.ssl_ciphers *}; + ssl_prefer_server_ciphers on; + {% if ssl.ssl_session_tickets then %} + ssl_session_tickets on; + {% else %} + ssl_session_tickets off; + {% end %} + + {% else %} + listen {* admin_server_addr *}; + {%end%} + log_not_found off; + + # admin configuration snippet starts + {% if http_admin_configuration_snippet then %} + {* http_admin_configuration_snippet *} + {% end %} + # admin configuration snippet ends + + set $upstream_scheme 'http'; + set $upstream_host $http_host; + set $upstream_uri ''; + + {%if allow_admin then%} + {% for _, allow_ip in 
ipairs(allow_admin) do %} + allow {*allow_ip*}; + {% end %} + deny all; + {%else%} + allow all; + {%end%} + + location /apisix/admin { + content_by_lua_block { + apisix.http_admin() + } + } + + {% if enable_admin_ui then %} + location = /ui { + return 301 /ui/; + } + location ^~ /ui/ { + rewrite ^/ui/(.*)$ /$1 break; + root {* apisix_lua_home *}/ui; + try_files $uri /index.html =404; + gzip on; + gzip_types text/css application/javascript application/json; + expires 7200s; + add_header Cache-Control "private,max-age=7200"; + } + {% end %} + } + {% end %} + + {% if deployment_role ~= "control_plane" then %} + + {% if enabled_plugins["proxy-cache"] then %} + # for proxy cache + {% for _, cache in ipairs(proxy_cache.zones) do %} + {% if cache.disk_path and cache.cache_levels and cache.disk_size then %} + proxy_cache_path {* cache.disk_path *} levels={* cache.cache_levels *} keys_zone={* cache.name *}:{* cache.memory_size *} inactive=1d max_size={* cache.disk_size *} use_temp_path=off; + {% else %} + lua_shared_dict {* cache.name *} {* cache.memory_size *}; + {% end %} + {% end %} + + map $upstream_cache_zone $upstream_cache_zone_info { + {% for _, cache in ipairs(proxy_cache.zones) do %} + {% if cache.disk_path and cache.cache_levels and cache.disk_size then %} + {* cache.name *} {* cache.disk_path *},{* cache.cache_levels *}; + {% end %} + {% end %} + } + {% end %} + + server { + {% if enable_http2 then %} + http2 on; + {% end %} + {% if enable_http3_in_server_context then %} + http3 on; + {% end %} + {% for _, item in ipairs(node_listen) do %} + listen {* item.ip *}:{* item.port *} default_server {% if enable_reuseport then %} reuseport {% end %}; + {% end %} + {% if ssl.enable then %} + {% for _, item in ipairs(ssl.listen) do %} + {% if item.enable_http3 then %} + listen {* item.ip *}:{* item.port *} quic default_server {% if enable_reuseport then %} reuseport {% end %}; + listen {* item.ip *}:{* item.port *} ssl default_server; + {% else %} + listen {* item.ip 
*}:{* item.port *} ssl default_server {% if enable_reuseport then %} reuseport {% end %}; + {% end %} + {% end %} + {% end %} + {% if proxy_protocol and proxy_protocol.listen_http_port then %} + listen {* proxy_protocol.listen_http_port *} default_server proxy_protocol; + {% end %} + {% if proxy_protocol and proxy_protocol.listen_https_port then %} + listen {* proxy_protocol.listen_https_port *} ssl default_server proxy_protocol; + {% end %} + + server_name _; + + {% if ssl.enable then %} + ssl_certificate {* ssl.ssl_cert *}; + ssl_certificate_key {* ssl.ssl_cert_key *}; + ssl_session_cache shared:SSL:20m; + ssl_session_timeout 10m; + + ssl_protocols {* ssl.ssl_protocols *}; + ssl_ciphers {* ssl.ssl_ciphers *}; + ssl_prefer_server_ciphers on; + {% if ssl.ssl_session_tickets then %} + ssl_session_tickets on; + {% else %} + ssl_session_tickets off; + {% end %} + {% end %} + + {% if ssl.ssl_trusted_certificate ~= nil then %} + proxy_ssl_trusted_certificate {* ssl.ssl_trusted_certificate *}; + {% end %} + + # opentelemetry_set_ngx_var starts + {% if opentelemetry_set_ngx_var then %} + set $opentelemetry_context_traceparent ''; + set $opentelemetry_trace_id ''; + set $opentelemetry_span_id ''; + {% end %} + # opentelemetry_set_ngx_var ends + + # zipkin_set_ngx_var starts + {% if zipkin_set_ngx_var then %} + set $zipkin_context_traceparent ''; + set $zipkin_trace_id ''; + set $zipkin_span_id ''; + {% end %} + # zipkin_set_ngx_var ends + + # http server configuration snippet starts + {% if http_server_configuration_snippet then %} + {* http_server_configuration_snippet *} + {% end %} + # http server configuration snippet ends + + location = /apisix/nginx_status { + allow 127.0.0.0/24; + deny all; + access_log off; + stub_status; + } + + {% if ssl.enable then %} + ssl_client_hello_by_lua_block { + apisix.ssl_client_hello_phase() + } + + ssl_certificate_by_lua_block { + apisix.ssl_phase() + } + {% end %} + + {% if http.proxy_ssl_server_name then %} + proxy_ssl_name 
$upstream_host; + proxy_ssl_server_name on; + {% end %} + + location / { + set $upstream_mirror_host ''; + set $upstream_mirror_uri ''; + set $upstream_upgrade ''; + set $upstream_connection ''; + + set $upstream_scheme 'http'; + set $upstream_host $http_host; + set $upstream_uri ''; + set $ctx_ref ''; + + {% if wasm then %} + set $wasm_process_req_body ''; + set $wasm_process_resp_body ''; + {% end %} + + # http server location configuration snippet starts + {% if http_server_location_configuration_snippet then %} + {* http_server_location_configuration_snippet *} + {% end %} + # http server location configuration snippet ends + + {% if enabled_plugins["dubbo-proxy"] then %} + set $dubbo_service_name ''; + set $dubbo_service_version ''; + set $dubbo_method ''; + {% end %} + + access_by_lua_block { + apisix.http_access_phase() + } + + proxy_http_version 1.1; + proxy_set_header Host $upstream_host; + proxy_set_header Upgrade $upstream_upgrade; + proxy_set_header Connection $upstream_connection; + proxy_set_header X-Real-IP $remote_addr; + proxy_pass_header Date; + + ### the following x-forwarded-* headers is to send to upstream server + + set $var_x_forwarded_proto $scheme; + set $var_x_forwarded_host $host; + set $var_x_forwarded_port $server_port; + + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto; + proxy_set_header X-Forwarded-Host $var_x_forwarded_host; + proxy_set_header X-Forwarded-Port $var_x_forwarded_port; + + {% if enabled_plugins["proxy-cache"] then %} + ### the following configuration is to cache response content from upstream server + + set $upstream_cache_zone off; + set $upstream_cache_key ''; + set $upstream_cache_bypass ''; + set $upstream_no_cache ''; + + proxy_cache $upstream_cache_zone; + proxy_cache_valid any {% if proxy_cache.cache_ttl then %} {* proxy_cache.cache_ttl *} {% else %} 10s {% end %}; + proxy_cache_min_uses 1; + proxy_cache_methods GET HEAD POST; + 
proxy_cache_lock_timeout 5s; + proxy_cache_use_stale off; + proxy_cache_key $upstream_cache_key; + proxy_no_cache $upstream_no_cache; + proxy_cache_bypass $upstream_cache_bypass; + + {% end %} + + proxy_pass $upstream_scheme://apisix_backend$upstream_uri; + + {% if enabled_plugins["proxy-mirror"] then %} + mirror /proxy_mirror; + {% end %} + + header_filter_by_lua_block { + apisix.http_header_filter_phase() + } + + body_filter_by_lua_block { + apisix.http_body_filter_phase() + } + + log_by_lua_block { + apisix.http_log_phase() + } + } + + location @grpc_pass { + + access_by_lua_block { + apisix.grpc_access_phase() + } + + {% if use_apisix_base then %} + # For servers which obey the standard, when `:authority` is missing, + # `host` will be used instead. When used with apisix-runtime, we can do + # better by setting `:authority` directly + grpc_set_header ":authority" $upstream_host; + {% else %} + grpc_set_header "Host" $upstream_host; + {% end %} + grpc_set_header Content-Type application/grpc; + grpc_set_header TE trailers; + grpc_socket_keepalive on; + grpc_pass $upstream_scheme://apisix_backend; + + {% if enabled_plugins["proxy-mirror"] then %} + mirror /proxy_mirror_grpc; + {% end %} + + header_filter_by_lua_block { + apisix.http_header_filter_phase() + } + + body_filter_by_lua_block { + apisix.http_body_filter_phase() + } + + log_by_lua_block { + apisix.http_log_phase() + } + } + + {% if enabled_plugins["dubbo-proxy"] then %} + location @dubbo_pass { + access_by_lua_block { + apisix.dubbo_access_phase() + } + + dubbo_pass_all_headers on; + dubbo_pass_body on; + dubbo_pass $dubbo_service_name $dubbo_service_version $dubbo_method apisix_dubbo_backend; + + header_filter_by_lua_block { + apisix.http_header_filter_phase() + } + + body_filter_by_lua_block { + apisix.http_body_filter_phase() + } + + log_by_lua_block { + apisix.http_log_phase() + } + } + {% end %} + + {% if enabled_plugins["proxy-mirror"] then %} + location = /proxy_mirror { + internal; + + {% if not 
use_apisix_base then %} + if ($upstream_mirror_uri = "") { + return 200; + } + {% end %} + + + {% if proxy_mirror_timeouts then %} + {% if proxy_mirror_timeouts.connect then %} + proxy_connect_timeout {* proxy_mirror_timeouts.connect *}; + {% end %} + {% if proxy_mirror_timeouts.read then %} + proxy_read_timeout {* proxy_mirror_timeouts.read *}; + {% end %} + {% if proxy_mirror_timeouts.send then %} + proxy_send_timeout {* proxy_mirror_timeouts.send *}; + {% end %} + {% end %} + proxy_http_version 1.1; + proxy_set_header Host $upstream_host; + proxy_pass $upstream_mirror_uri; + } + {% end %} + + {% if enabled_plugins["proxy-mirror"] then %} + location = /proxy_mirror_grpc { + internal; + + {% if not use_apisix_base then %} + if ($upstream_mirror_uri = "") { + return 200; + } + {% end %} + + + {% if proxy_mirror_timeouts then %} + {% if proxy_mirror_timeouts.connect then %} + grpc_connect_timeout {* proxy_mirror_timeouts.connect *}; + {% end %} + {% if proxy_mirror_timeouts.read then %} + grpc_read_timeout {* proxy_mirror_timeouts.read *}; + {% end %} + {% if proxy_mirror_timeouts.send then %} + grpc_send_timeout {* proxy_mirror_timeouts.send *}; + {% end %} + {% end %} + grpc_pass $upstream_mirror_host; + } + {% end %} + } + {% end %} + + # http end configuration snippet starts + {% if http_end_configuration_snippet then %} + {* http_end_configuration_snippet *} + {% end %} + # http end configuration snippet ends +} +{% end %} +]=] diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/ops.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/ops.lua new file mode 100644 index 0000000..3b2e555 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/ops.lua @@ -0,0 +1,1013 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ver = require("apisix.core.version")
local etcd = require("apisix.cli.etcd")
local util = require("apisix.cli.util")
local file = require("apisix.cli.file")
local schema = require("apisix.cli.schema")
local ngx_tpl = require("apisix.cli.ngx_tpl")
local cli_ip = require("apisix.cli.ip")
local profile = require("apisix.core.profile")
local template = require("resty.template")
local argparse = require("argparse")
local pl_path = require("pl.path")
local lfs = require("lfs")
local signal = require("posix.signal")
local errno = require("posix.errno")

local stderr = io.stderr
local ipairs = ipairs
local pairs = pairs
local print = print
local type = type
local tostring = tostring
local tonumber = tonumber
local io_open = io.open
local execute = os.execute
local os_rename = os.rename
local os_remove = os.remove
local table_insert = table.insert
local table_remove = table.remove
local getenv = os.getenv
local max = math.max
local floor = math.floor
local str_find = string.find
local str_byte = string.byte
local str_sub = string.sub
local str_format = string.format


local _M = {}


-- Print the usage text of the `apisix` command line tool.
local function help()
    print([[
Usage: apisix [action] <argument>

help: print the apisix cli help message
init: initialize the local nginx.conf
init_etcd: initialize the data of etcd
start: start the apisix server
stop: stop the apisix server
quit: stop the apisix server gracefully
restart: restart the apisix server
reload: reload the apisix server
test: test the generated nginx.conf
version: print the version of apisix
]])
end


-- Return true when version string `cur_ver_s` is >= `need_ver_s`.
-- Dotted components are compared numerically left to right; a missing or
-- non-numeric component counts as 0, so "1.21" == "1.21.0".
local function version_greater_equal(cur_ver_s, need_ver_s)
    local cur_parts = util.split(cur_ver_s, [[.]])
    local need_parts = util.split(need_ver_s, [[.]])

    for idx = 1, max(#cur_parts, #need_parts) do
        local cur = tonumber(cur_parts[idx]) or 0
        local need = tonumber(need_parts[idx]) or 0
        if cur ~= need then
            return cur > need
        end
    end

    return true
end


-- Extract the version from the `openresty -v` banner.
-- Tries the openresty banner first, then the plain nginx banner; returns
-- nil when neither is present (i.e. openresty is not installed/in PATH).
local function get_openresty_version()
    local output = util.execute_cmd("openresty -v 2>&1")

    local banners = {
        "nginx version: openresty/",
        "nginx version: nginx/",
    }
    for _, banner in ipairs(banners) do
        local pos = str_find(output, banner, 1, true)
        if pos then
            return str_sub(output, pos + #banner)
        end
    end
end


-- Collect every `nameserver` entry from a resolv.conf style file.
-- Returns a list of addresses on success, or false plus an error message
-- when the file cannot be opened.
local function local_dns_resolver(file_path)
    local fd, err = io_open(file_path, "rb")
    if not fd then
        return false, "failed to open file: " .. file_path .. ", error info:" .. err
    end

    local dns_addrs = {}
    for line in fd:lines() do
        local addr, matched = line:gsub("^nameserver%s+([^%s]+)%s*$", "%1")
        if matched == 1 then
            table_insert(dns_addrs, addr)
        end
    end

    fd:close()
    return dns_addrs
end
-- exported for test
_M.local_dns_resolver = local_dns_resolver


-- Print the APISIX version string.
local function version()
    print(ver.VERSION)
end
-- Normalize an extra_lua_path/extra_lua_cpath value: die on an obviously
-- invalid (too short) path and guarantee a trailing ';' separator.
-- A nil/empty value yields "".
local function get_lua_path(conf)
    -- we use "" as the placeholder to enforce the type to be string
    if conf and conf ~= "" then
        if #conf < 2 then
            -- the shortest valid path is ';;'
            util.die("invalid extra_lua_path/extra_lua_cpath: \"", conf, "\"\n")
        end

        local path = conf
        if path:byte(-1) ~= str_byte(';') then
            path = path .. ';'
        end
        return path
    end

    return ""
end


-- Validate conf/config.yaml, derive every listen address / feature flag the
-- nginx template needs, and render conf/nginx.conf. Dies (via util.die) on
-- any invalid configuration. `env` is the CLI environment table built by the
-- entry point (apisix_home, ulimit, openresty_info, is_root_path, ...).
local function init(env)
    if env.is_root_path then
        print('Warning! Running apisix under /root is only suitable for '
              .. 'development environments and it is dangerous to do so. '
              .. 'It is recommended to run APISIX in a directory '
              .. 'other than /root.')
    end

    -- warn (not die) when the fd limit is too small for good performance
    local min_ulimit = 1024
    if env.ulimit ~= "unlimited" and env.ulimit <= min_ulimit then
        print(str_format("Warning! Current maximum number of open file "
                .. "descriptors [%d] is not greater than %d, please increase user limits by "
                .. "execute \'ulimit -n <new user limits>\' , otherwise the performance"
                .. " is low.", env.ulimit, min_ulimit))
    end

    -- read_yaml_conf
    local yaml_conf, err = file.read_yaml_conf(env.apisix_home)
    if not yaml_conf then
        util.die("failed to read local yaml config of apisix: ", err, "\n")
    end

    local ok, err = schema.validate(yaml_conf)
    if not ok then
        util.die(err, "\n")
    end

    -- check the Admin API token
    -- a key is considered unnecessary when the Admin API is reachable from
    -- loopback only, or when the user explicitly disabled the requirement
    local checked_admin_key = false
    local allow_admin = yaml_conf.deployment.admin and
        yaml_conf.deployment.admin.allow_admin
    if yaml_conf.apisix.enable_admin and allow_admin
       and #allow_admin == 1 and allow_admin[1] == "127.0.0.0/24" then
        checked_admin_key = true
    end
    -- check if admin_key is required
    if yaml_conf.deployment.admin.admin_key_required == false then
        checked_admin_key = true
        print("Warning! Admin key is bypassed! "
            .. "If you are deploying APISIX in a production environment, "
            .. "please enable `admin_key_required` and set a secure admin key!")
    end

    if yaml_conf.apisix.enable_admin and not checked_admin_key then
        local help = [[

%s
Please modify "admin_key" in conf/config.yaml .

]]
        local admin_key = yaml_conf.deployment.admin
        if admin_key then
            admin_key = admin_key.admin_key
        end

        if type(admin_key) ~= "table" or #admin_key == 0
        then
            util.die(help:format("ERROR: missing valid Admin API token."))
        end

        for _, admin in ipairs(admin_key) do
            -- coerce every configured key to a string; a table is invalid
            -- and is treated the same as an empty key
            if type(admin.key) == "table" then
                admin.key = ""
            else
                admin.key = tostring(admin.key)
            end

            if admin.key == "" then
                stderr:write(
                    help:format([[WARNING: using empty Admin API.
                    This will trigger APISIX to automatically generate a random Admin API token.]]),
                    "\n"
                )
            end
        end
    end

    -- https admin requires both cert and key to be configured
    if yaml_conf.deployment.admin then
        local admin_api_mtls = yaml_conf.deployment.admin.admin_api_mtls
        local https_admin = yaml_conf.deployment.admin.https_admin
        if https_admin and not (admin_api_mtls and
            admin_api_mtls.admin_ssl_cert and
            admin_api_mtls.admin_ssl_cert ~= "" and
            admin_api_mtls.admin_ssl_cert_key and
            admin_api_mtls.admin_ssl_cert_key ~= "")
        then
            util.die("missing ssl cert for https admin")
        end
    end

    -- enforce the minimum supported openresty version and required modules
    local or_ver = get_openresty_version()
    if or_ver == nil then
        util.die("can not find openresty\n")
    end

    local need_ver = "1.21.4"
    if not version_greater_equal(or_ver, need_ver) then
        util.die("openresty version must >=", need_ver, " current ", or_ver, "\n")
    end

    local or_info = env.openresty_info
    if not or_info:find("http_stub_status_module", 1, true) then
        util.die("'http_stub_status_module' module is missing in ",
                 "your openresty, please check it out.\n")
    end

    --- http is enabled by default
    local enable_http = true
    --- stream is disabled by default
    local enable_stream = false
    if yaml_conf.apisix.proxy_mode then
        --- check for "http"
        if yaml_conf.apisix.proxy_mode == "http" then
            enable_http = true
            enable_stream = false
        --- check for "stream"
        elseif yaml_conf.apisix.proxy_mode == "stream" then
            enable_stream = true
            enable_http = false
        --- check for "http&stream"
        elseif yaml_conf.apisix.proxy_mode == "http&stream" then
            enable_stream = true
            enable_http = true
        end
    end

    -- index the enabled discoveries/plugins by name for O(1) lookups below
    local enabled_discoveries = {}
    for name in pairs(yaml_conf.discovery or {}) do
        enabled_discoveries[name] = true
    end

    local enabled_plugins = {}
    for i, name in ipairs(yaml_conf.plugins or {}) do
        enabled_plugins[name] = true
    end

    local enabled_stream_plugins = {}
    for i, name in ipairs(yaml_conf.stream_plugins or {}) do
        enabled_stream_plugins[name] = true
    end

    if enabled_plugins["proxy-cache"] and not yaml_conf.apisix.proxy_cache then
        util.die("missing apisix.proxy_cache for plugin proxy-cache\n")
    end

    -- batch-requests loops requests back through the proxy, so the real
    -- client IP must be recoverable from loopback/unspecified sources
    if enabled_plugins["batch-requests"] then
        local pass_real_client_ip = false
        local real_ip_from = yaml_conf.nginx_config.http.real_ip_from
        -- the real_ip_from is enabled by default, we just need to make sure it's
        -- not disabled by the users
        if real_ip_from then
            for _, ip in ipairs(real_ip_from) do
                local _ip = cli_ip:new(ip)
                if _ip then
                    if _ip:is_loopback() or _ip:is_unspecified() then
                        pass_real_client_ip = true
                    end
                end
            end
        end

        if not pass_real_client_ip then
            util.die("missing loopback or unspecified in the nginx_config.http.real_ip_from" ..
                     " for plugin batch-requests\n")
        end
    end

    -- map of port -> owner name, used to detect conflicts between the
    -- admin/status/control/prometheus/data-plane listeners
    local ports_to_check = {}

    -- resolve an ip:port pair from configured values with fallbacks, and
    -- die when the port is already claimed by another listener
    local function validate_and_get_listen_addr(port_name, default_ip, configured_ip,
                                                default_port, configured_port)
        local ip = configured_ip or default_ip
        local port = tonumber(configured_port) or default_port
        if ports_to_check[port] ~= nil then
            util.die(port_name .. " ", port, " conflicts with ", ports_to_check[port], "\n")
        end
        ports_to_check[port] = port_name
        return ip .. ":" .. port
    end

    -- listen in admin use a separate port, support specific IP, compatible with the original style
    local admin_server_addr
    if yaml_conf.apisix.enable_admin then
        local ip = yaml_conf.deployment.admin.admin_listen.ip
        local port = yaml_conf.deployment.admin.admin_listen.port
        admin_server_addr = validate_and_get_listen_addr("admin port", "0.0.0.0", ip,
                                                         9180, port)
    end

    local status_server_addr
    if yaml_conf.apisix.status then
        status_server_addr = validate_and_get_listen_addr("status port", "127.0.0.1",
                                                          yaml_conf.apisix.status.ip, 7085,
                                                          yaml_conf.apisix.status.port)
    end

    local control_server_addr
    if yaml_conf.apisix.enable_control then
        if not yaml_conf.apisix.control then
            control_server_addr = validate_and_get_listen_addr("control port", "127.0.0.1", nil,
                                                               9090, nil)
        else
            control_server_addr = validate_and_get_listen_addr("control port", "127.0.0.1",
                                                               yaml_conf.apisix.control.ip,
                                                               9090, yaml_conf.apisix.control.port)
        end
    end

    local prometheus_server_addr
    if yaml_conf.plugin_attr.prometheus then
        local prometheus = yaml_conf.plugin_attr.prometheus
        if prometheus.enable_export_server then
            prometheus_server_addr = validate_and_get_listen_addr("prometheus port", "127.0.0.1",
                                                                  prometheus.export_addr.ip,
                                                                  9091, prometheus.export_addr.port)
        end
    end

    if enabled_stream_plugins["prometheus"] and not prometheus_server_addr then
        util.die("L4 prometheus metric should be exposed via export server\n")
    end

    -- map of "ip:port" -> scheme, used to de-duplicate listen directives
    local ip_port_to_check = {}

    -- append a {ip, port, enable_http3} entry to listen_table unless the
    -- same ip:port was already added; optionally mirror it on [::]
    local function listen_table_insert(listen_table, scheme, ip, port,
                                       enable_http3, enable_ipv6)
        if type(ip) ~= "string" then
            util.die(scheme, " listen ip format error, must be string", "\n")
        end

        if type(port) ~= "number" then
            util.die(scheme, " listen port format error, must be number", "\n")
        end

        if ports_to_check[port] ~= nil then
            util.die(scheme, " listen port ", port, " conflicts with ",
                     ports_to_check[port], "\n")
        end

        local addr = ip .. ":" .. port

        if ip_port_to_check[addr] == nil then
            table_insert(listen_table,
                         {
                             ip = ip,
                             port = port,
                             enable_http3 = enable_http3
                         })
            ip_port_to_check[addr] = scheme
        end

        if enable_ipv6 then
            ip = "[::]"
            addr = ip .. ":" .. port

            if ip_port_to_check[addr] == nil then
                table_insert(listen_table,
                             {
                                 ip = ip,
                                 port = port,
                                 enable_http3 = enable_http3
                             })
                ip_port_to_check[addr] = scheme
            end
        end
    end

    local node_listen = {}
    -- listen in http, support multiple ports and specific IP, compatible with the original style
    if type(yaml_conf.apisix.node_listen) == "number" then
        listen_table_insert(node_listen, "http", "0.0.0.0", yaml_conf.apisix.node_listen,
                            false, yaml_conf.apisix.enable_ipv6)
    elseif type(yaml_conf.apisix.node_listen) == "table" then
        for _, value in ipairs(yaml_conf.apisix.node_listen) do
            if type(value) == "number" then
                listen_table_insert(node_listen, "http", "0.0.0.0", value,
                                    false, yaml_conf.apisix.enable_ipv6)
            elseif type(value) == "table" then
                local ip = value.ip
                local port = value.port
                local enable_ipv6 = false
                local enable_http2 = value.enable_http2

                if ip == nil then
                    ip = "0.0.0.0"
                    if yaml_conf.apisix.enable_ipv6 then
                        enable_ipv6 = true
                    end
                end

                if port == nil then
                    port = 9080
                end

                -- port-level enable_http2 was removed in 3.9; fail fast
                if enable_http2 ~= nil then
                    util.die("ERROR: port level enable_http2 in node_listen is deprecated"
                             .. "from 3.9 version, and you should use enable_http2 in "
                             .. "apisix level.", "\n")
                end

                listen_table_insert(node_listen, "http", ip, port,
                                    false, enable_ipv6)
            end
        end
    end
    yaml_conf.apisix.node_listen = node_listen

    local enable_http3_in_server_context = false
    local ssl_listen = {}
    -- listen in https, support multiple ports, support specific IP
    for _, value in ipairs(yaml_conf.apisix.ssl.listen) do
        local ip = value.ip
        local port = value.port
        local enable_ipv6 = false
        local enable_http2 = value.enable_http2
        local enable_http3 = value.enable_http3

        if ip == nil then
            ip = "0.0.0.0"
            if yaml_conf.apisix.enable_ipv6 then
                enable_ipv6 = true
            end
        end

        if port == nil then
            port = 9443
        end

        if enable_http2 ~= nil then
            util.die("ERROR: port level enable_http2 in ssl.listen is deprecated"
                     .. "from 3.9 version, and you should use enable_http2 in "
                     .. "apisix level.", "\n")
        end

        if enable_http3 == nil then
            enable_http3 = false
        end
        -- any HTTP/3 listener flips the server-context-wide flag used by
        -- the nginx template
        if enable_http3 == true then
            enable_http3_in_server_context = true
        end

        listen_table_insert(ssl_listen, "https", ip, port,
                            enable_http3, enable_ipv6)
    end

    yaml_conf.apisix.ssl.listen = ssl_listen
    yaml_conf.apisix.enable_http3_in_server_context = enable_http3_in_server_context

    -- enable ssl with place holder crt&key
    yaml_conf.apisix.ssl.ssl_cert = "cert/ssl_PLACE_HOLDER.crt"
    yaml_conf.apisix.ssl.ssl_cert_key = "cert/ssl_PLACE_HOLDER.key"

    local tcp_enable_ssl
    -- compatible with the original style which only has the addr
    if enable_stream and yaml_conf.apisix.stream_proxy and yaml_conf.apisix.stream_proxy.tcp then
        local tcp = yaml_conf.apisix.stream_proxy.tcp
        for i, item in ipairs(tcp) do
            if type(item) ~= "table" then
                tcp[i] = {addr = item}
            else
                if item.tls then
                    tcp_enable_ssl = true
                end
            end
        end
    end

    local dubbo_upstream_multiplex_count = 32
    if yaml_conf.plugin_attr and yaml_conf.plugin_attr["dubbo-proxy"] then
        local dubbo_conf = yaml_conf.plugin_attr["dubbo-proxy"]
        if tonumber(dubbo_conf.upstream_multiplex_count) >= 1 then
            dubbo_upstream_multiplex_count = dubbo_conf.upstream_multiplex_count
        end
    end

    if yaml_conf.apisix.dns_resolver_valid then
        if tonumber(yaml_conf.apisix.dns_resolver_valid) == nil then
            util.die("apisix->dns_resolver_valid should be a number")
        end
    end

    local proxy_mirror_timeouts
    if yaml_conf.plugin_attr["proxy-mirror"] then
        proxy_mirror_timeouts = yaml_conf.plugin_attr["proxy-mirror"].timeout
    end

    if yaml_conf.deployment and yaml_conf.deployment.role then
        local role = yaml_conf.deployment.role
        env.deployment_role = role

        -- a control plane without an explicit admin address reuses the
        -- first data-plane listen address
        if role == "control_plane" and not admin_server_addr then
            local listen = node_listen[1]
            admin_server_addr = str_format("%s:%s", listen.ip, listen.port)
        end
    end

    local opentelemetry_set_ngx_var
    if enabled_plugins["opentelemetry"] and yaml_conf.plugin_attr["opentelemetry"] then
        opentelemetry_set_ngx_var = yaml_conf.plugin_attr["opentelemetry"].set_ngx_var
    end

    local zipkin_set_ngx_var
    if enabled_plugins["zipkin"] and yaml_conf.plugin_attr["zipkin"] then
        zipkin_set_ngx_var = yaml_conf.plugin_attr["zipkin"].set_ngx_var
    end

    -- Using template.render
    -- sys_conf is the variable table handed to the nginx.conf template;
    -- keys from yaml_conf.apisix / nginx_config / deployment.admin are
    -- merged over it below
    local sys_conf = {
        lua_path = env.pkg_path_org,
        lua_cpath = env.pkg_cpath_org,
        os_name = util.trim(util.execute_cmd("uname")),
        apisix_lua_home = env.apisix_home,
        deployment_role = env.deployment_role,
        use_apisix_base = env.use_apisix_base,
        error_log = {level = "warn"},
        enable_http = enable_http,
        enable_stream = enable_stream,
        enabled_discoveries = enabled_discoveries,
        enabled_plugins = enabled_plugins,
        enabled_stream_plugins = enabled_stream_plugins,
        dubbo_upstream_multiplex_count = dubbo_upstream_multiplex_count,
        status_server_addr = status_server_addr,
        tcp_enable_ssl = tcp_enable_ssl,
        admin_server_addr = admin_server_addr,
        control_server_addr = control_server_addr,
        prometheus_server_addr = prometheus_server_addr,
        proxy_mirror_timeouts = proxy_mirror_timeouts,
        opentelemetry_set_ngx_var = opentelemetry_set_ngx_var,
        zipkin_set_ngx_var = zipkin_set_ngx_var
    }

    if not yaml_conf.apisix then
        util.die("failed to read `apisix` field from yaml file")
    end

    if not yaml_conf.nginx_config then
        util.die("failed to read `nginx_config` field from yaml file")
    end

    if util.is_32bit_arch() then
        sys_conf["worker_rlimit_core"] = "4G"
    else
        sys_conf["worker_rlimit_core"] = "16G"
    end

    for k,v in pairs(yaml_conf.apisix) do
        sys_conf[k] = v
    end
    for k,v in pairs(yaml_conf.nginx_config) do
        sys_conf[k] = v
    end
    if yaml_conf.deployment.admin then
        for k,v in pairs(yaml_conf.deployment.admin) do
            sys_conf[k] = v
        end
    end

    sys_conf.standalone_with_admin_api = env.deployment_role == "traditional" and
        yaml_conf.apisix.enable_admin and yaml_conf.deployment.config_provider == "yaml"

    sys_conf["wasm"] = yaml_conf.wasm


    local wrn = sys_conf["worker_rlimit_nofile"]
    local wc = sys_conf["event"]["worker_connections"]
    if not wrn or wrn <= wc then
        -- ensure the number of fds is slightly larger than the number of conn
        sys_conf["worker_rlimit_nofile"] = wc + 128
    end

    if sys_conf["enable_dev_mode"] == true then
        sys_conf["worker_processes"] = 1
        sys_conf["enable_reuseport"] = false

    elseif tonumber(sys_conf["worker_processes"]) == nil then
        sys_conf["worker_processes"] = "auto"
    end

    -- fall back to the host resolv.conf when no resolver is configured
    local dns_resolver = sys_conf["dns_resolver"]
    if not dns_resolver or #dns_resolver == 0 then
        local dns_addrs, err = local_dns_resolver("/etc/resolv.conf")
        if not dns_addrs then
            util.die("failed to import local DNS: ", err, "\n")
        end

        if #dns_addrs == 0 then
            util.die("local DNS is empty\n")
        end

        sys_conf["dns_resolver"] = dns_addrs
    end

    -- NOTE(review): table_remove inside this forward ipairs loop skips the
    -- entry following a removed one — preserved as-is from upstream
    for i, r in ipairs(sys_conf["dns_resolver"]) do
        if r:match(":[^:]*:") then
            -- more than one colon, is IPv6
            if r:byte(1) ~= str_byte('[') then
                -- ensure IPv6 address is always wrapped in []
                sys_conf["dns_resolver"][i] = "[" .. r .. "]"
            end
        end

        -- check if the dns_resolver is ipv6 address with zone_id
        -- Nginx does not support this form
        if r:find("%%") then
            stderr:write("unsupported DNS resolver: " .. r ..
                         ", would ignore this item\n")
            table_remove(sys_conf["dns_resolver"], i)
        end
    end

    local env_worker_processes = getenv("APISIX_WORKER_PROCESSES")
    if env_worker_processes then
        sys_conf["worker_processes"] = floor(tonumber(env_worker_processes))
    end

    -- reconcile vars exported via config with vars already listed in envs:
    -- anything already present is marked false so it is not added twice
    local exported_vars = file.get_exported_vars()
    if exported_vars then
        if not sys_conf["envs"] then
            sys_conf["envs"]= {}
        end
        for _, cfg_env in ipairs(sys_conf["envs"]) do
            local cfg_name
            local from = str_find(cfg_env, "=", 1, true)
            if from then
                cfg_name = str_sub(cfg_env, 1, from - 1)
            else
                cfg_name = cfg_env
            end

            exported_vars[cfg_name] = false
        end

        for name, value in pairs(exported_vars) do
            if value then
                table_insert(sys_conf["envs"], name)
            end
        end
    end

    -- inject kubernetes discovery shared dict and environment variable
    if enabled_discoveries["kubernetes"] then

        if not sys_conf["discovery_shared_dicts"] then
            sys_conf["discovery_shared_dicts"] = {}
        end

        local kubernetes_conf = yaml_conf.discovery["kubernetes"]

        -- record every "${VAR}" placeholder found in the k8s client config
        -- so the variable gets exported into the nginx env
        local inject_environment = function(conf, envs)
            local keys = {
                conf.service.host,
                conf.service.port,
            }

            if conf.client.token then
                table_insert(keys, conf.client.token)
            end

            if conf.client.token_file then
                table_insert(keys, conf.client.token_file)
            end

            for _, key in ipairs(keys) do
                if #key > 3 then
                    local first, second = str_byte(key, 1, 2)
                    if first == str_byte('$') and second == str_byte('{') then
                        local last = str_byte(key, #key)
                        if last == str_byte('}') then
                            envs[str_sub(key, 3, #key - 1)] = ""
                        end
                    end
                end
            end

        end

        local envs = {}
        -- a single-cluster config is a map (#... == 0); a multi-cluster
        -- config is an array of per-cluster items with ids
        if #kubernetes_conf == 0 then
            sys_conf["discovery_shared_dicts"]["kubernetes"] = kubernetes_conf.shared_size
            inject_environment(kubernetes_conf, envs)
        else
            for _, item in ipairs(kubernetes_conf) do
                sys_conf["discovery_shared_dicts"]["kubernetes-" .. item.id] = item.shared_size
                inject_environment(item, envs)
            end
        end

        if not sys_conf["envs"] then
            sys_conf["envs"] = {}
        end

        for item in pairs(envs) do
            table_insert(sys_conf["envs"], item)
        end

    end

    -- fix up lua path
    sys_conf["extra_lua_path"] = get_lua_path(yaml_conf.apisix.extra_lua_path)
    sys_conf["extra_lua_cpath"] = get_lua_path(yaml_conf.apisix.extra_lua_cpath)

    -- render the nginx.conf template and write it under the apisix home
    local conf_render = template.compile(ngx_tpl)
    local ngxconf = conf_render(sys_conf)

    local ok, err = util.write_file(env.apisix_home .. "/conf/nginx.conf",
                                    ngxconf)
    if not ok then
        util.die("failed to update nginx.conf: ", err, "\n")
    end
end
inject_environment(kubernetes_conf, envs) + else + for _, item in ipairs(kubernetes_conf) do + sys_conf["discovery_shared_dicts"]["kubernetes-" .. item.id] = item.shared_size + inject_environment(item, envs) + end + end + + if not sys_conf["envs"] then + sys_conf["envs"] = {} + end + + for item in pairs(envs) do + table_insert(sys_conf["envs"], item) + end + + end + + -- fix up lua path + sys_conf["extra_lua_path"] = get_lua_path(yaml_conf.apisix.extra_lua_path) + sys_conf["extra_lua_cpath"] = get_lua_path(yaml_conf.apisix.extra_lua_cpath) + + local conf_render = template.compile(ngx_tpl) + local ngxconf = conf_render(sys_conf) + + local ok, err = util.write_file(env.apisix_home .. "/conf/nginx.conf", + ngxconf) + if not ok then + util.die("failed to update nginx.conf: ", err, "\n") + end +end + + +local function init_etcd(env, args) + etcd.init(env, args) +end + + +local function cleanup(env) + if env.apisix_home then + profile.apisix_home = env.apisix_home + end + + os_remove(profile:customized_yaml_index()) +end + + +local function sleep(n) + execute("sleep " .. tonumber(n)) +end + + +local function check_running(env) + local pid_path = env.apisix_home .. "/logs/nginx.pid" + local pid = util.read_file(pid_path) + pid = tonumber(pid) + if not pid then + return false, nil + end + return true, pid +end + + +local function start(env, ...) + cleanup(env) + + if env.apisix_home then + profile.apisix_home = env.apisix_home + end + + -- Because the worker process started by apisix has "nobody" permission, + -- it cannot access the `/root` directory. Therefore, it is necessary to + -- prohibit APISIX from running in the /root directory. + if env.is_root_path then + util.die("Error: It is forbidden to run APISIX in the /root directory.\n") + end + + local logs_path = env.apisix_home .. 
"/logs" + if not pl_path.exists(logs_path) then + local _, err = pl_path.mkdir(logs_path) + if err ~= nil then + util.die("failed to mkdir ", logs_path, ", error: ", err) + end + elseif not pl_path.isdir(logs_path) and not pl_path.islink(logs_path) then + util.die(logs_path, " is not directory nor symbol link") + end + + -- check running and wait old apisix stop + local pid = nil + for i = 1, 30 do + local running + running, pid = check_running(env) + if not running then + break + else + sleep(0.1) + end + end + + if pid then + if pid <= 0 then + print("invalid pid") + return + end + + local signone = 0 + + local ok, err, err_no = signal.kill(pid, signone) + if ok then + print("the old APISIX is still running, the new one will not start") + return + -- no such process + elseif err_no ~= errno.ESRCH then + print(err) + return + end + + print("nginx.pid exists but there's no corresponding process with pid ", pid, + ", the file will be overwritten") + end + + -- start a new APISIX instance + + local parser = argparse() + parser:argument("_", "Placeholder") + parser:option("-c --config", "location of customized config.yaml") + -- TODO: more logs for APISIX cli could be added using this feature + parser:flag("-v --verbose", "show init_etcd debug information") + local args = parser:parse() + + local customized_yaml = args["config"] + if customized_yaml then + local customized_yaml_path + local idx = str_find(customized_yaml, "/") + if idx and idx == 1 then + customized_yaml_path = customized_yaml + else + local cur_dir, err = lfs.currentdir() + if err then + util.die("failed to get current directory") + end + customized_yaml_path = cur_dir .. "/" .. customized_yaml + end + + if not util.file_exists(customized_yaml_path) then + util.die("customized config file not exists, path: " .. 
customized_yaml_path) + end + + local ok, err = util.write_file(profile:customized_yaml_index(), customized_yaml_path) + if not ok then + util.die("write customized config index failed, err: " .. err) + end + + print("Use customized yaml: ", customized_yaml) + end + + init(env) + + if env.deployment_role ~= "data_plane" then + init_etcd(env, args) + end + + util.execute_cmd(env.openresty_args) +end + + +local function test(env, backup_ngx_conf) + -- backup nginx.conf + local ngx_conf_path = env.apisix_home .. "/conf/nginx.conf" + local ngx_conf_path_bak = ngx_conf_path .. ".bak" + local ngx_conf_exist = pl_path.exists(ngx_conf_path) + if ngx_conf_exist then + local ok, err = os_rename(ngx_conf_path, ngx_conf_path_bak) + if not ok then + util.die("failed to backup nginx.conf, error: ", err) + end + end + + -- reinit nginx.conf + init(env) + + local test_cmd = env.openresty_args .. [[ -t -q ]] + local test_ret = execute((test_cmd)) + + -- restore nginx.conf + if ngx_conf_exist then + local ok, err = os_rename(ngx_conf_path_bak, ngx_conf_path) + if not ok then + util.die("failed to restore original nginx.conf, error: ", err) + end + end + + -- When success, + -- On linux, os.execute returns 0, + -- On macos, os.execute returns 3 values: true, exit, 0, and we need the first. + if (test_ret == 0 or test_ret == true) then + print("configuration test is successful") + return + end + + util.die("configuration test failed") +end + + +local function quit(env) + cleanup(env) + + local cmd = env.openresty_args .. [[ -s quit]] + util.execute_cmd(cmd) +end + + +local function stop(env) + cleanup(env) + + local cmd = env.openresty_args .. [[ -s stop]] + util.execute_cmd(cmd) +end + + +local function restart(env) + -- test configuration + test(env) + stop(env) + start(env) +end + + +local function reload(env) + -- reinit nginx.conf + init(env) + + local test_cmd = env.openresty_args .. 
[[ -t -q ]] + -- When success, + -- On linux, os.execute returns 0, + -- On macos, os.execute returns 3 values: true, exit, 0, and we need the first. + local test_ret = execute((test_cmd)) + if (test_ret == 0 or test_ret == true) then + local cmd = env.openresty_args .. [[ -s reload]] + execute(cmd) + return + end + + print("test openresty failed") +end + + + +local action = { + help = help, + version = version, + init = init, + init_etcd = etcd.init, + start = start, + stop = stop, + quit = quit, + restart = restart, + reload = reload, + test = test, +} + + +function _M.execute(env, arg) + local cmd_action = arg[1] + if not cmd_action then + return help() + end + + if not action[cmd_action] then + stderr:write("invalid argument: ", cmd_action, "\n") + return help() + end + + action[cmd_action](env, arg[2]) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/schema.lua new file mode 100644 index 0000000..36d758c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/schema.lua @@ -0,0 +1,450 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local jsonschema = require("jsonschema") +local pairs = pairs +local pcall = pcall +local require = require + + +local _M = {} +local etcd_schema = { + type = "object", + properties = { + resync_delay = { + type = "integer", + }, + user = { + type = "string", + }, + password = { + type = "string", + }, + tls = { + type = "object", + properties = { + cert = { + type = "string", + }, + key = { + type = "string", + }, + }, + }, + prefix = { + type = "string", + }, + host = { + type = "array", + items = { + type = "string", + pattern = [[^https?://]] + }, + minItems = 1, + }, + timeout = { + type = "integer", + default = 30, + minimum = 1, + description = "etcd connection timeout in seconds", + }, + }, + required = {"prefix", "host"} +} + +local config_schema = { + type = "object", + properties = { + apisix = { + properties = { + lua_module_hook = { + pattern = "^[a-zA-Z._-]+$", + }, + proxy_protocol = { + type = "object", + properties = { + listen_http_port = { + type = "integer", + }, + listen_https_port = { + type = "integer", + }, + enable_tcp_pp = { + type = "boolean", + }, + enable_tcp_pp_to_upstream = { + type = "boolean", + }, + } + }, + proxy_cache = { + type = "object", + properties = { + zones = { + type = "array", + minItems = 1, + items = { + type = "object", + properties = { + name = { + type = "string", + }, + memory_size = { + type = "string", + }, + disk_size = { + type = "string", + }, + disk_path = { + type = "string", + }, + cache_levels = { + type = "string", + }, + }, + oneOf = { + { + required = {"name", "memory_size"}, + maxProperties = 2, + }, + { + required = {"name", "memory_size", "disk_size", + "disk_path", "cache_levels"}, + } + }, + }, + uniqueItems = true, + } + } + }, + proxy_mode = { + type = "string", + enum = {"http", "stream", "http&stream"}, + }, + stream_proxy = { + type = "object", + properties = { + tcp = { + type = "array", + minItems = 1, + items = { + anyOf = { + { + type = "integer", + }, + { + type = "string", + }, + 
{ + type = "object", + properties = { + addr = { + anyOf = { + { + type = "integer", + }, + { + type = "string", + }, + } + }, + tls = { + type = "boolean", + } + }, + required = {"addr"} + }, + }, + }, + uniqueItems = true, + }, + udp = { + type = "array", + minItems = 1, + items = { + anyOf = { + { + type = "integer", + }, + { + type = "string", + }, + }, + }, + uniqueItems = true, + }, + } + }, + dns_resolver = { + type = "array", + minItems = 1, + items = { + type = "string", + } + }, + dns_resolver_valid = { + type = "integer", + }, + enable_http2 = { + type = "boolean", + default = true + }, + ssl = { + type = "object", + properties = { + ssl_trusted_certificate = { + type = "string", + default = "system" + }, + listen = { + type = "array", + items = { + type = "object", + properties = { + ip = { + type = "string", + }, + port = { + type = "integer", + minimum = 1, + maximum = 65535 + }, + enable_http3 = { + type = "boolean", + }, + } + } + }, + } + }, + data_encryption = { + type = "object", + properties = { + keyring = { + anyOf = { + { + type = "array", + minItems = 1, + items = { + type = "string", + minLength = 16, + maxLength = 16 + } + }, + { + type = "string", + minLength = 16, + maxLength = 16 + } + } + }, + } + }, + } + }, + nginx_config = { + type = "object", + properties = { + envs = { + type = "array", + minItems = 1, + items = { + type = "string", + } + } + }, + }, + http = { + type = "object", + properties = { + custom_lua_shared_dict = { + type = "object", + } + } + }, + etcd = etcd_schema, + plugins = { + type = "array", + default = {}, + minItems = 0, + items = { + type = "string" + } + }, + stream_plugins = { + type = "array", + default = {}, + minItems = 0, + items = { + type = "string" + } + }, + wasm = { + type = "object", + properties = { + plugins = { + type = "array", + minItems = 1, + items = { + type = "object", + properties = { + name = { + type = "string" + }, + file = { + type = "string" + }, + priority = { + type = "integer" + 
}, + http_request_phase = { + enum = {"access", "rewrite"}, + default = "access", + }, + }, + required = {"name", "file", "priority"} + } + } + } + }, + deployment = { + type = "object", + properties = { + role = { + enum = {"traditional", "control_plane", "data_plane", "standalone"}, + default = "traditional" + } + }, + }, + }, + required = {"apisix", "deployment"}, +} + +local admin_schema = { + type = "object", + properties = { + admin_key = { + type = "array", + properties = { + items = { + properties = { + name = {type = "string"}, + key = {type = "string"}, + role = {type = "string"}, + } + } + } + }, + admin_listen = { + properties = { + listen = { type = "string" }, + port = { type = "integer" }, + }, + default = { + listen = "0.0.0.0", + port = 9180, + } + }, + https_admin = { + type = "boolean", + }, + admin_key_required = { + type = "boolean", + }, + } +} + +local deployment_schema = { + traditional = { + properties = { + etcd = etcd_schema, + admin = admin_schema, + role_traditional = { + properties = { + config_provider = { + enum = {"etcd", "yaml"} + }, + }, + required = {"config_provider"} + } + }, + required = {"etcd"} + }, + control_plane = { + properties = { + etcd = etcd_schema, + admin = admin_schema, + role_control_plane = { + properties = { + config_provider = { + enum = {"etcd"} + }, + }, + required = {"config_provider"} + }, + }, + required = {"etcd", "role_control_plane"} + }, + data_plane = { + properties = { + etcd = etcd_schema, + role_data_plane = { + properties = { + config_provider = { + enum = {"etcd", "yaml", "json", "xds"} + }, + }, + required = {"config_provider"} + }, + }, + required = {"role_data_plane"} + } +} + + +function _M.validate(yaml_conf) + local validator = jsonschema.generate_validator(config_schema) + local ok, err = validator(yaml_conf) + if not ok then + return false, "failed to validate config: " .. 
err + end + + if yaml_conf.discovery then + for kind, conf in pairs(yaml_conf.discovery) do + local ok, schema = pcall(require, "apisix.discovery." .. kind .. ".schema") + if ok then + local validator = jsonschema.generate_validator(schema) + local ok, err = validator(conf) + if not ok then + return false, "invalid discovery " .. kind .. " configuration: " .. err + end + end + end + end + + local role = yaml_conf.deployment.role + local validator = jsonschema.generate_validator(deployment_schema[role]) + local ok, err = validator(yaml_conf.deployment) + if not ok then + return false, "invalid deployment " .. role .. " configuration: " .. err + end + + return true +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/cli/util.lua b/CloudronPackages/APISIX/apisix-source/apisix/cli/util.lua new file mode 100644 index 0000000..d69468e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/cli/util.lua @@ -0,0 +1,189 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local require = require +local pcall = pcall +local open = io.open +local popen = io.popen +local close = io.close +local exit = os.exit +local stderr = io.stderr +local str_format = string.format +local tonumber = tonumber +local io = io +local ipairs = ipairs +local assert = assert + +local _M = {} + + +-- Note: The `execute_cmd` return value will have a line break at the end, +-- it is recommended to use the `trim` function to handle the return value. +local function execute_cmd(cmd) + local t, err = popen(cmd) + if not t then + return nil, "failed to execute command: " + .. cmd .. ", error info: " .. err + end + + local data, err = t:read("*all") + t:close() + + if not data then + return nil, "failed to read execution result of: " + .. cmd .. ", error info: " .. err + end + + return data +end +_M.execute_cmd = execute_cmd + + +-- For commands which stdout would be always be empty, +-- forward stderr to stdout to get the error msg +function _M.execute_cmd_with_error(cmd) + return execute_cmd(cmd .. " 2>&1") +end + + +function _M.trim(s) + return (s:gsub("^%s*(.-)%s*$", "%1")) +end + + +function _M.split(self, sep) + local sep, fields = sep or ":", {} + local pattern = str_format("([^%s]+)", sep) + + self:gsub(pattern, function(c) fields[#fields + 1] = c end) + + return fields +end + + +function _M.read_file(file_path) + local file, err = open(file_path, "rb") + if not file then + return false, "failed to open file: " .. file_path .. ", error info:" .. err + end + + local data, err = file:read("*all") + file:close() + if not data then + return false, "failed to read file: " .. file_path .. ", error info:" .. err + end + + return data +end + + +function _M.die(...) + stderr:write(...) 
+ exit(1) +end + + +function _M.is_32bit_arch() + local ok, ffi = pcall(require, "ffi") + if ok then + -- LuaJIT + return ffi.abi("32bit") + end + + local ret = _M.execute_cmd("getconf LONG_BIT") + local bits = tonumber(ret) + return bits <= 32 +end + + +function _M.write_file(file_path, data) + local file, err = open(file_path, "w+") + if not file then + return false, "failed to open file: " + .. file_path + .. ", error info:" + .. err + end + + local ok, err = file:write(data) + file:close() + if not ok then + return false, "failed to write file: " + .. file_path + .. ", error info:" + .. err + end + return true +end + + +function _M.file_exists(file_path) + local f = open(file_path, "r") + return f ~= nil and close(f) +end + +do + local trusted_certs_paths = { + "/etc/ssl/certs/ca-certificates.crt", -- Debian/Ubuntu/Gentoo + "/etc/pki/tls/certs/ca-bundle.crt", -- Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", -- OpenSUSE + "/etc/pki/tls/cacert.pem", -- OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", -- CentOS/RHEL 7 + "/etc/ssl/cert.pem", -- OpenBSD, Alpine + } + + -- Check if a file exists using Lua's built-in `io.open` + local function file_exists(path) + local file = io.open(path, "r") + if file then + file:close() + return true + else + return false + end + end + + function _M.get_system_trusted_certs_filepath() + for _, path in ipairs(trusted_certs_paths) do + if file_exists(path) then + return path + end + end + + return nil, + "Could not find trusted certs file in " .. + "any of the `system`-predefined locations. " .. + "Please install a certs file there or set " .. + "`lua_ssl_trusted_certificate` to a " .. 
+ "specific file path instead of `system`" + end +end + + +function _M.gen_trusted_certs_combined_file(combined_filepath, paths) + local combined_file = assert(io.open(combined_filepath, "w")) + for _, path in ipairs(paths) do + local cert_file = assert(io.open(path, "r")) + combined_file:write(cert_file:read("*a")) + combined_file:write("\n") + cert_file:close() + end + combined_file:close() +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/constants.lua b/CloudronPackages/APISIX/apisix-source/apisix/constants.lua new file mode 100644 index 0000000..0b3ec16 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/constants.lua @@ -0,0 +1,46 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- Cross-module constants for APISIX; the module's entire public surface is the plain data table below.
+return {
+    RPC_ERROR = 0,  -- RPC message-type codes (0-4); their protocol semantics are defined at the call sites, not here
+    RPC_PREPARE_CONF = 1,
+    RPC_HTTP_REQ_CALL = 2,
+    RPC_EXTRA_INFO = 3,
+    RPC_HTTP_RESP_CALL = 4,
+    HTTP_ETCD_DIRECTORY = {  -- membership set (every value is `true`) of etcd directories; per the name, for the HTTP subsystem
+        ["/upstreams"] = true,
+        ["/plugins"] = true,
+        ["/ssls"] = true,
+        ["/stream_routes"] = true,
+        ["/plugin_metadata"] = true,
+        ["/routes"] = true,
+        ["/services"] = true,
+        ["/consumers"] = true,
+        ["/global_rules"] = true,
+        ["/protos"] = true,
+        ["/plugin_configs"] = true,
+        ["/consumer_groups"] = true,
+        ["/secrets"] = true,
+    },
+    STREAM_ETCD_DIRECTORY = {  -- membership set of etcd directories; per the name, for the stream proxy subsystem (a subset of the HTTP set)
+        ["/upstreams"] = true,
+        ["/services"] = true,
+        ["/plugins"] = true,
+        ["/ssls"] = true,
+        ["/stream_routes"] = true,
+        ["/plugin_metadata"] = true,
+    },
+}
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/consumer.lua b/CloudronPackages/APISIX/apisix-source/apisix/consumer.lua
new file mode 100644
index 0000000..d69226b
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/consumer.lua
@@ -0,0 +1,334 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+-- +local core = require("apisix.core") +local config_local = require("apisix.core.config_local") +local secret = require("apisix.secret") +local plugin = require("apisix.plugin") +local plugin_checker = require("apisix.plugin").plugin_checker +local check_schema = require("apisix.core.schema").check +local error = error +local ipairs = ipairs +local pairs = pairs +local type = type +local string_sub = string.sub +local consumers + + +local _M = { + version = 0.3, +} + +local lrucache = core.lrucache.new({ + ttl = 300, count = 512 +}) + +-- Please calculate and set the value of the "consumers_count_for_lrucache" +-- variable based on the number of consumers in the current environment, +-- taking into account the appropriate adjustment coefficient. +local consumers_count_for_lrucache = 4096 + +local function remove_etcd_prefix(key) + local prefix = "" + local local_conf = config_local.local_conf() + local role = core.table.try_read_attr(local_conf, "deployment", "role") + local provider = core.table.try_read_attr(local_conf, "deployment", "role_" .. 
+ role, "config_provider") + if provider == "etcd" and local_conf.etcd and local_conf.etcd.prefix then + prefix = local_conf.etcd.prefix + end + return string_sub(key, #prefix + 1) +end + +-- /{etcd.prefix}/consumers/{consumer_name}/credentials/{credential_id} --> {consumer_name} +local function get_consumer_name_from_credential_etcd_key(key) + local uri_segs = core.utils.split_uri(remove_etcd_prefix(key)) + return uri_segs[3] +end + +local function is_credential_etcd_key(key) + if not key then + return false + end + + local uri_segs = core.utils.split_uri(remove_etcd_prefix(key)) + return uri_segs[2] == "consumers" and uri_segs[4] == "credentials" +end + +local function get_credential_id_from_etcd_key(key) + local uri_segs = core.utils.split_uri(remove_etcd_prefix(key)) + return uri_segs[5] +end + +local function filter_consumers_list(data_list) + if #data_list == 0 then + return data_list + end + + local list = {} + for _, item in ipairs(data_list) do + if not (type(item) == "table" and is_credential_etcd_key(item.key)) then + core.table.insert(list, item) + end + end + + return list +end + +local plugin_consumer +do + local consumers_id_lrucache = core.lrucache.new({ + count = consumers_count_for_lrucache + }) + +local function construct_consumer_data(val, plugin_config) + -- if the val is a Consumer, clone it to the local consumer; + -- if the val is a Credential, to get the Consumer by consumer_name and then clone + -- it to the local consumer. + local consumer + if is_credential_etcd_key(val.key) then + local consumer_name = get_consumer_name_from_credential_etcd_key(val.key) + local the_consumer = consumers:get(consumer_name) + if the_consumer and the_consumer.value then + consumer = core.table.clone(the_consumer.value) + consumer.modifiedIndex = the_consumer.modifiedIndex + consumer.credential_id = get_credential_id_from_etcd_key(val.key) + else + -- Normally wouldn't get here: + -- it should belong to a consumer for any credential. 
+ core.log.error("failed to get the consumer for the credential,", + " a wild credential has appeared!", + " credential key: ", val.key, ", consumer name: ", consumer_name) + return nil, "failed to get the consumer for the credential" + end + else + consumer = core.table.clone(val.value) + consumer.modifiedIndex = val.modifiedIndex + end + + -- if the consumer has labels, set the field custom_id to it. + -- the custom_id is used to set in the request headers to the upstream. + if consumer.labels then + consumer.custom_id = consumer.labels["custom_id"] + end + + -- Note: the id here is the key of consumer data, which + -- is 'username' field in admin + consumer.consumer_name = consumer.id + consumer.auth_conf = plugin_config + + return consumer +end + + +function plugin_consumer() + local plugins = {} + + if consumers.values == nil then + return plugins + end + + -- consumers.values is the list that got from etcd by prefix key {etcd_prefix}/consumers. + -- So it contains consumers and credentials. + -- The val in the for-loop may be a Consumer or a Credential. + for _, val in ipairs(consumers.values) do + if type(val) ~= "table" then + goto CONTINUE + end + + for name, config in pairs(val.value.plugins or {}) do + local plugin_obj = plugin.get(name) + if plugin_obj and plugin_obj.type == "auth" then + if not plugins[name] then + plugins[name] = { + nodes = {}, + len = 0, + conf_version = consumers.conf_version + } + end + + local consumer = consumers_id_lrucache(val.value.id .. 
name, + val.modifiedIndex, construct_consumer_data, val, config) + if consumer == nil then + goto CONTINUE + end + + plugins[name].len = plugins[name].len + 1 + core.table.insert(plugins[name].nodes, plugins[name].len, + consumer) + core.log.info("consumer:", core.json.delay_encode(consumer)) + end + end + + ::CONTINUE:: + end + + return plugins +end + +end + + +_M.filter_consumers_list = filter_consumers_list + +function _M.get_consumer_key_from_credential_key(key) + local uri_segs = core.utils.split_uri(key) + return "/consumers/" .. uri_segs[3] +end + +function _M.plugin(plugin_name) + local plugin_conf = core.lrucache.global("/consumers", + consumers.conf_version, plugin_consumer) + return plugin_conf[plugin_name] +end + +function _M.consumers_conf(plugin_name) + return _M.plugin(plugin_name) +end + + +-- attach chosen consumer to the ctx, used in auth plugin +function _M.attach_consumer(ctx, consumer, conf) + ctx.consumer = consumer + ctx.consumer_name = consumer.consumer_name + ctx.consumer_group_id = consumer.group_id + ctx.consumer_ver = conf.conf_version + + core.request.set_header(ctx, "X-Consumer-Username", consumer.username) + core.request.set_header(ctx, "X-Credential-Identifier", consumer.credential_id) + core.request.set_header(ctx, "X-Consumer-Custom-ID", consumer.custom_id) +end + + +function _M.consumers() + if not consumers then + return nil, nil + end + + return filter_consumers_list(consumers.values), consumers.conf_version +end + + +local create_consume_cache +do + local consumer_lrucache = core.lrucache.new({ + count = consumers_count_for_lrucache + }) + +local function fill_consumer_secret(consumer) + local new_consumer = core.table.clone(consumer) + new_consumer.auth_conf = secret.fetch_secrets(new_consumer.auth_conf, false) + return new_consumer +end + + +function create_consume_cache(consumers_conf, key_attr) + local consumer_names = {} + + for _, consumer in ipairs(consumers_conf.nodes) do + core.log.info("consumer node: ", 
core.json.delay_encode(consumer)) + local new_consumer = consumer_lrucache(consumer, nil, + fill_consumer_secret, consumer) + consumer_names[new_consumer.auth_conf[key_attr]] = new_consumer + end + + return consumer_names +end + +end + + +function _M.consumers_kv(plugin_name, consumer_conf, key_attr) + local consumers = lrucache("consumers_key#" .. plugin_name, consumer_conf.conf_version, + create_consume_cache, consumer_conf, key_attr) + + return consumers +end + + +function _M.find_consumer(plugin_name, key, key_value) + local consumer + local consumer_conf + consumer_conf = _M.plugin(plugin_name) + if not consumer_conf then + return nil, nil, "Missing related consumer" + end + local consumers = _M.consumers_kv(plugin_name, consumer_conf, key) + consumer = consumers[key_value] + return consumer, consumer_conf +end + + +local function check_consumer(consumer, key) + local data_valid + local err + if is_credential_etcd_key(key) then + data_valid, err = check_schema(core.schema.credential, consumer) + else + data_valid, err = check_schema(core.schema.consumer, consumer) + end + if not data_valid then + return data_valid, err + end + + return plugin_checker(consumer, core.schema.TYPE_CONSUMER) +end + + +function _M.init_worker() + local err + local cfg = { + automatic = true, + checker = check_consumer, + } + + consumers, err = core.config.new("/consumers", cfg) + if not consumers then + error("failed to create etcd instance for fetching consumers: " .. err) + return + end +end + +local function get_anonymous_consumer_from_local_cache(name) + local anon_consumer_raw = consumers:get(name) + + if not anon_consumer_raw or not anon_consumer_raw.value or + not anon_consumer_raw.value.id or not anon_consumer_raw.modifiedIndex then + return nil, nil, "failed to get anonymous consumer " .. 
name + end + + -- make structure of anon_consumer similar to that of consumer_mod.consumers_kv's response + local anon_consumer = anon_consumer_raw.value + anon_consumer.consumer_name = anon_consumer_raw.value.id + anon_consumer.modifiedIndex = anon_consumer_raw.modifiedIndex + + local anon_consumer_conf = { + conf_version = anon_consumer_raw.modifiedIndex + } + + return anon_consumer, anon_consumer_conf +end + + +function _M.get_anonymous_consumer(name) + local anon_consumer, anon_consumer_conf, err + anon_consumer, anon_consumer_conf, err = get_anonymous_consumer_from_local_cache(name) + + return anon_consumer, anon_consumer_conf, err +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/consumer_group.lua b/CloudronPackages/APISIX/apisix-source/apisix/consumer_group.lua new file mode 100644 index 0000000..3be59ec --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/consumer_group.lua @@ -0,0 +1,55 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- Consumer-group registry: keeps a worker-local, auto-synced copy of /consumer_groups from the config provider.
+local core = require("apisix.core")
+local plugin_checker = require("apisix.plugin").plugin_checker  -- reused below to validate each group's plugin config on sync
+local error = error  -- cache the global in a local upvalue
+
+
+local consumer_groups  -- config object returned by core.config.new(); nil until init_worker() has run
+
+
+local _M = {
+}
+
+
+function _M.init_worker()  -- start syncing /consumer_groups; per the name, intended to run once per worker
+    local err
+    consumer_groups, err = core.config.new("/consumer_groups", {
+        automatic = true,  -- keep the local copy updated automatically
+        item_schema = core.schema.consumer_group,
+        checker = plugin_checker,
+    })
+    if not consumer_groups then
+        error("failed to sync /consumer_groups: " .. err)  -- fail hard: the module is unusable without this config source
+    end
+end
+
+
+function _M.consumer_groups()  -- returns (values, conf_version), or (nil, nil) if init_worker has not run yet
+    if not consumer_groups then
+        return nil, nil
+    end
+    return consumer_groups.values, consumer_groups.conf_version
+end
+
+
+function _M.get(id)  -- look up a single consumer group by id; NOTE(review): errors if called before init_worker (consumer_groups is nil)
+    return consumer_groups:get(id)
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/control/router.lua b/CloudronPackages/APISIX/apisix-source/apisix/control/router.lua
new file mode 100644
index 0000000..e6e5ff9
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/control/router.lua
@@ -0,0 +1,212 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
--
local require = require
local router = require("apisix.utils.router")
local radixtree = require("resty.radixtree")
local builtin_v1_routes = require("apisix.control.v1")
local plugin_mod = require("apisix.plugin")
local core = require("apisix.core")

local str_sub = string.sub
local ipairs = ipairs
local pairs = pairs
local type = type
local ngx = ngx
local get_method = ngx.req.get_method
local events = require("apisix.events")

local _M = {}


-- Prefix a discovery module's control API uri with /v1/discovery/<mod_name>,
-- unless the uri is already a full /v1/ path.
local function format_dismod_uri(mod_name, uri)
    if core.string.has_prefix(uri, "/v1/") then
        return uri
    end

    local parts = {"/v1/discovery/", mod_name}
    if not core.string.has_prefix(uri, "/") then
        core.table.insert(parts, "/")
    end
    core.table.insert(parts, uri)

    return core.table.concat(parts, "")
end

-- we do not hardcode the discovery module's control api uri
-- Rewrites every uri of a discovery module's control API routes so they live
-- under /v1/discovery/<mod_name>/. The input is cloned, not mutated.
local function format_dismod_control_api_uris(mod_name, api_route)
    if not api_route or #api_route == 0 then
        return api_route
    end

    local cloned = core.table.clone(api_route)
    for _, route in ipairs(cloned) do
        local origin_uris = route.uris
        local formatted = core.table.new(#origin_uris, 0)
        for _, uri in ipairs(origin_uris) do
            core.table.insert(formatted, format_dismod_uri(mod_name, uri))
        end
        route.uris = formatted
    end

    return cloned
end


local fetch_control_api_router
do
    -- Turn control API route descriptors into router entries, wrapping each
    -- handler so that its (code, body) return value is sent as the response.
    local function register_api_routes(routes, api_routes)
        for _, route in ipairs(api_routes) do
            core.table.insert(routes, {
                methods = route.methods,
                -- note that it is 'uris' for control API, which is an array of strings
                paths = route.uris,
                handler = function (api_ctx)
                    local code, body = route.handler(api_ctx)
                    if code or body then
                        if type(body) == "table" and ngx.header["Content-Type"] == nil then
                            core.response.set_header("Content-Type", "application/json")
                        end

                        core.response.exit(code, body)
                    end
                end
            })
        end
    end

    -- route tables are reused across rebuilds to avoid re-allocation
    local routes = {}
    local v1_routes = {}
    local function empty_func() end

-- Build the control API router from plugin control APIs, discovery module
-- control/dump APIs and the builtin /v1 routes.
function fetch_control_api_router()
    core.table.clear(routes)

    for _, plugin in ipairs(plugin_mod.plugins) do
        local api_fun = plugin.control_api
        if api_fun then
            register_api_routes(routes, api_fun())
        end
    end

    local discovery_type = require("apisix.core.config_local").local_conf().discovery
    if discovery_type then
        local discovery = require("apisix.discovery.init").discovery
        local dump_apis = {}
        for key, _ in pairs(discovery_type) do
            local dis_mod = discovery[key]
            -- if discovery module has control_api method, support it
            local api_fun = dis_mod.control_api
            if api_fun then
                local format_route = format_dismod_control_api_uris(key, api_fun())
                register_api_routes(routes, format_route)
            end

            local dump_data = dis_mod.dump_data
            if dump_data then
                core.table.insert(dump_apis, {
                    methods = {"GET"},
                    uris = {format_dismod_uri(key, "/dump")},
                    handler = function()
                        return 200, dump_data()
                    end
                })
            end
        end

        if #dump_apis > 0 then
            core.log.notice("dump_apis: ", core.json.encode(dump_apis, true))
            register_api_routes(routes, dump_apis)
        end
    end

    core.table.clear(v1_routes)
    register_api_routes(v1_routes, builtin_v1_routes)

    local v1_router, err = router.new(v1_routes)
    if not v1_router then
        return nil, err
    end

    -- dispatch /v1/* through the dedicated v1 router after stripping the prefix
    core.table.insert(routes, {
        paths = {"/v1/*"},
        filter_fun = function(vars, opts, ...)
            local uri = str_sub(vars.uri, #"/v1" + 1)
            return v1_router:dispatch(uri, opts, ...)
        end,
        handler = empty_func,
    })

    local with_parameter = false
    local conf = core.config.local_conf()
    if conf.apisix.enable_control and conf.apisix.control then
        if conf.apisix.control.router == "radixtree_uri_with_parameter" then
            with_parameter = true
        end
    end

    if with_parameter then
        return radixtree.new(routes)
    else
        return router.new(routes)
    end
end

end -- do


do
    local match_opts = {}
    local cached_version
    local router

-- Match an incoming control API request against the cached router,
-- rebuilding it whenever the plugins were (re)loaded since the last call.
function _M.match(uri)
    if cached_version ~= plugin_mod.load_times then
        local err
        router, err = fetch_control_api_router()
        if router == nil then
            core.log.error("failed to fetch valid api router: ", err)
            return false
        end

        cached_version = plugin_mod.load_times
    end

    core.table.clear(match_opts)
    match_opts.method = get_method()

    return router:dispatch(uri, match_opts)
end

end -- do

local function reload_plugins()
    core.log.info("start to hot reload plugins")
    plugin_mod.load()
end


function _M.init_worker()
    -- register reload plugin handler
    events:register(reload_plugins, builtin_v1_routes.reload_event, "PUT")
end

return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/control/v1.lua b/CloudronPackages/APISIX/apisix-source/apisix/control/v1.lua
new file mode 100644
index 0000000..4d35018
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/control/v1.lua
@@ -0,0 +1,506 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local require = require +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local get_routes = require("apisix.router").http_routes +local get_services = require("apisix.http.service").services +local upstream_mod = require("apisix.upstream") +local get_upstreams = upstream_mod.upstreams +local collectgarbage = collectgarbage +local ipairs = ipairs +local pcall = pcall +local str_format = string.format +local ngx = ngx +local ngx_var = ngx.var +local events = require("apisix.events") + + +local _M = {} + +_M.RELOAD_EVENT = 'control-api-plugin-reload' + +function _M.schema() + local http_plugins, stream_plugins = plugin.get_all({ + version = true, + priority = true, + schema = true, + metadata_schema = true, + consumer_schema = true, + type = true, + scope = true, + }) + local schema = { + main = { + consumer = core.schema.consumer, + consumer_group = core.schema.consumer_group, + global_rule = core.schema.global_rule, + plugin_config = core.schema.plugin_config, + plugins = core.schema.plugins, + proto = core.schema.proto, + route = core.schema.route, + service = core.schema.service, + ssl = core.schema.ssl, + stream_route = core.schema.stream_route, + upstream = core.schema.upstream, + upstream_hash_header_schema = core.schema.upstream_hash_header_schema, + upstream_hash_vars_schema = core.schema.upstream_hash_vars_schema, + }, + plugins = http_plugins, + stream_plugins = stream_plugins, + } + return 200, schema +end + + +local healthcheck +local function extra_checker_info(value) + if not healthcheck then + healthcheck 
= require("resty.healthcheck") + end + + local name = upstream_mod.get_healthchecker_name(value) + local nodes, err = healthcheck.get_target_list(name, "upstream-healthcheck") + if err then + core.log.error("healthcheck.get_target_list failed: ", err) + end + return { + name = value.key, + nodes = nodes, + } +end + + +local function get_checker_type(checks) + if checks.active and checks.active.type then + return checks.active.type + elseif checks.passive and checks.passive.type then + return checks.passive.type + end +end + + +local function iter_and_add_healthcheck_info(infos, values) + if not values then + return + end + + for _, value in core.config_util.iterate_values(values) do + local checks = value.value.checks or (value.value.upstream and value.value.upstream.checks) + if checks then + local info = extra_checker_info(value) + info.type = get_checker_type(checks) + core.table.insert(infos, info) + end + end +end + + +local HTML_TEMPLATE = [[ + + + APISIX upstream check status + + +

APISIX upstream check status

+ + + + + + + + + + + + +{% local i = 0 %} +{% for _, stat in ipairs(stats) do %} +{% for _, node in ipairs(stat.nodes) do %} +{% i = i + 1 %} + {% if node.status == "healthy" or node.status == "mostly_healthy" then %} + + {% else %} + + {% end %} + + + + + + + + + + +{% end %} +{% end %} +
IndexUpstreamCheck typeHostStatusSuccess countsTCP FailuresHTTP FailuresTIMEOUT Failures
{* i *}{* stat.name *}{* stat.type *}{* node.ip .. ":" .. node.port *}{* node.status *}{* node.counter.success *}{* node.counter.tcp_failure *}{* node.counter.http_failure *}{* node.counter.timeout_failure *}
+ + +]] + +local html_render + +local function try_render_html(data) + if not html_render then + local template = require("resty.template") + html_render = template.compile(HTML_TEMPLATE) + end + local accept = ngx_var.http_accept + if accept and accept:find("text/html") then + local ok, out = pcall(html_render, data) + if not ok then + local err = str_format("HTML template rendering: %s", out) + core.log.error(err) + return nil, err + end + return out + end +end + + +local function _get_health_checkers() + local infos = {} + local routes = get_routes() + iter_and_add_healthcheck_info(infos, routes) + local services = get_services() + iter_and_add_healthcheck_info(infos, services) + local upstreams = get_upstreams() + iter_and_add_healthcheck_info(infos, upstreams) + return infos +end + + +function _M.get_health_checkers() + local infos = _get_health_checkers() + local out, err = try_render_html({stats=infos}) + if out then + core.response.set_header("Content-Type", "text/html") + return 200, out + end + if err then + return 503, {error_msg = err} + end + + return 200, infos +end + + +local function iter_and_find_healthcheck_info(values, src_type, src_id) + if not values then + return nil, str_format("%s[%s] not found", src_type, src_id) + end + + for _, value in core.config_util.iterate_values(values) do + if value.value.id == src_id then + local checks = value.value.checks or + (value.value.upstream and value.value.upstream.checks) + if not checks then + return nil, str_format("no checker for %s[%s]", src_type, src_id) + end + + local info = extra_checker_info(value) + info.type = get_checker_type(checks) + return info + end + end + + return nil, str_format("%s[%s] not found", src_type, src_id) +end + + +function _M.get_health_checker() + local uri_segs = core.utils.split_uri(ngx_var.uri) + core.log.info("healthcheck uri: ", core.json.delay_encode(uri_segs)) + + local src_type, src_id = uri_segs[4], uri_segs[5] + if not src_id then + return 404, {error_msg = 
str_format("missing src id for src type %s", src_type)} + end + + local values + if src_type == "routes" then + values = get_routes() + elseif src_type == "services" then + values = get_services() + elseif src_type == "upstreams" then + values = get_upstreams() + else + return 400, {error_msg = str_format("invalid src type %s", src_type)} + end + + local info, err = iter_and_find_healthcheck_info(values, src_type, src_id) + if not info then + return 404, {error_msg = err} + end + + local out, err = try_render_html({stats={info}}) + if out then + core.response.set_header("Content-Type", "text/html") + return 200, out + end + if err then + return 503, {error_msg = err} + end + + return 200, info +end + +local function iter_add_get_routes_info(values, route_id) + local infos = {} + for _, route in core.config_util.iterate_values(values) do + local new_route = core.table.deepcopy(route) + if new_route.value.upstream and new_route.value.upstream.parent then + new_route.value.upstream.parent = nil + end + -- remove healthcheck info + new_route.checker = nil + new_route.checker_idx = nil + new_route.checker_upstream = nil + new_route.clean_handlers = nil + core.table.insert(infos, new_route) + -- check the route id + if route_id and route.value.id == route_id then + return new_route + end + end + if not route_id then + return infos + end + return nil +end + +function _M.dump_all_routes_info() + local routes = get_routes() + local infos = iter_add_get_routes_info(routes, nil) + return 200, infos +end + +function _M.dump_route_info() + local routes = get_routes() + local uri_segs = core.utils.split_uri(ngx_var.uri) + local route_id = uri_segs[4] + local route = iter_add_get_routes_info(routes, route_id) + if not route then + return 404, {error_msg = str_format("route[%s] not found", route_id)} + end + return 200, route +end + +local function iter_add_get_upstream_info(values, upstream_id) + if not values then + return nil + end + + local infos = {} + for _, upstream in 
core.config_util.iterate_values(values) do + local new_upstream = core.table.deepcopy(upstream) + core.table.insert(infos, new_upstream) + if new_upstream.value and new_upstream.value.parent then + new_upstream.value.parent = nil + end + -- check the upstream id + if upstream_id and upstream.value.id == upstream_id then + return new_upstream + end + end + if not upstream_id then + return infos + end + return nil +end + +function _M.dump_all_upstreams_info() + local upstreams = get_upstreams() + local infos = iter_add_get_upstream_info(upstreams, nil) + return 200, infos +end + +function _M.dump_upstream_info() + local upstreams = get_upstreams() + local uri_segs = core.utils.split_uri(ngx_var.uri) + local upstream_id = uri_segs[4] + local upstream = iter_add_get_upstream_info(upstreams, upstream_id) + if not upstream then + return 404, {error_msg = str_format("upstream[%s] not found", upstream_id)} + end + return 200, upstream +end + +function _M.trigger_gc() + -- TODO: find a way to trigger GC in the stream subsystem + collectgarbage() + return 200 +end + + +local function iter_add_get_services_info(values, svc_id) + local infos = {} + for _, svc in core.config_util.iterate_values(values) do + local new_svc = core.table.deepcopy(svc) + if new_svc.value.upstream and new_svc.value.upstream.parent then + new_svc.value.upstream.parent = nil + end + -- remove healthcheck info + new_svc.checker = nil + new_svc.checker_idx = nil + new_svc.checker_upstream = nil + new_svc.clean_handlers = nil + core.table.insert(infos, new_svc) + -- check the service id + if svc_id and svc.value.id == svc_id then + return new_svc + end + end + if not svc_id then + return infos + end + return nil +end + +function _M.dump_all_services_info() + local services = get_services() + local infos = iter_add_get_services_info(services, nil) + return 200, infos +end + +function _M.dump_service_info() + local services = get_services() + local uri_segs = core.utils.split_uri(ngx_var.uri) + local svc_id 
= uri_segs[4] + local info = iter_add_get_services_info(services, svc_id) + if not info then + return 404, {error_msg = str_format("service[%s] not found", svc_id)} + end + return 200, info +end + +function _M.dump_all_plugin_metadata() + local names = core.config.local_conf().plugins + local metadatas = core.table.new(0, #names) + for _, name in ipairs(names) do + local metadata = plugin.plugin_metadata(name) + if metadata then + core.table.insert(metadatas, metadata.value) + end + end + return 200, metadatas +end + +function _M.dump_plugin_metadata() + local uri_segs = core.utils.split_uri(ngx_var.uri) + local name = uri_segs[4] + local metadata = plugin.plugin_metadata(name) + if not metadata then + return 404, {error_msg = str_format("plugin metadata[%s] not found", name)} + end + return 200, metadata.value +end + +function _M.post_reload_plugins() + local success, err = events:post(_M.RELOAD_EVENT, ngx.req.get_method(), ngx.time()) + if not success then + core.response.exit(503, err) + end + + core.response.exit(200, "done") +end + +return { + -- /v1/schema + { + methods = {"GET"}, + uris = {"/schema"}, + handler = _M.schema, + }, + -- /v1/healthcheck + { + methods = {"GET"}, + uris = {"/healthcheck"}, + handler = _M.get_health_checkers, + }, + -- /v1/healthcheck/{src_type}/{src_id} + { + methods = {"GET"}, + uris = {"/healthcheck/*"}, + handler = _M.get_health_checker, + }, + -- /v1/gc + { + methods = {"POST"}, + uris = {"/gc"}, + handler = _M.trigger_gc, + }, + -- /v1/routes + { + methods = {"GET"}, + uris = {"/routes"}, + handler = _M.dump_all_routes_info, + }, + -- /v1/route/* + { + methods = {"GET"}, + uris = {"/route/*"}, + handler = _M.dump_route_info, + }, + -- /v1/services + { + methods = {"GET"}, + uris = {"/services"}, + handler = _M.dump_all_services_info + }, + -- /v1/service/* + { + methods = {"GET"}, + uris = {"/service/*"}, + handler = _M.dump_service_info + }, + -- /v1/upstreams + { + methods = {"GET"}, + uris = {"/upstreams"}, + handler = 
_M.dump_all_upstreams_info, + }, + -- /v1/upstream/* + { + methods = {"GET"}, + uris = {"/upstream/*"}, + handler = _M.dump_upstream_info, + }, + -- /v1/plugin_metadatas + { + methods = {"GET"}, + uris = {"/plugin_metadatas"}, + handler = _M.dump_all_plugin_metadata, + }, + -- /v1/plugin_metadata/* + { + methods = {"GET"}, + uris = {"/plugin_metadata/*"}, + handler = _M.dump_plugin_metadata, + }, + -- /v1/plugins/reload + { + methods = {"PUT"}, + uris = {"/plugins/reload"}, + handler = _M.post_reload_plugins, + }, + get_health_checkers = _get_health_checkers, + reload_event = _M.RELOAD_EVENT, +} diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core.lua b/CloudronPackages/APISIX/apisix-source/apisix/core.lua new file mode 100644 index 0000000..14c5186 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core.lua @@ -0,0 +1,68 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local log = require("apisix.core.log") +local utils = require("apisix.core.utils") +local local_conf, err = require("apisix.core.config_local").local_conf() +if not local_conf then + error("failed to parse yaml config: " .. 
err) +end + +local config_provider = local_conf.deployment and local_conf.deployment.config_provider + or "etcd" +log.info("use config_provider: ", config_provider) + +local config +-- Currently, we handle JSON parsing in config_yaml, so special processing is needed here. +if config_provider == "json" then + config = require("apisix.core.config_yaml") + config.file_type = "json" +else + config = require("apisix.core.config_" .. config_provider) +end + +config.type = config_provider + + +return { + version = require("apisix.core.version"), + log = log, + config = config, + config_util = require("apisix.core.config_util"), + sleep = utils.sleep, + json = require("apisix.core.json"), + table = require("apisix.core.table"), + request = require("apisix.core.request"), + response = require("apisix.core.response"), + lrucache = require("apisix.core.lrucache"), + schema = require("apisix.schema_def"), + string = require("apisix.core.string"), + ctx = require("apisix.core.ctx"), + timer = require("apisix.core.timer"), + id = require("apisix.core.id"), + ip = require("apisix.core.ip"), + io = require("apisix.core.io"), + utils = utils, + dns_client = require("apisix.core.dns.client"), + etcd = require("apisix.core.etcd"), + tablepool = require("tablepool"), + resolver = require("apisix.core.resolver"), + os = require("apisix.core.os"), + pubsub = require("apisix.core.pubsub"), + math = require("apisix.core.math"), + event = require("apisix.core.event"), + env = require("apisix.core.env"), +} diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/config_etcd.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/config_etcd.lua new file mode 100644 index 0000000..d476941 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/config_etcd.lua @@ -0,0 +1,1168 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. 
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Get configuration information. +-- +-- @module core.config_etcd + +local table = require("apisix.core.table") +local config_local = require("apisix.core.config_local") +local config_util = require("apisix.core.config_util") +local log = require("apisix.core.log") +local json = require("apisix.core.json") +local etcd_apisix = require("apisix.core.etcd") +local core_str = require("apisix.core.string") +local new_tab = require("table.new") +local inspect = require("inspect") +local errlog = require("ngx.errlog") +local process = require("ngx.process") +local log_level = errlog.get_sys_filter_level() +local NGX_INFO = ngx.INFO +local check_schema = require("apisix.core.schema").check +local exiting = ngx.worker.exiting +local worker_id = ngx.worker.id +local insert_tab = table.insert +local type = type +local ipairs = ipairs +local setmetatable = setmetatable +local ngx_sleep = require("apisix.core.utils").sleep +local ngx_timer_at = ngx.timer.at +local ngx_time = ngx.time +local ngx = ngx +local sub_str = string.sub +local tostring = tostring +local tonumber = tonumber +local xpcall = xpcall +local debug = debug +local string = string +local error = error +local pairs = pairs +local next = next +local assert = assert +local rand = math.random +local constants = 
require("apisix.constants") +local health_check = require("resty.etcd.health_check") +local semaphore = require("ngx.semaphore") +local tablex = require("pl.tablex") +local ngx_thread_spawn = ngx.thread.spawn +local ngx_thread_kill = ngx.thread.kill +local ngx_thread_wait = ngx.thread.wait + + +local is_http = ngx.config.subsystem == "http" +local err_etcd_grpc_engine_timeout = "context deadline exceeded" +local err_etcd_grpc_ngx_timeout = "timeout" +local err_etcd_unhealthy_all = "has no healthy etcd endpoint available" +local health_check_shm_name = "etcd-cluster-health-check" +local status_report_shared_dict_name = "status-report" +if not is_http then + health_check_shm_name = health_check_shm_name .. "-stream" +end +local created_obj = {} +local loaded_configuration = {} +local watch_ctx + + +local _M = { + version = 0.3, + local_conf = config_local.local_conf, + clear_local_cache = config_local.clear_cache, +} + + +local mt = { + __index = _M, + __tostring = function(self) + return " etcd key: " .. 
self.key + end +} + + +local get_etcd +do + local etcd_cli + + function get_etcd() + if etcd_cli ~= nil then + return etcd_cli + end + + local _, err + etcd_cli, _, err = etcd_apisix.get_etcd_syncer() + return etcd_cli, err + end +end + + +local function cancel_watch(http_cli) + local res, err = watch_ctx.cli:watchcancel(http_cli) + if res == 1 then + log.info("cancel watch connection success") + else + log.error("cancel watch failed: ", err) + end +end + + +-- append res to the queue and notify pending watchers +local function produce_res(res, err) + if log_level >= NGX_INFO then + log.info("append res: ", inspect(res), ", err: ", inspect(err)) + end + insert_tab(watch_ctx.res, {res=res, err=err}) + for _, sema in pairs(watch_ctx.sema) do + sema:post() + end + table.clear(watch_ctx.sema) +end + + +local function do_run_watch(premature) + if premature then + return + end + + -- the main watcher first start + if watch_ctx.started == false then + local local_conf, err = config_local.local_conf() + if not local_conf then + error("no local conf: " .. err) + end + watch_ctx.prefix = local_conf.etcd.prefix .. "/" + watch_ctx.timeout = local_conf.etcd.watch_timeout + + watch_ctx.cli, err = get_etcd() + if not watch_ctx.cli then + error("failed to create etcd instance: " .. 
string(err)) + end + + local rev = 0 + if loaded_configuration then + local _, res = next(loaded_configuration) + if res then + rev = tonumber(res.headers["X-Etcd-Index"]) + assert(rev > 0, 'invalid res.headers["X-Etcd-Index"]') + end + end + + if rev == 0 then + while true do + local res, err = watch_ctx.cli:get(watch_ctx.prefix) + if not res then + log.error("etcd get: ", err) + ngx_sleep(3) + else + rev = tonumber(res.body.header.revision) + break + end + end + end + + watch_ctx.rev = rev + 1 + watch_ctx.started = true + + log.info("main etcd watcher initialised, revision=", watch_ctx.rev) + + if watch_ctx.wait_init then + for _, sema in pairs(watch_ctx.wait_init) do + sema:post() + end + watch_ctx.wait_init = nil + end + end + + local opts = {} + opts.timeout = watch_ctx.timeout or 50 -- second + opts.need_cancel = true + opts.start_revision = watch_ctx.rev + + log.info("restart watchdir: start_revision=", opts.start_revision) + + local res_func, err, http_cli = watch_ctx.cli:watchdir(watch_ctx.prefix, opts) + if not res_func then + log.error("watchdir err: ", err) + ngx_sleep(3) + return + end + + ::watch_event:: + while true do + local res, err = res_func() + if log_level >= NGX_INFO then + log.info("res_func: ", inspect(res)) + end + + if not res then + if err ~= "closed" and + err ~= "timeout" and + err ~= "broken pipe" + then + log.error("wait watch event: ", err) + end + cancel_watch(http_cli) + break + end + + if res.error then + log.error("wait watch event: ", inspect(res.error)) + cancel_watch(http_cli) + break + end + + if res.result.created then + goto watch_event + end + + if res.result.canceled then + log.warn("watch canceled by etcd, res: ", inspect(res)) + if res.result.compact_revision then + watch_ctx.rev = tonumber(res.result.compact_revision) + log.error("etcd compacted, compact_revision=", watch_ctx.rev) + produce_res(nil, "compacted") + end + cancel_watch(http_cli) + break + end + + -- cleanup + local min_idx = 0 + for _, idx in 
pairs(watch_ctx.idx) do + if (min_idx == 0) or (idx < min_idx) then + min_idx = idx + end + end + + for i = 1, min_idx - 1 do + watch_ctx.res[i] = false + end + + if min_idx > 100 then + for k, idx in pairs(watch_ctx.idx) do + watch_ctx.idx[k] = idx - min_idx + 1 + end + -- trim the res table + for i = 1, min_idx - 1 do + table.remove(watch_ctx.res, 1) + end + end + + local rev = tonumber(res.result.header.revision) + if rev == nil then + log.warn("receive a invalid revision header, header: ", inspect(res.result.header)) + cancel_watch(http_cli) + break + end + + if rev < watch_ctx.rev then + log.error("received smaller revision, rev=", rev, ", watch_ctx.rev=", + watch_ctx.rev,". etcd may be restarted. resyncing....") + watch_ctx.rev = rev + produce_res(nil, "restarted") + cancel_watch(http_cli) + break + end + if rev > watch_ctx.rev then + watch_ctx.rev = rev + 1 + end + produce_res(res) + end +end + + +local function run_watch(premature) + local run_watch_th, err = ngx_thread_spawn(do_run_watch, premature) + if not run_watch_th then + log.error("failed to spawn thread do_run_watch: ", err) + return + end + + local check_worker_th, err = ngx_thread_spawn(function () + while not exiting() do + ngx_sleep(0.1) + end + end) + if not check_worker_th then + log.error("failed to spawn thread check_worker: ", err) + return + end + + local ok, err = ngx_thread_wait(run_watch_th, check_worker_th) + if not ok then + log.error("run_watch or check_worker thread terminates failed", + " restart those threads, error: ", inspect(err)) + end + + ngx_thread_kill(run_watch_th) + ngx_thread_kill(check_worker_th) + + if not exiting() then + ngx_timer_at(0, run_watch) + else + -- notify child watchers + produce_res(nil, "worker exited") + end +end + + +local function init_watch_ctx(key) + if not watch_ctx then + watch_ctx = { + idx = {}, + res = {}, + sema = {}, + wait_init = {}, + started = false, + } + ngx_timer_at(0, run_watch) + end + + if watch_ctx.started == false then + -- wait 
until the main watcher is started + local sema, err = semaphore.new() + if not sema then + error(err) + end + watch_ctx.wait_init[key] = sema + while true do + local ok, err = sema:wait(60) + if ok then + break + end + log.error("wait main watcher to start, key: ", key, ", err: ", err) + end + end +end + + +local function getkey(etcd_cli, key) + if not etcd_cli then + return nil, "not inited" + end + + local res, err = etcd_cli:readdir(key) + if not res then + -- log.error("failed to get key from etcd: ", err) + return nil, err + end + + if type(res.body) ~= "table" then + return nil, "failed to get key from etcd" + end + + res, err = etcd_apisix.get_format(res, key, true) + if not res then + return nil, err + end + + return res +end + + +local function readdir(etcd_cli, key, formatter) + if not etcd_cli then + return nil, "not inited" + end + + local res, err = etcd_cli:readdir(key) + if not res then + -- log.error("failed to get key from etcd: ", err) + return nil, err + end + + if type(res.body) ~= "table" then + return nil, "failed to read etcd dir" + end + + res, err = etcd_apisix.get_format(res, key .. 
'/', true, formatter) + if not res then + return nil, err + end + + return res +end + + +local function http_waitdir(self, etcd_cli, key, modified_index, timeout) + if not watch_ctx.idx[key] then + watch_ctx.idx[key] = 1 + end + + ::iterate_events:: + for i = watch_ctx.idx[key], #watch_ctx.res do + watch_ctx.idx[key] = i + 1 + + local item = watch_ctx.res[i] + if item == false then + goto iterate_events + end + + local res, err = item.res, item.err + if err then + return res, err + end + + -- ignore res with revision smaller then self.prev_index + if tonumber(res.result.header.revision) > self.prev_index then + local res2 + for _, evt in ipairs(res.result.events) do + if core_str.find(evt.kv.key, key) == 1 then + if not res2 then + res2 = tablex.deepcopy(res) + table.clear(res2.result.events) + end + insert_tab(res2.result.events, evt) + end + end + + if res2 then + if log_level >= NGX_INFO then + log.info("http_waitdir: ", inspect(res2)) + end + return res2 + end + end + end + + -- if no events, wait via semaphore + if not self.watch_sema then + local sema, err = semaphore.new() + if not sema then + error(err) + end + self.watch_sema = sema + end + + watch_ctx.sema[key] = self.watch_sema + local ok, err = self.watch_sema:wait(timeout or 60) + watch_ctx.sema[key] = nil + if ok then + goto iterate_events + else + if err ~= "timeout" then + log.error("wait watch event, key=", key, ", err: ", err) + end + return nil, err + end +end + + +local function waitdir(self) + local etcd_cli = self.etcd_cli + local key = self.key + local modified_index = self.prev_index + 1 + local timeout = self.timeout + + if not etcd_cli then + return nil, "not inited" + end + + local res, err = http_waitdir(self, etcd_cli, key, modified_index, timeout) + + if not res then + -- log.error("failed to get key from etcd: ", err) + return nil, err + end + + return etcd_apisix.watch_format(res) +end + + +local function short_key(self, str) + return sub_str(str, #self.key + 2) +end + + +local 
function sync_status_to_shdict(status) + local local_conf = config_local.local_conf() + if not local_conf.apisix.status then + return + end + if process.type() ~= "worker" then + return + end + local status_shdict = ngx.shared[status_report_shared_dict_name] + if not status_shdict then + return + end + local id = worker_id() + status_shdict:set(id, status) +end + + +local function load_full_data(self, dir_res, headers) + local err + local changed = false + + if self.single_item then + self.values = new_tab(1, 0) + self.values_hash = new_tab(0, 1) + + local item = dir_res + local data_valid = item.value ~= nil + + if data_valid and self.item_schema then + data_valid, err = check_schema(self.item_schema, item.value) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.encode(item.value)) + end + end + + if data_valid and self.checker then + data_valid, err = self.checker(item.value) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.delay_encode(item.value)) + end + end + + if data_valid then + changed = true + insert_tab(self.values, item) + self.values_hash[self.key] = #self.values + + item.clean_handlers = {} + + if self.filter then + self.filter(item) + end + end + + self:upgrade_version(item.modifiedIndex) + + else + -- here dir_res maybe res.body.node or res.body.list + -- we need make values equals to res.body.node.nodes or res.body.list + local values = (dir_res and dir_res.nodes) or dir_res + if not values then + values = {} + end + + self.values = new_tab(#values, 0) + self.values_hash = new_tab(0, #values) + + for _, item in ipairs(values) do + local key = short_key(self, item.key) + local data_valid = true + if type(item.value) ~= "table" then + data_valid = false + log.error("invalid item data of [", self.key .. "/" .. 
key, + "], val: ", item.value, + ", it should be an object") + end + + if data_valid and self.item_schema then + data_valid, err = check_schema(self.item_schema, item.value) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.encode(item.value)) + end + end + + if data_valid and self.checker then + -- TODO: An opts table should be used + -- as different checkers may use different parameters + data_valid, err = self.checker(item.value, item.key) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.delay_encode(item.value)) + end + end + + if data_valid then + changed = true + insert_tab(self.values, item) + self.values_hash[key] = #self.values + + item.value.id = key + item.clean_handlers = {} + + if self.filter then + self.filter(item) + end + end + + self:upgrade_version(item.modifiedIndex) + end + end + + if headers then + self.prev_index = tonumber(headers["X-Etcd-Index"]) or 0 + self:upgrade_version(headers["X-Etcd-Index"]) + end + + if changed then + self.conf_version = self.conf_version + 1 + end + + self.need_reload = false + sync_status_to_shdict(true) +end + + +function _M.upgrade_version(self, new_ver) + new_ver = tonumber(new_ver) + if not new_ver then + return + end + + local pre_index = self.prev_index + + if new_ver <= pre_index then + return + end + + self.prev_index = new_ver + return +end + + +local function sync_data(self) + if not self.key then + return nil, "missing 'key' arguments" + end + + init_watch_ctx(self.key) + + if self.need_reload then + local res, err = readdir(self.etcd_cli, self.key) + if not res then + return false, err + end + + local dir_res, headers = res.body.list or res.body.node or {}, res.headers + log.debug("readdir key: ", self.key, " res: ", + json.delay_encode(dir_res)) + + if self.values then + for i, val in ipairs(self.values) do + config_util.fire_all_clean_handlers(val) + end + + self.values = 
nil
            self.values_hash = nil
        end

        load_full_data(self, dir_res, headers)

        return true
    end

    -- incremental sync: block until the watcher reports changes under self.key
    local dir_res, err = waitdir(self)
    log.info("waitdir key: ", self.key, " prev_index: ", self.prev_index + 1)
    log.info("res: ", json.delay_encode(dir_res, true), ", err: ", err)

    if not dir_res then
        if err == "compacted" or err == "restarted" then
            -- the revision we track is gone from etcd; fall back to a full readdir
            self.need_reload = true
            log.error("waitdir [", self.key, "] err: ", err,
                      ", will read the configuration again via readdir")
            return false
        end

        return false, err
    end

    local res = dir_res.body.node
    local err_msg = dir_res.body.message
    if err_msg then
        -- report the etcd error message itself; `err` is always nil when
        -- dir_res is non-nil, so returning it here would lose the reason
        return false, err_msg
    end

    if not res then
        return false, err
    end

    local res_copy = res
    -- waitdir will return [res] even for self.single_item = true
    for _, res in ipairs(res_copy) do
        local key
        local data_valid = true
        if self.single_item then
            key = self.key
        else
            key = short_key(self, res.key)
        end

        if res.value and not self.single_item and type(res.value) ~= "table" then
            data_valid = false
            log.error("invalid item data of [", self.key .. "/" .. 
key, + "], val: ", res.value, + ", it should be an object") + end + + if data_valid and res.value and self.item_schema then + data_valid, err = check_schema(self.item_schema, res.value) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.encode(res.value)) + end + end + + if data_valid and res.value and self.checker then + data_valid, err = self.checker(res.value, res.key) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.delay_encode(res.value)) + end + end + + -- the modifiedIndex tracking should be updated regardless of the validity of the config + self:upgrade_version(res.modifiedIndex) + + if not data_valid then + -- do not update the config cache when the data is invalid + -- invalid data should only cancel this config item update, not discard + -- the remaining events, use continue instead of loop break and return + goto CONTINUE + end + + if res.dir then + if res.value then + return false, "todo: support for parsing `dir` response " + .. "structures. " .. 
json.encode(res) + end + return false + end + + local pre_index = self.values_hash[key] + if pre_index then + local pre_val = self.values[pre_index] + if pre_val then + config_util.fire_all_clean_handlers(pre_val) + end + + if res.value then + if not self.single_item then + res.value.id = key + end + + self.values[pre_index] = res + res.clean_handlers = {} + log.info("update data by key: ", key) + + else + self.sync_times = self.sync_times + 1 + self.values[pre_index] = false + self.values_hash[key] = nil + log.info("delete data by key: ", key) + end + + elseif res.value then + res.clean_handlers = {} + insert_tab(self.values, res) + self.values_hash[key] = #self.values + if not self.single_item then + res.value.id = key + end + + log.info("insert data by key: ", key) + end + + -- avoid space waste + if self.sync_times > 100 then + local values_original = table.clone(self.values) + table.clear(self.values) + + for i = 1, #values_original do + local val = values_original[i] + if val then + table.insert(self.values, val) + end + end + + table.clear(self.values_hash) + log.info("clear stale data in `values_hash` for key: ", key) + + for i = 1, #self.values do + key = short_key(self, self.values[i].key) + self.values_hash[key] = i + end + + self.sync_times = 0 + end + + -- /plugins' filter need to known self.values when it is called + -- so the filter should be called after self.values set. 
+ if self.filter then + self.filter(res) + end + + self.conf_version = self.conf_version + 1 + + ::CONTINUE:: + end + + return self.values +end + + +function _M.get(self, key) + if not self.values_hash then + return + end + + local arr_idx = self.values_hash[tostring(key)] + if not arr_idx then + return nil + end + + return self.values[arr_idx] +end + + +function _M.getkey(self, key) + if not self.running then + return nil, "stopped" + end + + local local_conf = config_local.local_conf() + if local_conf and local_conf.etcd and local_conf.etcd.prefix then + key = local_conf.etcd.prefix .. key + end + + return getkey(self.etcd_cli, key) +end + + +local function _automatic_fetch(premature, self) + if premature then + return + end + + if not (health_check.conf and health_check.conf.shm_name) then + -- used for worker processes to synchronize configuration + local _, err = health_check.init({ + shm_name = health_check_shm_name, + fail_timeout = self.health_check_timeout, + max_fails = 3, + retry = true, + }) + if err then + log.warn("fail to create health_check: " .. err) + end + end + + local i = 0 + while not exiting() and self.running and i <= 32 do + i = i + 1 + + local ok, err = xpcall(function() + if not self.etcd_cli then + local etcd_cli, err = get_etcd() + if not etcd_cli then + error("failed to create etcd instance for key [" + .. self.key .. "]: " .. 
(err or "unknown"))
                end
                self.etcd_cli = etcd_cli
            end

            local ok, err = sync_data(self)
            if err then
                -- normalize the two grpc-engine timeout messages so they are
                -- treated like a plain watch timeout below
                if core_str.find(err, err_etcd_grpc_engine_timeout) or
                    core_str.find(err, err_etcd_grpc_ngx_timeout)
                then
                    err = "timeout"
                end

                if core_str.find(err, err_etcd_unhealthy_all) then
                    -- all etcd endpoints are down: retry with exponential
                    -- backoff (1s, 2s, 4s, ... doubling each attempt) until a
                    -- sync succeeds or the outer iteration budget (32) is spent
                    local reconnected = false
                    while err and not reconnected and i <= 32 do
                        local backoff_duration, backoff_factor, backoff_step = 1, 2, 6
                        for _ = 1, backoff_step do
                            i = i + 1
                            ngx_sleep(backoff_duration)
                            _, err = sync_data(self)
                            if not err or not core_str.find(err, err_etcd_unhealthy_all) then
                                log.warn("reconnected to etcd")
                                reconnected = true
                                break
                            end
                            backoff_duration = backoff_duration * backoff_factor
                            log.error("no healthy etcd endpoint available, next retry after "
                                      .. backoff_duration .. "s")
                        end
                    end
                elseif err == "worker exited" then
                    log.info("worker exited.")
                    return
                elseif err ~= "timeout" and err ~= "Key not found"
                    and self.last_err ~= err then
                    -- suppress repeats of the same error and two expected
                    -- conditions (watch timeout, missing key)
                    log.error("failed to fetch data from etcd: ", err, ", ",
                              tostring(self))
                end

                -- remember the latest error; after 30s of the same error,
                -- clear it so the next occurrence is logged again
                if err ~= self.last_err then
                    self.last_err = err
                    self.last_err_time = ngx_time()
                elseif self.last_err then
                    if ngx_time() - self.last_err_time >= 30 then
                        self.last_err = nil
                    end
                end

                -- etcd watch timeout is an expected error, so there is no need for resync_delay
                if err ~= "timeout" then
                    ngx_sleep(self.resync_delay + rand() * 0.5 * self.resync_delay)
                end
            elseif not ok then
                -- no error. 
reentry the sync with different state + ngx_sleep(0.05) + end + + end, debug.traceback) + + if not ok then + log.error("failed to fetch data from etcd: ", err, ", ", + tostring(self)) + ngx_sleep(self.resync_delay + rand() * 0.5 * self.resync_delay) + break + end + end + + if not exiting() and self.running then + ngx_timer_at(0, _automatic_fetch, self) + end +end + +-- for test +_M.test_sync_data = sync_data +_M.test_automatic_fetch = _automatic_fetch +function _M.inject_sync_data(f) + sync_data = f +end + + +--- +-- Create a new connection to communicate with the control plane. +-- This function should be used in the `init_worker_by_lua` phase. +-- +-- @function core.config.new +-- @tparam string key etcd directory to be monitored, e.g. "/routes". +-- @tparam table opts Parameters related to the etcd client connection. +-- The keys in `opts` are as follows: +-- * automatic: whether to get the latest etcd data automatically +-- * item_schema: the jsonschema that checks the value of each item under the **key** directory +-- * filter: the custom function to filter the value of each item under the **key** directory +-- * timeout: the timeout for watch operation, default is 30s +-- * single_item: whether only one item under the **key** directory +-- * checker: the custom function to check the value of each item under the **key** directory +-- @treturn table The etcd client connection. 
+-- @usage +-- local plugins_conf, err = core.config.new("/custom_dir", { +-- automatic = true, +-- filter = function(item) +-- -- called once before reload for sync data from admin +-- end, +--}) +function _M.new(key, opts) + local local_conf, err = config_local.local_conf() + if not local_conf then + return nil, err + end + + local etcd_conf = local_conf.etcd + local prefix = etcd_conf.prefix + local resync_delay = etcd_conf.resync_delay + if not resync_delay or resync_delay < 0 then + resync_delay = 5 + end + local health_check_timeout = etcd_conf.health_check_timeout + if not health_check_timeout or health_check_timeout < 0 then + health_check_timeout = 10 + end + local automatic = opts and opts.automatic + local item_schema = opts and opts.item_schema + local filter_fun = opts and opts.filter + local timeout = opts and opts.timeout + local single_item = opts and opts.single_item + local checker = opts and opts.checker + + local obj = setmetatable({ + etcd_cli = nil, + key = key and prefix .. key, + automatic = automatic, + item_schema = item_schema, + checker = checker, + sync_times = 0, + running = true, + conf_version = 0, + values = nil, + need_reload = true, + watching_stream = nil, + routes_hash = nil, + prev_index = 0, + last_err = nil, + last_err_time = nil, + resync_delay = resync_delay, + health_check_timeout = health_check_timeout, + timeout = timeout, + single_item = single_item, + filter = filter_fun, + }, mt) + + if automatic then + if not key then + return nil, "missing `key` argument" + end + + if loaded_configuration[key] then + local res = loaded_configuration[key] + loaded_configuration[key] = nil -- tried to load + + log.notice("use loaded configuration ", key) + + local dir_res, headers = res.body, res.headers + load_full_data(obj, dir_res, headers) + end + + ngx_timer_at(0, _automatic_fetch, obj) + + else + local etcd_cli, err = get_etcd() + if not etcd_cli then + return nil, "failed to start an etcd instance: " .. 
err + end + obj.etcd_cli = etcd_cli + end + + if key then + created_obj[key] = obj + end + + return obj +end + + +function _M.close(self) + self.running = false +end + + +function _M.fetch_created_obj(key) + return created_obj[key] +end + + +function _M.server_version(self) + if not self.running then + return nil, "stopped" + end + + local res, err = etcd_apisix.server_version() + if not res then + return nil, err + end + + return res.body +end + + +local function create_formatter(prefix) + return function (res) + res.body.nodes = {} + + local dirs + if is_http then + dirs = constants.HTTP_ETCD_DIRECTORY + else + dirs = constants.STREAM_ETCD_DIRECTORY + end + + local curr_dir_data + local curr_key + for _, item in ipairs(res.body.kvs) do + if curr_dir_data then + if core_str.has_prefix(item.key, curr_key) then + table.insert(curr_dir_data, etcd_apisix.kvs_to_node(item)) + goto CONTINUE + end + + curr_dir_data = nil + end + + local key = sub_str(item.key, #prefix + 1) + if dirs[key] then + -- single item + loaded_configuration[key] = { + body = etcd_apisix.kvs_to_node(item), + headers = res.headers, + } + else + local key = sub_str(item.key, #prefix + 1, #item.key - 1) + -- ensure the same key hasn't been handled as single item + if dirs[key] and not loaded_configuration[key] then + loaded_configuration[key] = { + body = { + nodes = {}, + }, + headers = res.headers, + } + curr_dir_data = loaded_configuration[key].body.nodes + curr_key = item.key + end + end + + ::CONTINUE:: + end + + return res + end +end + + +function _M.init() + local local_conf, err = config_local.local_conf() + if not local_conf then + return nil, err + end + + if table.try_read_attr(local_conf, "apisix", "disable_sync_configuration_during_start") then + return true + end + + -- don't go through proxy during start because the proxy is not available + local etcd_cli, prefix, err = etcd_apisix.new_without_proxy() + if not etcd_cli then + return nil, "failed to start a etcd instance: " .. 
err + end + + local res, err = readdir(etcd_cli, prefix, create_formatter(prefix)) + if not res then + return nil, err + end + + return true +end + + +function _M.init_worker() + sync_status_to_shdict(false) + local local_conf, err = config_local.local_conf() + if not local_conf then + return nil, err + end + + if table.try_read_attr(local_conf, "apisix", "disable_sync_configuration_during_start") then + return true + end + + return true +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/config_local.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/config_local.lua new file mode 100644 index 0000000..2b8f92f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/config_local.lua @@ -0,0 +1,71 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Get configuration information. +-- +-- @module core.config_local + +local file = require("apisix.cli.file") + + +local _M = {} + + +local config_data + + +function _M.clear_cache() + config_data = nil +end + +--- +-- Get the local config info. +-- The configuration information consists of two parts, user-defined configuration in +-- `conf/config.yaml` and default configuration in `conf/config-default.yaml`. 
The configuration +-- of the same name present in `conf/config.yaml` will overwrite `conf/config-default.yaml`. +-- The final full configuration is `conf/config.yaml` and the default configuration in +-- `conf/config-default.yaml` that is not overwritten. +-- +-- @function core.config_local.local_conf +-- @treturn table The configuration information. +-- @usage +-- -- Given a config item in `conf/config.yaml`: +-- -- +-- -- apisix: +-- -- ssl: +-- -- fallback_sni: "a.test2.com" +-- -- +-- -- you can get the value of `fallback_sni` by: +-- local local_conf = core.config.local_conf() +-- local fallback_sni = core.table.try_read_attr( +-- local_conf, "apisix", "ssl", "fallback_sni") -- "a.test2.com" +function _M.local_conf(force) + if not force and config_data then + return config_data + end + + local default_conf, err = file.read_yaml_conf() + if not default_conf then + return nil, err + end + + config_data = default_conf + return config_data +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/config_util.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/config_util.lua new file mode 100644 index 0000000..9621729 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/config_util.lua @@ -0,0 +1,219 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Collection of util functions +-- +-- @module core.config_util + +local core_tab = require("apisix.core.table") +local log = require("apisix.core.log") +local str_byte = string.byte +local str_char = string.char +local ipairs = ipairs +local setmetatable = setmetatable +local tostring = tostring +local type = type + + +local _M = {} + + +local function _iterate_values(self, tab) + while true do + self.idx = self.idx + 1 + local v = tab[self.idx] + if type(v) == "table" then + return self.idx, v + end + if v == nil then + return nil, nil + end + -- skip the tombstone + end +end + + +function _M.iterate_values(tab) + local iter = setmetatable({idx = 0}, {__call = _iterate_values}) + return iter, tab, 0 +end + + +-- Add a clean handler to a runtime configuration item. +-- The clean handler will be called when the item is deleted from configuration +-- or cancelled. Note that Nginx worker exit doesn't trigger the clean handler. +-- Return an index so that we can cancel it later. +function _M.add_clean_handler(item, func) + if not item.clean_handlers then + return nil, "clean handlers for the item are nil" + end + + if not item.clean_handlers._id then + item.clean_handlers._id = 1 + end + + local id = item.clean_handlers._id + item.clean_handlers._id = item.clean_handlers._id + 1 + core_tab.insert(item.clean_handlers, {f = func, id = id}) + return id +end + + +-- cancel a clean handler added by add_clean_handler. +-- If `fire` is true, call the clean handler. 
+function _M.cancel_clean_handler(item, idx, fire) + local pos, f + -- the number of pending clean handler is small so we can cancel them in O(n) + for i, clean_handler in ipairs(item.clean_handlers) do + if clean_handler.id == idx then + pos = i + f = clean_handler.f + break + end + end + + if not pos then + log.error("failed to find clean_handler with idx ", idx) + return + end + + core_tab.remove(item.clean_handlers, pos) + if not fire then + return + end + + if f then + f(item) + else + log.error("The function used to clear the health checker is nil, please check") + end +end + + +-- fire all clean handlers added by add_clean_handler. +function _M.fire_all_clean_handlers(item) + -- When the key is deleted, the item will be set to false. + if not item then + return + end + if not item.clean_handlers then + return + end + + for _, clean_handler in ipairs(item.clean_handlers) do + clean_handler.f(item) + end + + item.clean_handlers = {} +end + + +--- +-- Convert different time units to seconds as time units. +-- Time intervals can be specified in milliseconds, seconds, minutes, hours, days and so on, +-- using the following suffixes: +-- ms milliseconds +-- s seconds +-- m minutes +-- h hours +-- d days +-- w weeks +-- M months, 30 days +-- y years, 365 days +-- Multiple units can be combined in a single value by specifying them in the order from the most +-- to the least significant, and optionally separated by whitespace. +-- A value without a suffix means seconds. +-- +-- @function core.config_util.parse_time_unit +-- @tparam number|string s Strings with time units, e.g. "60m". +-- @treturn number Number of seconds after conversion +-- @usage +-- local seconds = core.config_util.parse_time_unit("60m") -- 3600 +function _M.parse_time_unit(s) + local typ = type(s) + if typ == "number" then + return s + end + + if typ ~= "string" or #s == 0 then + return nil, "invalid data: " .. 
tostring(s) + end + + local size = 0 + local size_in_unit = 0 + local step = 60 * 60 * 24 * 365 + local with_ms = false + for i = 1, #s do + local scale + local unit = str_byte(s, i) + if unit == 121 then -- y + scale = 60 * 60 * 24 * 365 + elseif unit == 77 then -- M + scale = 60 * 60 * 24 * 30 + elseif unit == 119 then -- w + scale = 60 * 60 * 24 * 7 + elseif unit == 100 then -- d + scale = 60 * 60 * 24 + elseif unit == 104 then -- h + scale = 60 * 60 + elseif unit == 109 then -- m + unit = str_byte(s, i + 1) + if unit == 115 then -- ms + size = size * 1000 + with_ms = true + step = 0 + break + end + + scale = 60 + + elseif unit == 115 then -- s + scale = 1 + elseif 48 <= unit and unit <= 57 then + size_in_unit = size_in_unit * 10 + unit - 48 + elseif unit ~= 32 then + return nil, "invalid data: " .. str_char(unit) + end + + if scale ~= nil then + if scale > step then + return nil, "unexpected unit: " .. str_char(unit) + end + + step = scale + size = size + scale * size_in_unit + size_in_unit = 0 + end + end + + if size_in_unit > 0 then + if step == 1 then + return nil, "specific unit conflicts with the default unit second" + end + + size = size + size_in_unit + end + + if with_ms then + size = size / 1000 + end + + return size +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/config_xds.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/config_xds.lua new file mode 100644 index 0000000..bdb4520 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/config_xds.lua @@ -0,0 +1,378 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Get configuration form ngx.shared.DICT +-- +-- @module core.config_xds + +local config_local = require("apisix.core.config_local") +local config_util = require("apisix.core.config_util") +local string = require("apisix.core.string") +local log = require("apisix.core.log") +local json = require("apisix.core.json") +local os = require("apisix.core.os") +local ngx_sleep = require("apisix.core.utils").sleep +local check_schema = require("apisix.core.schema").check +local new_tab = require("table.new") +local table = table +local insert_tab = table.insert +local error = error +local pcall = pcall +local tostring = tostring +local setmetatable = setmetatable +local io = io +local io_open = io.open +local io_close = io.close +local package = package +local ipairs = ipairs +local type = type +local sub_str = string.sub +local ffi = require ("ffi") +local C = ffi.C +local config = ngx.shared["xds-config"] +local conf_ver = ngx.shared["xds-config-version"] +local is_http = ngx.config.subsystem == "http" +local ngx_re_match = ngx.re.match +local ngx_re_gmatch = ngx.re.gmatch +local ngx_timer_every = ngx.timer.every +local ngx_timer_at = ngx.timer.at +local exiting = ngx.worker.exiting +local ngx_time = ngx.time + +local xds_lib_name = "libxds.so" + +local process +if is_http then + process = require("ngx.process") +end + +local shdict_udata_to_zone +if not pcall(function() return C.ngx_http_lua_ffi_shdict_udata_to_zone end) then + shdict_udata_to_zone = C.ngx_meta_lua_ffi_shdict_udata_to_zone +else + shdict_udata_to_zone = 
C.ngx_http_lua_ffi_shdict_udata_to_zone
end


ffi.cdef[[
extern void initial(void* config_zone, void* version_zone);
]]

local created_obj = {}

local _M = {
    version = 0.1,
    local_conf = config_local.local_conf,
}


local mt = {
    __index = _M,
    __tostring = function(self)
        return " xds key: " .. self.key
    end
}


-- todo: refactor this function in chash.lua and radixtree.lua
-- Search every directory in package.cpath for `lib_name` and ffi.load() the
-- first file that exists. Returns the loaded library, or nil plus the list
-- of candidate paths that were tried.
local function load_shared_lib(lib_name)
    local cpath = package.cpath
    local tried_paths = new_tab(32, 0)
    local i = 1

    local iter, err = ngx_re_gmatch(cpath, "[^;]+", "jo")
    if not iter then
        error("failed to gmatch: " .. err)
    end

    while true do
        local it = iter()
        -- the iterator returns nil once cpath is exhausted; check BEFORE
        -- indexing it (the original checked at the loop bottom, where `it`
        -- is always non-nil, so `it[0]` crashed with "attempt to index nil"
        -- instead of reaching the `return nil, tried_paths` below)
        if not it then
            break
        end

        local fpath
        fpath, err = ngx_re_match(it[0], "(.*/)", "jo")
        if err then
            error("failed to match: " .. err)
        end
        -- NOTE(review): assumes every cpath entry contains a '/'; `fpath`
        -- would be nil for a bare entry -- TODO confirm against deployments
        local spath = fpath[0] .. lib_name

        local f = io_open(spath)
        if f ~= nil then
            io_close(f)
            return ffi.load(spath)
        end
        tried_paths[i] = spath
        i = i + 1
    end

    return nil, tried_paths
end


-- Load libxds.so and hand it the two shared dicts (config + version) so the
-- xds agent can write configuration into them. Raises on failure.
local function load_libxds(lib_name)
    local xdsagent, tried_paths = load_shared_lib(lib_name)

    if not xdsagent then
        tried_paths[#tried_paths + 1] = 'tried above paths but can not load ' .. lib_name
        error("can not load xds library, tried paths: " .. 
+ table.concat(tried_paths, '\r\n', 1, #tried_paths)) + end + + local config_zone = shdict_udata_to_zone(config[1]) + local config_shd_cdata = ffi.cast("void*", config_zone) + + local conf_ver_zone = shdict_udata_to_zone(conf_ver[1]) + local conf_ver_shd_cdata = ffi.cast("void*", conf_ver_zone) + + xdsagent.initial(config_shd_cdata, conf_ver_shd_cdata) +end + + +local latest_version +local function sync_data(self) + if self.conf_version == latest_version then + return true + end + + if self.values then + for _, val in ipairs(self.values) do + config_util.fire_all_clean_handlers(val) + end + self.values = nil + self.values_hash = nil + end + + local keys = config:get_keys(0) + + if not keys or #keys <= 0 then + -- xds did not write any data to shdict + return false, "no keys" + end + + self.values = new_tab(#keys, 0) + self.values_hash = new_tab(0, #keys) + + for _, key in ipairs(keys) do + if string.has_prefix(key, self.key) then + local data_valid = true + local conf_str = config:get(key, 0) + local conf, err = json.decode(conf_str) + if not conf then + data_valid = false + log.error("decode the conf of [", key, "] failed, err: ", err, + ", conf_str: ", conf_str) + end + + if not self.single_item and type(conf) ~= "table" then + data_valid = false + log.error("invalid conf of [", key, "], conf: ", conf, + ", it should be an object") + end + + if data_valid and self.item_schema then + local ok, err = check_schema(self.item_schema, conf) + if not ok then + data_valid = false + log.error("failed to check the conf of [", key, "] err:", err) + end + end + + if data_valid and self.checker then + local ok, err = self.checker(conf) + if not ok then + data_valid = false + log.error("failed to check the conf of [", key, "] err:", err) + end + end + + if data_valid then + if not conf.id then + conf.id = sub_str(key, #self.key + 2, #key + 1) + log.warn("the id of [", key, "] is nil, use the id: ", conf.id) + end + + local conf_item = {value = conf, modifiedIndex = 
latest_version, + key = key} + insert_tab(self.values, conf_item) + self.values_hash[conf.id] = #self.values + conf_item.clean_handlers = {} + + if self.filter then + self.filter(conf_item) + end + end + end + end + + self.conf_version = latest_version + return true +end + + +local function _automatic_fetch(premature, self) + if premature then + return + end + + local i = 0 + while not exiting() and self.running and i <= 32 do + i = i + 1 + local ok, ok2, err = pcall(sync_data, self) + if not ok then + err = ok2 + log.error("failed to fetch data from xds: ", + err, ", ", tostring(self)) + ngx_sleep(3) + break + elseif not ok2 and err then + -- todo: handler other error + if err ~= "wait for more time" and err ~= "no keys" and self.last_err ~= err then + log.error("failed to fetch data from xds, ", err, ", ", tostring(self)) + end + + if err ~= self.last_err then + self.last_err = err + self.last_err_time = ngx_time() + else + if ngx_time() - self.last_err_time >= 30 then + self.last_err = nil + end + end + ngx_sleep(0.5) + elseif not ok2 then + ngx_sleep(0.05) + else + ngx_sleep(0.1) + end + end + + if not exiting() and self.running then + ngx_timer_at(0, _automatic_fetch, self) + end +end + + +local function fetch_version(premature) + if premature then + return + end + + local version = conf_ver:get("version") + + if not version then + return + end + + if version ~= latest_version then + latest_version = version + end +end + + +function _M.new(key, opts) + local automatic = opts and opts.automatic + local item_schema = opts and opts.item_schema + local filter_fun = opts and opts.filter + local single_item = opts and opts.single_item + local checker = opts and opts.checker + + + local obj = setmetatable({ + automatic = automatic, + item_schema = item_schema, + checker = checker, + sync_times = 0, + running = true, + conf_version = 0, + values = nil, + routes_hash = nil, + prev_index = nil, + last_err = nil, + last_err_time = nil, + key = key, + single_item = 
single_item, + filter = filter_fun, + }, mt) + + if automatic then + if not key then + return nil, "missing `key` argument" + end + + -- blocking until xds completes initial configuration + while true do + os.usleep(1000) + fetch_version() + if latest_version then + break + end + end + + local ok, ok2, err = pcall(sync_data, obj) + if not ok then + err = ok2 + end + + if err then + log.error("failed to fetch data from xds ", + err, ", ", key) + end + + ngx_timer_at(0, _automatic_fetch, obj) + end + + if key then + created_obj[key] = obj + end + + return obj +end + + +function _M.get(self, key) + if not self.values_hash then + return + end + + local arr_idx = self.values_hash[tostring(key)] + if not arr_idx then + return nil + end + + return self.values[arr_idx] +end + + +function _M.fetch_created_obj(key) + return created_obj[key] +end + + +function _M.init_worker() + if process.type() == "privileged agent" then + load_libxds(xds_lib_name) + end + + ngx_timer_every(1, fetch_version) + + return true +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/config_yaml.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/config_yaml.lua new file mode 100644 index 0000000..747b087 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/config_yaml.lua @@ -0,0 +1,579 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Get configuration information in Stand-alone mode. +-- +-- @module core.config_yaml + +local config_local = require("apisix.core.config_local") +local config_util = require("apisix.core.config_util") +local yaml = require("lyaml") +local log = require("apisix.core.log") +local json = require("apisix.core.json") +local new_tab = require("table.new") +local check_schema = require("apisix.core.schema").check +local profile = require("apisix.core.profile") +local lfs = require("lfs") +local file = require("apisix.cli.file") +local exiting = ngx.worker.exiting +local insert_tab = table.insert +local type = type +local ipairs = ipairs +local setmetatable = setmetatable +local ngx_sleep = require("apisix.core.utils").sleep +local ngx_timer_at = ngx.timer.at +local ngx_time = ngx.time +local ngx_shared = ngx.shared +local sub_str = string.sub +local tostring = tostring +local pcall = pcall +local io = io +local ngx = ngx +local re_find = ngx.re.find +local process = require("ngx.process") +local worker_id = ngx.worker.id +local created_obj = {} +local shared_dict +local status_report_shared_dict_name = "status-report" + +local _M = { + version = 0.2, + local_conf = config_local.local_conf, + clear_local_cache = config_local.clear_cache, + + -- yaml or json + file_type = "yaml", + + ERR_NO_SHARED_DICT = "failed prepare standalone config shared dict, this will degrade ".. + "to event broadcasting, and if a worker crashes, the configuration ".. 
+ "cannot be restored from other workers and shared dict" +} + + +local mt = { + __index = _M, + __tostring = function(self) + return "apisix.yaml key: " .. (self.key or "") + end +} + +local apisix_yaml +local apisix_yaml_mtime + +local config_yaml = { + path = profile:yaml_path("apisix"), + type = "yaml", + parse = function(self) + local f, err = io.open(self.path, "r") + if not f then + return nil, "failed to open file " .. self.path .. " : " .. err + end + + f:seek('end', -10) + local end_flag = f:read("*a") + local found_end_flag = re_find(end_flag, [[#END\s*$]], "jo") + + if not found_end_flag then + f:close() + return nil, "missing valid end flag in file " .. self.path + end + + f:seek('set') + local raw_config = f:read("*a") + f:close() + + return yaml.load(raw_config), nil + end +} + +local config_json = { + -- `-5` to remove the "yaml" suffix + path = config_yaml.path:sub(1, -5) .. "json", + type = "json", + parse = function(self) + local f, err = io.open(self.path, "r") + if not f then + return nil, "failed to open file " .. self.path .. " : " .. err + end + local raw_config = f:read("*a") + f:close() + + local config, err = json.decode(raw_config) + if err then + return nil, "failed to decode json: " .. 
err + end + return config, nil + end +} + +local config_file_table = { + yaml = config_yaml, + json = config_json +} + + +local config_file = setmetatable({}, { + __index = function(_, key) + return config_file_table[_M.file_type][key] + end +}) + + +local function sync_status_to_shdict(status) + if process.type() ~= "worker" then + return + end + local status_shdict = ngx.shared[status_report_shared_dict_name] + if not status_shdict then + return + end + local id = worker_id() + log.info("sync status to shared dict, id: ", id, " status: ", status) + status_shdict:set(id, status) +end + + +local function update_config(table, conf_version) + if not table then + log.error("failed update config: empty table") + return + end + + local ok, err = file.resolve_conf_var(table) + if not ok then + log.error("failed to resolve variables:" .. err) + return + end + + apisix_yaml = table + sync_status_to_shdict(true) + apisix_yaml_mtime = conf_version +end +_M._update_config = update_config + + +local function is_use_admin_api() + local local_conf, _ = config_local.local_conf() + return local_conf and local_conf.apisix and local_conf.apisix.enable_admin +end + + +local function read_apisix_config(premature, pre_mtime) + if premature then + return + end + local attributes, err = lfs.attributes(config_file.path) + if not attributes then + log.error("failed to fetch ", config_file.path, " attributes: ", err) + return + end + + local last_modification_time = attributes.modification + if apisix_yaml_mtime == last_modification_time then + return + end + + local config_new, err = config_file:parse() + if err then + log.error("failed to parse the content of file ", config_file.path, ": ", err) + return + end + + update_config(config_new, last_modification_time) + + log.warn("config file ", config_file.path, " reloaded.") +end + + +local function sync_data(self) + if not self.key then + return nil, "missing 'key' arguments" + end + + local conf_version + if is_use_admin_api() then + 
conf_version = apisix_yaml[self.conf_version_key] or 0 + else + if not apisix_yaml_mtime then + log.warn("wait for more time") + return nil, "failed to read local file " .. config_file.path + end + conf_version = apisix_yaml_mtime + end + + if not conf_version or conf_version == self.conf_version then + return true + end + + local items = apisix_yaml[self.key] + if not items then + self.values = new_tab(8, 0) + self.values_hash = new_tab(0, 8) + self.conf_version = conf_version + return true + end + + if self.values and #self.values > 0 then + if is_use_admin_api() then + -- filter self.values to retain only those whose IDs exist in the new items list. + local exist_values = new_tab(8, 0) + self.values_hash = new_tab(0, 8) + + local exist_items = {} + for _, item in ipairs(items) do + exist_items[tostring(item.id)] = true + end + -- remove objects that exist in the self.values but do not exist in the new items. + -- for removed items, trigger cleanup handlers. + for _, item in ipairs(self.values) do + local id = item.value.id + if not exist_items[id] then + config_util.fire_all_clean_handlers(item) + else + insert_tab(exist_values, item) + self.values_hash[id] = #exist_values + end + end + self.values = exist_values + else + for _, item in ipairs(self.values) do + config_util.fire_all_clean_handlers(item) + end + self.values = nil + end + end + + if self.single_item then + -- treat items as a single item + self.values = new_tab(1, 0) + self.values_hash = new_tab(0, 1) + + local item = items + local modifiedIndex = item.modifiedIndex or conf_version + local conf_item = {value = item, modifiedIndex = modifiedIndex, + key = "/" .. 
self.key} + + local data_valid = true + local err + if self.item_schema then + data_valid, err = check_schema(self.item_schema, item) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.delay_encode(item)) + end + + if data_valid and self.checker then + -- TODO: An opts table should be used + -- as different checkers may use different parameters + data_valid, err = self.checker(item, conf_item.key) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.delay_encode(item)) + end + end + end + + if data_valid then + insert_tab(self.values, conf_item) + self.values_hash[self.key] = #self.values + conf_item.clean_handlers = {} + + if self.filter then + self.filter(conf_item) + end + end + + else + if not self.values then + self.values = new_tab(8, 0) + self.values_hash = new_tab(0, 8) + end + + local err + for i, item in ipairs(items) do + local idx = tostring(i) + local data_valid = true + if type(item) ~= "table" then + data_valid = false + log.error("invalid item data of [", self.key .. "/" .. idx, + "], val: ", json.delay_encode(item), + ", it should be an object") + end + + local id = item.id or item.username or ("arr_" .. idx) + local modifiedIndex = item.modifiedIndex or conf_version + local conf_item = {value = item, modifiedIndex = modifiedIndex, + key = "/" .. self.key .. "/" .. 
id} + + if data_valid and self.item_schema then + data_valid, err = check_schema(self.item_schema, item) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.delay_encode(item)) + end + end + + if data_valid and self.checker then + data_valid, err = self.checker(item, conf_item.key) + if not data_valid then + log.error("failed to check item data of [", self.key, + "] err:", err, " ,val: ", json.delay_encode(item)) + end + end + + if data_valid then + local item_id = tostring(id) + local pre_index = self.values_hash[item_id] + if pre_index then + -- remove the old item + local pre_val = self.values[pre_index] + if pre_val and + (not item.modifiedIndex or pre_val.modifiedIndex ~= item.modifiedIndex) then + config_util.fire_all_clean_handlers(pre_val) + self.values[pre_index] = conf_item + conf_item.value.id = item_id + conf_item.clean_handlers = {} + end + else + insert_tab(self.values, conf_item) + self.values_hash[item_id] = #self.values + conf_item.value.id = item_id + conf_item.clean_handlers = {} + end + + if self.filter then + self.filter(conf_item) + end + end + end + end + + self.conf_version = conf_version + return true +end + + +function _M.get(self, key) + if not self.values_hash then + return + end + + local arr_idx = self.values_hash[tostring(key)] + if not arr_idx then + return nil + end + + return self.values[arr_idx] +end + + +local function _automatic_fetch(premature, self) + if premature then + return + end + + -- the _automatic_fetch is only called in the timer, and according to the + -- documentation, ngx.shared.DICT.get can be executed there. 
+ -- if the file's global variables have not yet been assigned values, + -- we can assume that the worker has not been initialized yet and try to + -- read any old data that may be present from the shared dict + -- try load from shared dict only on first startup, otherwise use event mechanism + if is_use_admin_api() and not shared_dict then + log.info("try to load config from shared dict") + + local config, err + shared_dict = ngx_shared["standalone-config"] -- init shared dict in current worker + if not shared_dict then + log.error("failed to read config from shared dict: shared dict not found") + goto SKIP_SHARED_DICT + end + config, err = shared_dict:get("config") + if not config then + if err then -- if the key does not exist, the return values are both nil + log.error("failed to read config from shared dict: ", err) + end + log.info("no config found in shared dict") + goto SKIP_SHARED_DICT + end + log.info("startup config loaded from shared dict: ", config) + + config, err = json.decode(tostring(config)) + if not config then + log.error("failed to decode config from shared dict: ", err) + goto SKIP_SHARED_DICT + end + _M._update_config(config) + log.info("config loaded from shared dict") + + ::SKIP_SHARED_DICT:: + if not shared_dict then + log.crit(_M.ERR_NO_SHARED_DICT) + + -- fill that value to make the worker not try to read from shared dict again + shared_dict = "error" + end + end + + local i = 0 + while not exiting() and self.running and i <= 32 do + i = i + 1 + local ok, ok2, err = pcall(sync_data, self) + if not ok then + err = ok2 + log.error("failed to fetch data from local file " .. config_file.path .. ": ", + err, ", ", tostring(self)) + ngx_sleep(3) + break + + elseif not ok2 and err then + if err ~= "timeout" and err ~= "Key not found" + and self.last_err ~= err then + log.error("failed to fetch data from local file " .. config_file.path .. 
": ", + err, ", ", tostring(self)) + end + + if err ~= self.last_err then + self.last_err = err + self.last_err_time = ngx_time() + else + if ngx_time() - self.last_err_time >= 30 then + self.last_err = nil + end + end + ngx_sleep(0.5) + + elseif not ok2 then + ngx_sleep(0.05) + + else + ngx_sleep(0.1) + end + end + + if not exiting() and self.running then + ngx_timer_at(0, _automatic_fetch, self) + end +end + + +function _M.new(key, opts) + local local_conf, err = config_local.local_conf() + if not local_conf then + return nil, err + end + + local automatic = opts and opts.automatic + local item_schema = opts and opts.item_schema + local filter_fun = opts and opts.filter + local single_item = opts and opts.single_item + local checker = opts and opts.checker + + -- like /routes and /upstreams, remove first char `/` + if key then + key = sub_str(key, 2) + end + + local obj = setmetatable({ + automatic = automatic, + item_schema = item_schema, + checker = checker, + sync_times = 0, + running = true, + conf_version = 0, + values = nil, + routes_hash = nil, + prev_index = nil, + last_err = nil, + last_err_time = nil, + key = key, + conf_version_key = key and key .. "_conf_version", + single_item = single_item, + filter = filter_fun, + }, mt) + + if automatic then + if not key then + return nil, "missing `key` argument" + end + + local ok, ok2, err = pcall(sync_data, obj) + if not ok then + err = ok2 + end + + if err then + log.error("failed to fetch data from local file ", config_file.path, ": ", + err, ", ", key) + end + + ngx_timer_at(0, _automatic_fetch, obj) + end + + if key then + created_obj[key] = obj + end + + return obj +end + + +function _M.close(self) + self.running = false +end + + +function _M.server_version(self) + return "apisix.yaml " .. 
_M.version +end + + +function _M.fetch_created_obj(key) + return created_obj[sub_str(key, 2)] +end + + +function _M.fetch_all_created_obj() + return created_obj +end + + +function _M.init() + if is_use_admin_api() then + return true + end + + read_apisix_config() + return true +end + + +function _M.init_worker() + sync_status_to_shdict(false) + if is_use_admin_api() then + apisix_yaml = {} + apisix_yaml_mtime = 0 + return true + end + + -- sync data in each non-master process + ngx.timer.every(1, read_apisix_config) + + return true +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/ctx.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/ctx.lua new file mode 100644 index 0000000..c6f66fb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/ctx.lua @@ -0,0 +1,463 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Define the request context. 
+-- +-- @module core.ctx + +local core_str = require("apisix.core.string") +local core_tab = require("apisix.core.table") +local request = require("apisix.core.request") +local log = require("apisix.core.log") +local json = require("apisix.core.json") +local config_local = require("apisix.core.config_local") +local tablepool = require("tablepool") +local get_var = require("resty.ngxvar").fetch +local get_request = require("resty.ngxvar").request +local ck = require "resty.cookie" +local multipart = require("multipart") +local util = require("apisix.cli.util") +local gq_parse = require("graphql").parse +local jp = require("jsonpath") +local setmetatable = setmetatable +local sub_str = string.sub +local ngx = ngx +local ngx_var = ngx.var +local re_gsub = ngx.re.gsub +local ipairs = ipairs +local type = type +local error = error +local pcall = pcall + + +local _M = {version = 0.2} +local GRAPHQL_DEFAULT_MAX_SIZE = 1048576 -- 1MiB +local GRAPHQL_REQ_DATA_KEY = "query" +local GRAPHQL_REQ_METHOD_HTTP_GET = "GET" +local GRAPHQL_REQ_METHOD_HTTP_POST = "POST" +local GRAPHQL_REQ_MIME_JSON = "application/json" + + +local fetch_graphql_data = { + [GRAPHQL_REQ_METHOD_HTTP_GET] = function(ctx, max_size) + local body = request.get_uri_args(ctx)[GRAPHQL_REQ_DATA_KEY] + if not body then + return nil, "failed to read graphql data, args[" .. + GRAPHQL_REQ_DATA_KEY .. "] is nil" + end + + if type(body) == "table" then + body = body[1] + end + + return body + end, + + [GRAPHQL_REQ_METHOD_HTTP_POST] = function(ctx, max_size) + local body, err = request.get_body(max_size, ctx) + if not body then + return nil, "failed to read graphql data, " .. (err or "request body has zero size") + end + + if request.header(ctx, "Content-Type") == GRAPHQL_REQ_MIME_JSON then + local res + res, err = json.decode(body) + if not res then + return nil, "failed to read graphql data, " .. err + end + + if not res[GRAPHQL_REQ_DATA_KEY] then + return nil, "failed to read graphql data, json body[" .. 
+ GRAPHQL_REQ_DATA_KEY .. "] is nil" + end + + body = res[GRAPHQL_REQ_DATA_KEY] + end + + return body + end +} + + +local function parse_graphql(ctx) + local local_conf, err = config_local.local_conf() + if not local_conf then + return nil, "failed to get local conf: " .. err + end + + local max_size = GRAPHQL_DEFAULT_MAX_SIZE + local size = core_tab.try_read_attr(local_conf, "graphql", "max_size") + if size then + max_size = size + end + + local method = request.get_method() + local func = fetch_graphql_data[method] + if not func then + return nil, "graphql not support `" .. method .. "` request" + end + + local body + body, err = func(ctx, max_size) + if not body then + return nil, err + end + + local ok, res = pcall(gq_parse, body) + if not ok then + return nil, "failed to parse graphql: " .. res .. " body: " .. body + end + + if #res.definitions == 0 then + return nil, "empty graphql: " .. body + end + + return res +end + + +local function get_parsed_graphql() + local ctx = ngx.ctx.api_ctx + if ctx._graphql then + return ctx._graphql + end + + local res, err = parse_graphql(ctx) + if not res then + log.error(err) + ctx._graphql = {} + return ctx._graphql + end + + if #res.definitions > 1 then + log.warn("Multiple operations are not supported.", + "Only the first one is handled") + end + + local def = res.definitions[1] + local fields = def.selectionSet.selections + local root_fields = core_tab.new(#fields, 0) + for i, f in ipairs(fields) do + root_fields[i] = f.name.value + end + + local name = "" + if def.name and def.name.value then + name = def.name.value + end + + ctx._graphql = { + name = name, + operation = def.operation, + root_fields = root_fields, + } + + return ctx._graphql +end + + +local CONTENT_TYPE_JSON = "application/json" +local CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" +local CONTENT_TYPE_MULTIPART_FORM = "multipart/form-data" + +local function get_parsed_request_body(ctx) + local ct_header = request.header(ctx, 
"Content-Type") or "" + + if core_str.find(ct_header, CONTENT_TYPE_JSON) then + local request_table, err = request.get_json_request_body_table() + if not request_table then + return nil, "failed to parse JSON body: " .. err + end + return request_table + end + + if core_str.find(ct_header, CONTENT_TYPE_FORM_URLENCODED) then + local args, err = request.get_post_args() + if not args then + return nil, "failed to parse form data: " .. (err or "unknown error") + end + return args + end + + if core_str.find(ct_header, CONTENT_TYPE_MULTIPART_FORM) then + local body = request.get_body() + local res = multipart(body, ct_header) + if not res then + return nil, "failed to parse multipart form data" + end + return res:get_all() + end + + local err = "unsupported content-type in header: " .. ct_header .. + ", supported types are: " .. + CONTENT_TYPE_JSON .. ", " .. + CONTENT_TYPE_FORM_URLENCODED .. ", " .. + CONTENT_TYPE_MULTIPART_FORM + return nil, err +end + + +do + local var_methods = { + method = ngx.req.get_method, + cookie = function () + if ngx.var.http_cookie then + return ck:new() + end + end + } + + local no_cacheable_var_names = { + -- var.args should not be cached as it can be changed via set_uri_args + args = true, + is_args = true, + } + + local ngx_var_names = { + upstream_scheme = true, + upstream_host = true, + upstream_upgrade = true, + upstream_connection = true, + upstream_uri = true, + + upstream_mirror_host = true, + upstream_mirror_uri = true, + + upstream_cache_zone = true, + upstream_cache_zone_info = true, + upstream_no_cache = true, + upstream_cache_key = true, + upstream_cache_bypass = true, + + var_x_forwarded_proto = true, + var_x_forwarded_port = true, + var_x_forwarded_host = true, + } + + -- sort in alphabetical + local apisix_var_names = { + balancer_ip = true, + balancer_port = true, + consumer_group_id = true, + consumer_name = true, + resp_body = function(ctx) + -- only for logger and requires the logger to have a special configuration + 
return ctx.resp_body or '' + end, + route_id = true, + route_name = true, + service_id = true, + service_name = true, + } + + local mt = { + __index = function(t, key) + local cached = t._cache[key] + if cached ~= nil then + log.debug("serving ctx value from cache for key: ", key) + return cached + end + + if type(key) ~= "string" then + error("invalid argument, expect string value", 2) + end + + local val + local method = var_methods[key] + if method then + val = method() + + elseif core_str.has_prefix(key, "cookie_") then + local cookie = t.cookie + if cookie then + local err + val, err = cookie:get(sub_str(key, 8)) + if err then + log.warn("failed to fetch cookie value by key: ", + key, " error: ", err) + end + end + + elseif core_str.has_prefix(key, "arg_") then + local arg_key = sub_str(key, 5) + local args = request.get_uri_args()[arg_key] + if args then + if type(args) == "table" then + val = args[1] + else + val = args + end + end + + elseif core_str.has_prefix(key, "post_arg_") then + -- only match default post form + local content_type = request.header(nil, "Content-Type") + if content_type ~= nil and core_str.has_prefix(content_type, + "application/x-www-form-urlencoded") then + local arg_key = sub_str(key, 10) + local args = request.get_post_args()[arg_key] + if args then + if type(args) == "table" then + val = args[1] + else + val = args + end + end + end + + elseif core_str.has_prefix(key, "uri_param_") then + -- `uri_param_` provides access to the uri parameters when using + -- radixtree_uri_with_parameter + if t._ctx.curr_req_matched then + local arg_key = sub_str(key, 11) + val = t._ctx.curr_req_matched[arg_key] + end + + elseif core_str.has_prefix(key, "http_") then + local arg_key = key:lower() + arg_key = re_gsub(arg_key, "-", "_", "jo") + val = get_var(arg_key, t._request) + + elseif core_str.has_prefix(key, "graphql_") then + -- trim the "graphql_" prefix + local arg_key = sub_str(key, 9) + val = get_parsed_graphql()[arg_key] + elseif 
core_str.has_prefix(key, "post_arg.") then + -- trim the "post_arg." prefix (10 characters) + local arg_key = sub_str(key, 10) + local parsed_body, err = get_parsed_request_body(t._ctx) + if not parsed_body then + log.warn("failed to fetch post args value by key: ", arg_key, " error: ", err) + return nil + end + if arg_key:find("[%[%*]") or arg_key:find("..", 1, true) then + arg_key = "$." .. arg_key + local results = jp.query(parsed_body, arg_key) + if #results == 0 then + val = nil + else + val = results + end + else + local parts = util.split(arg_key, "(.)") + local current = parsed_body + for _, part in ipairs(parts) do + if type(current) ~= "table" then + current = nil + break + end + current = current[part] + end + val = current + end + + else + local getter = apisix_var_names[key] + if getter then + local ctx = t._ctx + if getter == true then + val = ctx and ctx[key] + else + -- the getter is registered by ctx.register_var + val = getter(ctx) + end + + else + val = get_var(key, t._request) + end + end + + if val ~= nil and not no_cacheable_var_names[key] then + t._cache[key] = val + end + + return val + end, + + __newindex = function(t, key, val) + if ngx_var_names[key] then + ngx_var[key] = val + end + + -- log.info("key: ", key, " new val: ", val) + t._cache[key] = val + end, + } + +--- +-- Register custom variables. +-- Register variables globally, and use them as normal builtin variables. +-- Note that the custom variables can't be used in features that depend +-- on the Nginx directive, like `access_log_format`. +-- +-- @function core.ctx.register_var +-- @tparam string name custom variable name +-- @tparam function getter The fetch function for custom variables. 
+-- @tparam table opts An optional options table which controls the behavior about the variable +-- @usage +-- local core = require "apisix.core" +-- +-- core.ctx.register_var("a6_labels_zone", function(ctx) +-- local route = ctx.matched_route and ctx.matched_route.value +-- if route and route.labels then +-- return route.labels.zone +-- end +-- return nil +-- end) +-- +-- We support the options below in the `opts`: +-- * no_cacheable: if the result of getter is cacheable or not. Default to `false`. +function _M.register_var(name, getter, opts) + if type(getter) ~= "function" then + error("the getter of registered var should be a function") + end + + apisix_var_names[name] = getter + + if opts then + if opts.no_cacheable then + no_cacheable_var_names[name] = true + end + end +end + +function _M.set_vars_meta(ctx) + local var = tablepool.fetch("ctx_var", 0, 32) + if not var._cache then + var._cache = {} + end + + var._request = get_request() + var._ctx = ctx + setmetatable(var, mt) + ctx.var = var +end + +function _M.release_vars(ctx) + if ctx.var == nil then + return + end + + core_tab.clear(ctx.var._cache) + tablepool.release("ctx_var", ctx.var, true) + ctx.var = nil +end + +end -- do + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/dns/client.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/dns/client.lua new file mode 100644 index 0000000..1bf2aca --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/dns/client.lua @@ -0,0 +1,164 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Wrapped dns search client. +-- +-- @module core.dns.client + +local require = require +local config_local = require("apisix.core.config_local") +local log = require("apisix.core.log") +local json = require("apisix.core.json") +local table = require("apisix.core.table") +local gcd = require("apisix.core.math").gcd +local insert_tab = table.insert +local math_random = math.random +local package_loaded = package.loaded +local ipairs = ipairs +local table_remove = table.remove +local setmetatable = setmetatable + + +local _M = { + RETURN_RANDOM = 1, + RETURN_ALL = 2, +} + + +local function resolve_srv(client, answers) + if #answers == 0 then + return nil, "empty SRV record" + end + + local resolved_answers = {} + local answer_to_count = {} + for _, answer in ipairs(answers) do + if answer.type ~= client.TYPE_SRV then + return nil, "mess SRV with other record" + end + + local resolved, err = client.resolve(answer.target) + if not resolved then + local msg = "failed to resolve SRV record " .. answer.target .. ": " .. 
err + return nil, msg + end + + log.info("dns resolve SRV ", answer.target, ", result: ", + json.delay_encode(resolved)) + + local weight = answer.weight + if weight == 0 then + weight = 1 + end + + local count = #resolved + answer_to_count[answer] = count + -- one target may have multiple resolved results + for _, res in ipairs(resolved) do + local copy = table.deepcopy(res) + copy.weight = weight / count + copy.port = answer.port + copy.priority = answer.priority + insert_tab(resolved_answers, copy) + end + end + + -- find the least common multiple of the counts + local lcm = answer_to_count[answers[1]] + for i = 2, #answers do + local count = answer_to_count[answers[i]] + lcm = count * lcm / gcd(count, lcm) + end + -- fix the weight as the weight should be integer + for _, res in ipairs(resolved_answers) do + res.weight = res.weight * lcm + end + + return resolved_answers +end + + +function _M.resolve(self, domain, selector) + local client = self.client + + -- this function will dereference the CNAME records + local answers, err = client.resolve(domain) + if not answers then + return nil, "failed to query the DNS server: " .. err + end + + if answers.errcode then + return nil, "server returned error code: " .. answers.errcode + .. ": " .. 
answers.errstr + end + + if selector == _M.RETURN_ALL then + log.info("dns resolve ", domain, ", result: ", json.delay_encode(answers)) + for _, answer in ipairs(answers) do + if answer.type == client.TYPE_SRV then + return resolve_srv(client, answers) + end + end + return table.deepcopy(answers) + end + + local idx = math_random(1, #answers) + local answer = answers[idx] + local dns_type = answer.type + if dns_type == client.TYPE_A or dns_type == client.TYPE_AAAA then + log.info("dns resolve ", domain, ", result: ", json.delay_encode(answer)) + return table.deepcopy(answer) + end + + return nil, "unsupported DNS answer" +end + + +function _M.new(opts) + local local_conf = config_local.local_conf() + + if opts.enable_ipv6 == nil then + opts.enable_ipv6 = local_conf.apisix.enable_ipv6 + end + + -- ensure the resolver throws an error when ipv6 is disabled + if not opts.enable_ipv6 then + for i, v in ipairs(opts.order) do + if v == "AAAA" then + table_remove(opts.order, i) + break + end + end + end + + opts.timeout = 2000 -- 2 sec + opts.retrans = 5 -- 5 retransmissions on receive timeout + + -- make sure each client has its separate room + package_loaded["resty.dns.client"] = nil + local dns_client_mod = require("resty.dns.client") + + local ok, err = dns_client_mod.init(opts) + if not ok then + return nil, "failed to init the dns client: " .. err + end + + return setmetatable({client = dns_client_mod}, {__index = _M}) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/env.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/env.lua new file mode 100644 index 0000000..6a57a70 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/env.lua @@ -0,0 +1,109 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local ffi = require "ffi" + +local json = require("apisix.core.json") +local log = require("apisix.core.log") +local string = require("apisix.core.string") + +local os = os +local type = type +local upper = string.upper +local find = string.find +local sub = string.sub +local str = ffi.string + +local ENV_PREFIX = "$ENV://" + +local _M = { + PREFIX = ENV_PREFIX +} + + +local apisix_env_vars = {} + +ffi.cdef [[ + extern char **environ; +]] + + +function _M.init() + local e = ffi.C.environ + if not e then + log.warn("could not access environment variables") + return + end + + local i = 0 + while e[i] ~= nil do + local var = str(e[i]) + local p = find(var, "=") + if p then + apisix_env_vars[sub(var, 1, p - 1)] = sub(var, p + 1) + end + + i = i + 1 + end +end + + +local function parse_env_uri(env_uri) + -- Avoid the error caused by has_prefix to cause a crash. + if type(env_uri) ~= "string" then + return nil, "error env_uri type: " .. type(env_uri) + end + + if not string.has_prefix(upper(env_uri), ENV_PREFIX) then + return nil, "error env_uri prefix: " .. 
env_uri + end + + local path = sub(env_uri, #ENV_PREFIX + 1) + local idx = find(path, "/") + if not idx then + return {key = path, sub_key = ""} + end + local key = sub(path, 1, idx - 1) + local sub_key = sub(path, idx + 1) + + return { + key = key, + sub_key = sub_key + } +end + + +function _M.fetch_by_uri(env_uri) + log.info("fetching data from env uri: ", env_uri) + local opts, err = parse_env_uri(env_uri) + if not opts then + return nil, err + end + + local main_value = apisix_env_vars[opts.key] or os.getenv(opts.key) + if main_value and opts.sub_key ~= "" then + local vt, err = json.decode(main_value) + if not vt then + return nil, "decode failed, err: " .. (err or "") .. ", value: " .. main_value + end + return vt[opts.sub_key] + end + + return main_value +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/etcd.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/etcd.lua new file mode 100644 index 0000000..3caa2f1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/etcd.lua @@ -0,0 +1,676 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Etcd API. 
+--
+-- @module core.etcd
+
+local require = require
+local fetch_local_conf = require("apisix.core.config_local").local_conf
+local array_mt = require("apisix.core.json").array_mt
+local log = require("apisix.core.log")
+local try_read_attr = require("apisix.core.table").try_read_attr
+local v3_adapter = require("apisix.admin.v3_adapter")
+local etcd = require("resty.etcd")
+local clone_tab = require("table.clone")
+local health_check = require("resty.etcd.health_check")
+local pl_path = require("pl.path")
+local ipairs = ipairs
+local setmetatable = setmetatable
+local string = string
+local tonumber = tonumber
+local ngx_get_phase = ngx.get_phase
+
+
+local _M = {}
+
+
+local NOT_ALLOW_WRITE_ETCD_WARN = 'Data plane role should not write to etcd. ' ..
+                                  'This operation will be deprecated in future releases.'
+
+-- Returns true when the local deployment role is "data_plane", false for any
+-- other role, and nil plus an error message when the local configuration
+-- cannot be fetched.
+local function is_data_plane()
+    local local_conf, err = fetch_local_conf()
+    if not local_conf then
+        return nil, err
+    end
+
+    local role = try_read_attr(local_conf, "deployment", "role")
+    if role == "data_plane" then
+        return true
+    end
+
+    return false
+end
+
+
+-- Decide whether an etcd write should be blocked on this node.
+-- Returns true (block) only when the role check itself fails; a data-plane
+-- role currently just logs a warning and still allows the write.
+local function disable_write_if_data_plane()
+    local data_plane, err = is_data_plane()
+    if err then
+        log.error("failed to check data plane role: ", err)
+        return true, err
+    end
+
+    if data_plane then
+        -- currently we only warn; in future releases this will return true
+        -- to actually block the etcd write
+        log.warn(NOT_ALLOW_WRITE_ETCD_WARN)
+        return false
+    end
+
+    return false, nil
+end
+
+
+-- Wrap every mutating method of the etcd client so that each call first
+-- consults disable_write_if_data_plane(). Read-only methods are untouched.
+-- Returns the (mutated in place) client, or nil plus an error message when
+-- an expected method is missing.
+local function wrap_etcd_client(etcd_cli)
+    -- note: methods txn can read and write, don't use txn to write when data plane role
+    local methods_to_wrap = {
+        "set",
+        "setnx",
+        "setx",
+        "delete",
+        "rmdir",
+        "grant",
+        "revoke",
+        "keepalive"
+    }
+
+    -- keep the originals so the wrappers can delegate to them
+    local original_methods = {}
+    for _, method in ipairs(methods_to_wrap) do
+        if not etcd_cli[method] then
+            log.error("method ", method, " not found in etcd client")
+            return nil, "method " .. method .. " not found in etcd client"
+        end
+
+        original_methods[method] = etcd_cli[method]
+    end
+
+    for _, method in ipairs(methods_to_wrap) do
+        etcd_cli[method] = function(self, ...)
+            local disable, err = disable_write_if_data_plane()
+            if disable then
+                return nil, err
+            end
+
+            return original_methods[method](self, ...)
+        end
+    end
+
+    return etcd_cli
+end
+
+
+-- Build a resty.etcd v3 client from a mutable copy of the `etcd` section of
+-- the local config. Returns client, key prefix -- or nil, nil, err on failure.
+-- NOTE: mutates the passed-in table (renames host -> http_host, clears
+-- prefix), so callers must pass a clone, as new()/new_without_proxy() do.
+local function _new(etcd_conf)
+    local prefix = etcd_conf.prefix
+    etcd_conf.http_host = etcd_conf.host
+    etcd_conf.host = nil
+    etcd_conf.prefix = nil
+    etcd_conf.protocol = "v3"
+    etcd_conf.api_prefix = "/v3"
+
+    -- default to verify etcd cluster certificate
+    etcd_conf.ssl_verify = true
+    if etcd_conf.tls then
+        if etcd_conf.tls.verify == false then
+            etcd_conf.ssl_verify = false
+        end
+
+        if etcd_conf.tls.cert then
+            etcd_conf.ssl_cert_path = etcd_conf.tls.cert
+            etcd_conf.ssl_key_path = etcd_conf.tls.key
+        end
+
+        if etcd_conf.tls.sni then
+            etcd_conf.sni = etcd_conf.tls.sni
+        end
+    end
+
+    local etcd_cli, err = etcd.new(etcd_conf)
+    if not etcd_cli then
+        return nil, nil, err
+    end
+
+    etcd_cli = wrap_etcd_client(etcd_cli)
+
+    return etcd_cli, prefix
+end
+
+
+---
+-- Create an etcd client which will connect to etcd without being proxyed by conf server.
+-- This method is used in init_worker phase when the conf server is not ready.
+--
+-- @function core.etcd.new_without_proxy
+-- @treturn table|nil the etcd client, or nil if failed.
+-- @treturn string|nil the configured prefix of etcd keys, or nil if failed.
+-- @treturn nil|string the error message.
+local function new_without_proxy() + local local_conf, err = fetch_local_conf() + if not local_conf then + return nil, nil, err + end + + local etcd_conf = clone_tab(local_conf.etcd) + + if local_conf.apisix.ssl and local_conf.apisix.ssl.ssl_trusted_certificate then + etcd_conf.trusted_ca = local_conf.apisix.ssl.ssl_trusted_certificate + end + + return _new(etcd_conf) +end +_M.new_without_proxy = new_without_proxy + + +local function new() + local local_conf, err = fetch_local_conf() + if not local_conf then + return nil, nil, err + end + + local etcd_conf = clone_tab(local_conf.etcd) + + if local_conf.apisix.ssl and local_conf.apisix.ssl.ssl_trusted_certificate then + etcd_conf.trusted_ca = local_conf.apisix.ssl.ssl_trusted_certificate + end + + if not health_check.conf then + health_check.init({ + max_fails = 1, + retry = true, + }) + end + + return _new(etcd_conf) +end +_M.new = new + + +local function switch_proxy() + if ngx_get_phase() == "init" or ngx_get_phase() == "init_worker" then + return new_without_proxy() + end + + local etcd_cli, prefix, err = new() + if not etcd_cli or err then + return etcd_cli, prefix, err + end + + if not etcd_cli.unix_socket_proxy then + return etcd_cli, prefix, err + end + local sock_path = etcd_cli.unix_socket_proxy:sub(#"unix:" + 1) + local ok = pl_path.exists(sock_path) + if not ok then + return new_without_proxy() + end + + return etcd_cli, prefix, err +end +_M.get_etcd_syncer = switch_proxy + +-- convert ETCD v3 entry to v2 one +local function kvs_to_node(kvs) + local node = {} + node.key = kvs.key + node.value = kvs.value + node.createdIndex = tonumber(kvs.create_revision) + node.modifiedIndex = tonumber(kvs.mod_revision) + return node +end +_M.kvs_to_node = kvs_to_node + +local function kvs_to_nodes(res, exclude_dir) + res.body.node.dir = true + res.body.node.nodes = setmetatable({}, array_mt) + if exclude_dir then + for i=2, #res.body.kvs do + res.body.node.nodes[i-1] = kvs_to_node(res.body.kvs[i]) + end + else + for 
i=1, #res.body.kvs do + res.body.node.nodes[i] = kvs_to_node(res.body.kvs[i]) + end + end + return res +end + + +local function not_found(res) + res.body.message = "Key not found" + res.reason = "Not found" + res.status = 404 + return res +end + + +-- When `is_dir` is true, returns the value of both the dir key and its descendants. +-- Otherwise, return the value of key only. +function _M.get_format(res, real_key, is_dir, formatter) + if res.body.error == "etcdserver: user name is empty" then + return nil, "insufficient credentials code: 401" + end + + if res.body.error == "etcdserver: permission denied" then + return nil, "etcd forbidden code: 403" + end + + if res.body.error then + -- other errors, like "grpc: received message larger than max" + return nil, res.body.error + end + + res.headers["X-Etcd-Index"] = res.body.header.revision + + if not res.body.kvs then + return not_found(res) + end + + v3_adapter.to_v3(res.body, "get") + + if formatter then + return formatter(res) + end + + if not is_dir then + local key = res.body.kvs[1].key + if key ~= real_key then + return not_found(res) + end + + res.body.node = kvs_to_node(res.body.kvs[1]) + + else + -- In etcd v2, the direct key asked for is `node`, others which under this dir are `nodes` + -- While in v3, this structure is flatten and all keys related the key asked for are `kvs` + res.body.node = kvs_to_node(res.body.kvs[1]) + -- we have a init_dir (for etcd v2) value that can't be deserialized with json, + -- but we don't put init_dir for new resource type like consumer credential + if not res.body.kvs[1].value then + -- remove last "/" when necessary + if string.byte(res.body.node.key, -1) == 47 then + res.body.node.key = string.sub(res.body.node.key, 1, #res.body.node.key-1) + end + res = kvs_to_nodes(res, true) + else + -- get dir key by remove last part of node key, + -- for example: /apisix/consumers/jack -> /apisix/consumers + local last_slash_index = string.find(res.body.node.key, "/[^/]*$") + if 
last_slash_index then + res.body.node.key = string.sub(res.body.node.key, 1, last_slash_index-1) + end + res = kvs_to_nodes(res, false) + end + end + + res.body.kvs = nil + v3_adapter.to_v3_list(res.body) + return res +end + + +function _M.watch_format(v3res) + local v2res = {} + v2res.headers = { + ["X-Etcd-Index"] = v3res.result.header.revision + } + v2res.body = { + node = {} + } + + local compact_revision = v3res.result.compact_revision + if compact_revision and tonumber(compact_revision) > 0 then + -- When the revisions are compacted, there might be compacted changes + -- which are unsynced. So we need to do a fully sync. + -- TODO: cover this branch in CI + return nil, "compacted" + end + + for i, event in ipairs(v3res.result.events) do + v2res.body.node[i] = kvs_to_node(event.kv) + if event.type == "DELETE" then + v2res.body.action = "delete" + end + end + + return v2res +end + + +local get_etcd_cli +do + local prefix + local etcd_cli_init_phase + local etcd_cli + local tmp_etcd_cli + + function get_etcd_cli() + local err + if ngx_get_phase() == "init" or ngx_get_phase() == "init_worker" then + if etcd_cli_init_phase == nil then + tmp_etcd_cli, prefix, err = new_without_proxy() + if not tmp_etcd_cli then + return nil, nil, err + end + + return tmp_etcd_cli, prefix + end + + return etcd_cli_init_phase, prefix + end + + if etcd_cli_init_phase ~= nil then + -- we can't share the etcd instance created in init* phase + -- they have different configuration + etcd_cli_init_phase:close() + etcd_cli_init_phase = nil + end + + if etcd_cli == nil then + tmp_etcd_cli, prefix, err = switch_proxy() + if not tmp_etcd_cli then + return nil, nil, err + end + + etcd_cli = tmp_etcd_cli + + return tmp_etcd_cli, prefix + end + + return etcd_cli, prefix + end +end +-- export it so we can mock the etcd cli in test +_M.get_etcd_cli = get_etcd_cli + + +function _M.get(key, is_dir) + local etcd_cli, prefix, err = get_etcd_cli() + if not etcd_cli then + return nil, err + end + + key = 
prefix .. key + + -- in etcd v2, get could implicitly turn into readdir + -- while in v3, we need to do it explicitly + local res, err = etcd_cli:readdir(key) + if not res then + return nil, err + end + return _M.get_format(res, key, is_dir) +end + + +local function set(key, value, ttl) + local disable, err = disable_write_if_data_plane() + if disable then + return nil, err + end + + + local etcd_cli, prefix, err = get_etcd_cli() + if not etcd_cli then + return nil, err + end + + -- lease substitute ttl in v3 + local res, err + if ttl then + local data, grant_err = etcd_cli:grant(tonumber(ttl)) + if not data then + return nil, grant_err + end + + res, err = etcd_cli:set(prefix .. key, value, {prev_kv = true, lease = data.body.ID}) + if not res then + return nil, err + end + + res.body.lease_id = data.body.ID + else + res, err = etcd_cli:set(prefix .. key, value, {prev_kv = true}) + end + if not res then + return nil, err + end + + if res.body.error then + return nil, res.body.error + end + + res.headers["X-Etcd-Index"] = res.body.header.revision + + -- etcd v3 set would not return kv info + v3_adapter.to_v3(res.body, "set") + res.body.node = {} + res.body.node.key = prefix .. key + res.body.node.value = value + res.status = 201 + if res.body.prev_kv then + res.status = 200 + res.body.prev_kv = nil + end + + return res, nil +end +_M.set = set + + +function _M.atomic_set(key, value, ttl, mod_revision) + local disable, err = disable_write_if_data_plane() + if disable then + return nil, err + end + + local etcd_cli, prefix, err = get_etcd_cli() + if not etcd_cli then + return nil, err + end + + local lease_id + if ttl then + local data, grant_err = etcd_cli:grant(tonumber(ttl)) + if not data then + return nil, grant_err + end + + lease_id = data.body.ID + end + + key = prefix .. 
key + + local compare = { + { + key = key, + target = "MOD", + result = "EQUAL", + mod_revision = mod_revision, + } + } + + local success = { + { + requestPut = { + key = key, + value = value, + lease = lease_id, + } + } + } + + local res, err = etcd_cli:txn(compare, success) + if not res then + return nil, err + end + + if not res.body.succeeded then + return nil, "value changed before overwritten" + end + + res.headers["X-Etcd-Index"] = res.body.header.revision + -- etcd v3 set would not return kv info + v3_adapter.to_v3(res.body, "compareAndSwap") + res.body.node = { + key = key, + value = value, + } + + return res, nil +end + + + +function _M.push(key, value, ttl) + local disable, err = disable_write_if_data_plane() + if disable then + return nil, err + end + + local etcd_cli, _, err = get_etcd_cli() + if not etcd_cli then + return nil, err + end + + -- Create a new revision and use it as the id. + -- It will be better if we use snowflake algorithm like manager-api, + -- but we haven't found a good library. It costs too much to write + -- our own one as the admin-api will be replaced by manager-api finally. + local res, err = set("/gen_id", 1) + if not res then + return nil, err + end + + -- manually add suffix + local index = res.body.header.revision + index = string.format("%020d", index) + + -- set the basic id attribute + value.id = index + + res, err = set(key .. "/" .. index, value, ttl) + if not res then + return nil, err + end + + v3_adapter.to_v3(res.body, "create") + return res, nil +end + + +function _M.delete(key) + local disable, err = disable_write_if_data_plane() + if disable then + return nil, err + end + + local etcd_cli, prefix, err = get_etcd_cli() + if not etcd_cli then + return nil, err + end + + local res, err = etcd_cli:delete(prefix .. 
key) + + if not res then + return nil, err + end + + res.headers["X-Etcd-Index"] = res.body.header.revision + + if not res.body.deleted then + return not_found(res), nil + end + + -- etcd v3 set would not return kv info + v3_adapter.to_v3(res.body, "delete") + res.body.node = {} + res.body.key = prefix .. key + + return res, nil +end + +function _M.rmdir(key, opts) + local disable, err = disable_write_if_data_plane() + if disable then + return nil, err + end + + local etcd_cli, prefix, err = get_etcd_cli() + if not etcd_cli then + return nil, err + end + + local res, err = etcd_cli:rmdir(prefix .. key, opts) + if not res then + return nil, err + end + + res.headers["X-Etcd-Index"] = res.body.header.revision + + if not res.body.deleted then + return not_found(res), nil + end + + v3_adapter.to_v3(res.body, "delete") + res.body.node = {} + res.body.key = prefix .. key + + return res, nil +end + +--- +-- Get etcd cluster and server version. +-- +-- @function core.etcd.server_version +-- @treturn table The response of query etcd server version. 
+-- @usage +-- local res, err = core.etcd.server_version() +-- -- the res.body is as follows: +-- -- { +-- -- etcdcluster = "3.5.0", +-- -- etcdserver = "3.5.0" +-- -- } +function _M.server_version() + local etcd_cli, _, err = get_etcd_cli() + if not etcd_cli then + return nil, err + end + + return etcd_cli:version() +end + + +function _M.keepalive(id) + local disable, err = disable_write_if_data_plane() + if disable then + return nil, err + end + + local etcd_cli, _, err = get_etcd_cli() + if not etcd_cli then + return nil, err + end + + local res, err = etcd_cli:keepalive(id) + if not res then + return nil, err + end + + return res, nil +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/event.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/event.lua new file mode 100644 index 0000000..006cd1a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/event.lua @@ -0,0 +1,45 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local CONST = { + BUILD_ROUTER = 1, +} + +local _M = { + CONST = CONST, +} + +local events = {} + + +function _M.push(type, ...) + local handler = events[type] + if handler then + handler(...) 
+ end +end + +function _M.register(type, handler) + -- TODO: we can register more than one handler + events[type] = handler +end + +function _M.unregister(type) + events[type] = nil +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/id.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/id.lua new file mode 100644 index 0000000..ef8f727 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/id.lua @@ -0,0 +1,169 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+--
+
+--- Instance id of APISIX
+--
+-- @module core.id
+
+local fetch_local_conf = require("apisix.core.config_local").local_conf
+local try_read_attr = require("apisix.core.table").try_read_attr
+local profile = require("apisix.core.profile")
+local log = require("apisix.core.log")
+local uuid = require("resty.jit-uuid")
+local lyaml = require("lyaml")
+local smatch = string.match
+local open = io.open
+local type = type
+local ipairs = ipairs
+local string = string
+local math = math
+local prefix = ngx.config.prefix()
+local pairs = pairs
+local ngx_exit = ngx.exit
+local apisix_uid
+
+local _M = {version = 0.1}
+
+
+-- Strip trailing whitespace (including a file's trailing newline).
+local function rtrim(str)
+    return smatch(str, "^(.-)%s*$")
+end
+
+
+-- Read a whole file and return its right-trimmed content,
+-- or nil when the file cannot be opened.
+local function read_file(path)
+    local file = open(path, "rb") -- r read mode and b binary mode
+    if not file then
+        return nil
+    end
+
+    local content = file:read("*a") -- *a or *all reads the whole file
+    file:close()
+    return rtrim(content)
+end
+
+
+-- Truncate and overwrite the file at `path` with `data`.
+-- Returns true on success, or nil plus an error message.
+local function write_file(path, data)
+    local file = open(path, "w+")
+    if not file then
+        return nil, "failed to open file[" .. path .. "] for writing"
+    end
+
+    file:write(data)
+    file:close()
+    return true
+end
+
+
+-- Serialize `table` to YAML, rendering null-ish values as a literal `null`
+-- instead of lyaml's default `[]`.
+local function generate_yaml(table)
+    -- sentinel used to mark null values before dumping; it contains no Lua
+    -- pattern magic characters, so it is safe to pass to gsub below
+    local null_placeholder = "__APISIX_NULL_PLACEHOLDER__"
+
+    -- By default lyaml will parse null values as []
+    -- The following logic is a workaround so that null values are parsed as null
+    local function replace_null(tbl)
+        for k, v in pairs(tbl) do
+            if type(v) == "table" then
+                replace_null(v)
+            elseif v == nil then
+                -- NOTE(review): pairs() never yields nil values, so this
+                -- branch looks unreachable as written -- confirm which null
+                -- sentinel (e.g. ngx.null / lyaml.null) callers actually store
+                tbl[k] = null_placeholder
+            end
+        end
+    end
+
+    -- Replace null values with the placeholder before dumping
+    replace_null(table)
+    local yaml = lyaml.dump({ table })
+    -- Bug fix: this used to be `yaml:gsub("", "null")`; an empty Lua pattern
+    -- matches at every position, which injected "null" between every pair of
+    -- characters and corrupted the generated configuration file.
+    yaml = yaml:gsub(null_placeholder, "null"):gsub("%[%s*%]", "null")
+    return yaml
+end
+
+
+_M.gen_uuid_v4 = uuid.generate_v4
+
+
+--- This will autogenerate the admin key if it's passed as an empty string in the configuration.
+local function autogenerate_admin_key(default_conf)
+    local changed = false
+    -- Check if deployment.role is either traditional or control_plane
+    local deployment_role = default_conf.deployment and default_conf.deployment.role
+    if deployment_role and (deployment_role == "traditional" or
+                            deployment_role == "control_plane") then
+        -- Check if deployment.admin.admin_key is not nil and it's an empty string
+        local admin_keys = try_read_attr(default_conf, "deployment", "admin", "admin_key")
+        if admin_keys and type(admin_keys) == "table" then
+            for i, admin_key in ipairs(admin_keys) do
+                if admin_key.role == "admin" and admin_key.key == "" then
+                    changed = true
+                    admin_keys[i].key = ""
+                    -- 32 random ASCII letters: 65-90 is 'A'-'Z'; adding 32
+                    -- with probability 1/2 shifts a letter to lowercase.
+                    -- NOTE(review): math.random is not explicitly seeded
+                    -- before this point (uuid.seed() only runs later in
+                    -- _M.init's fallback branch) -- confirm the PRNG is
+                    -- seeded elsewhere, otherwise generated admin keys may
+                    -- be predictable
+                    for _ = 1, 32 do
+                        admin_keys[i].key = admin_keys[i].key ..
+                            string.char(math.random(65, 90) + math.random(0, 1) * 32)
+                    end
+                end
+            end
+        end
+    end
+    return default_conf,changed
+end
+
+
+-- Initialize the APISIX instance id and, when an empty admin key was
+-- configured, persist the autogenerated key back into the local config file.
+-- Exits the process when the rewritten configuration cannot be saved.
+function _M.init()
+    local local_conf = fetch_local_conf()
+
+    -- note: intentionally shadows the previous local; `changed` reports
+    -- whether an admin key was generated and must be persisted
+    local local_conf, changed = autogenerate_admin_key(local_conf)
+    if changed then
+        local yaml_conf = generate_yaml(local_conf)
+        local local_conf_path = profile:yaml_path("config")
+        local ok, err = write_file(local_conf_path, yaml_conf)
+        if not ok then
+            log.error("failed to write updated local configuration: ", err)
+            ngx_exit(-1)
+        end
+    end
+
+    --allow user to specify a meaningful id as apisix instance id
+    local uid_file_path = prefix .. "/conf/apisix.uid"
+    apisix_uid = read_file(uid_file_path)
+    if apisix_uid then
+        return
+    end
+
+    local id = try_read_attr(local_conf, "apisix", "id")
+    if id then
+        apisix_uid = local_conf.apisix.id
+    else
+        uuid.seed()
+        apisix_uid = uuid.generate_v4()
+        log.notice("not found apisix uid, generate a new one: ", apisix_uid)
+    end
+
+    -- best effort: failing to persist the uid only logs an error
+    local ok, err = write_file(uid_file_path, apisix_uid)
+    if not ok then
+        log.error(err)
+    end
+end
+
+
+---
+-- Returns the instance id of the running APISIX
+--
+-- @function core.id.get
+-- @treturn string the instance id
+-- @usage
+-- local apisix_id = core.id.get()
+function _M.get()
+    return apisix_uid
+end
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/io.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/io.lua
new file mode 100644
index 0000000..ad1b229
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/core/io.lua
@@ -0,0 +1,50 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+--- I/O operations on files.
+--
+-- @module core.io
+
+local open = io.open
+
+
+local _M = {}
+
+---
+-- Read the contents of a file.
+--
+-- @function core.io.get_file
+-- @tparam string file_name either an absolute path or
+-- a relative path based on the APISIX working directory.
+-- @treturn string The file content.
+-- @treturn nil|string the error message from io.open when the file
+-- cannot be opened.
+-- @usage
+-- local file_content, err = core.io.get_file("conf/apisix.uid")
+-- -- the `file_content` may be the APISIX instance id in uuid format,
+-- -- like "3f0e827b-5f26-440e-8074-c101c8eb0174"
+function _M.get_file(file_name)
+    local f, err = open(file_name, 'r')
+    if not f then
+        return nil, err
+    end
+
+    -- read the whole file in one shot
+    local req_body = f:read("*all")
+    f:close()
+    return req_body
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/ip.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/ip.lua
new file mode 100644
index 0000000..5a762be
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/core/ip.lua
@@ -0,0 +1,80 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+--- IP match and verify module.
+--
+-- @module core.ip
+
+local json = require("apisix.core.json")
+local log = require("apisix.core.log")
+local ipmatcher = require("resty.ipmatcher")
+local str_sub = string.sub
+local str_find = require("apisix.core.string").find
+local tonumber = tonumber
+
+
+local _M = {}
+
+
+-- Build a resty.ipmatcher from a list of IPs/CIDRs.
+-- Returns nil (after logging the reason) when the list is invalid.
+function _M.create_ip_matcher(ip_list)
+    local ip, err = ipmatcher.new(ip_list)
+    if not ip then
+        log.error("failed to create ip matcher: ", err,
+                  " ip list: ", json.delay_encode(ip_list))
+        return nil
+    end
+
+    return ip
+end
+
+---
+-- Verify that the given ip is a valid ip or cidr.
+--
+-- @function core.ip.validate_cidr_or_ip
+-- @tparam string ip IP or cidr.
+-- @treturn boolean True if the given ip is a valid ip or cidr, false otherwise.
+-- @usage
+-- local ip1 = core.ip.validate_cidr_or_ip("127.0.0.1") -- true
+-- local cidr = core.ip.validate_cidr_or_ip("113.74.26.106/24") -- true
+-- local ip2 = core.ip.validate_cidr_or_ip("113.74.26.666") -- false
+function _M.validate_cidr_or_ip(ip)
+    local mask = 0
+    local sep_pos = str_find(ip, "/")
+    if sep_pos then
+        mask = str_sub(ip, sep_pos + 1)
+        mask = tonumber(mask)
+        -- Bug fix: tonumber() returns nil for a non-numeric mask (e.g.
+        -- "10.0.0.0/abc"); comparing nil with a number would raise
+        -- "attempt to compare nil with number" instead of reporting the
+        -- input as invalid.
+        if not mask or mask < 0 or mask > 128 then
+            return false
+        end
+        ip = str_sub(ip, 1, sep_pos - 1)
+    end
+
+    if ipmatcher.parse_ipv4(ip) then
+        -- an IPv4 prefix length may not exceed 32 bits
+        if mask < 0 or mask > 32 then
+            return false
+        end
+        return true
+    end
+
+    -- an IPv6 prefix length may not exceed 128 bits
+    if mask < 0 or mask > 128 then
+        return false
+    end
+    return ipmatcher.parse_ipv6(ip)
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/json.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/json.lua
new file mode 100644
index 0000000..4341c46
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/core/json.lua
@@ -0,0 +1,132 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Wrapped serialization and deserialization modules for json and lua tables. +-- +-- @module core.json + +local cjson = require("cjson.safe") +local json_encode = cjson.encode +local clear_tab = require("table.clear") +local ngx = ngx +local tostring = tostring +local type = type +local pairs = pairs +local cached_tab = {} + + +cjson.encode_escape_forward_slash(false) +cjson.decode_array_with_array_mt(true) +local _M = { + version = 0.1, + array_mt = cjson.array_mt, + decode = cjson.decode, + -- This method produces the same encoded string when the input is not changed. + -- Different calls with cjson.encode will produce different string because + -- it doesn't maintain the object key order. 
+ stably_encode = require("dkjson").encode +} + + +local function serialise_obj(data) + if type(data) == "function" or type(data) == "userdata" + or type(data) == "cdata" + or type(data) == "table" then + return tostring(data) + end + + return data +end + + +local function tab_clone_with_serialise(data) + if type(data) ~= "table" then + return serialise_obj(data) + end + + local t = {} + for k, v in pairs(data) do + if type(v) == "table" then + if cached_tab[v] then + t[serialise_obj(k)] = tostring(v) + else + cached_tab[v] = true + t[serialise_obj(k)] = tab_clone_with_serialise(v) + end + + else + t[serialise_obj(k)] = serialise_obj(v) + end + end + + return t +end + + +local function encode(data, force) + if force then + clear_tab(cached_tab) + data = tab_clone_with_serialise(data) + end + + return json_encode(data) +end +_M.encode = encode + +local max_delay_encode_items = 16 +local delay_tab_idx = 0 +local delay_tab_arr = {} +for i = 1, max_delay_encode_items do + delay_tab_arr[i] = setmetatable({data = "", force = false}, { + __tostring = function(self) + local res, err = encode(self.data, self.force) + if not res then + ngx.log(ngx.WARN, "failed to encode: ", err, + " force: ", self.force) + end + + return res + end + }) +end + + + +--- +-- Delayed encoding of input data, avoid unnecessary encode operations. +-- When really writing logs, if the given parameter is table, it will be converted to string in +-- OpenResty by checking if there is a metamethod registered for `__tostring`, and if so, +-- calling this method to convert it to string. +-- +-- @function core.json.delay_encode +-- @tparam string|table data The data to be encoded. +-- @tparam boolean force encode data can't be encoded as JSON with tostring +-- @treturn table The table with the __tostring function overridden. 
+-- @usage
+-- core.log.info("conf : ", core.json.delay_encode(conf))
+function _M.delay_encode(data, force)
+    -- advance the ring-buffer cursor, wrapping back to slot 1 past the
+    -- last slot so at most `max_delay_encode_items` holders are recycled
+    delay_tab_idx = delay_tab_idx % max_delay_encode_items + 1
+
+    local holder = delay_tab_arr[delay_tab_idx]
+    holder.data = data
+    holder.force = force
+    return holder
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/log.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/log.lua
new file mode 100644
index 0000000..b59e0b7
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/core/log.lua
@@ -0,0 +1,173 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+--- Wrapped `ngx.log`.
+-- +-- @module core.log + +local ngx = ngx +local ngx_log = ngx.log +local require = require +local select = select +local setmetatable = setmetatable +local tostring = tostring +local unpack = unpack +-- avoid loading other module since core.log is the most foundational one +local tab_clear = require("table.clear") +local ngx_errlog = require("ngx.errlog") +local ngx_get_phase = ngx.get_phase + + +local _M = {version = 0.4} + + +local log_levels = { + stderr = ngx.STDERR, + emerg = ngx.EMERG, + alert = ngx.ALERT, + crit = ngx.CRIT, + error = ngx.ERR, + warn = ngx.WARN, + notice = ngx.NOTICE, + info = ngx.INFO, + debug = ngx.DEBUG, +} + + +local cur_level + +local do_nothing = function() end + + +local function update_log_level() + -- Nginx use `notice` level in init phase instead of error_log directive config + -- Ref to src/core/ngx_log.c's ngx_log_init + if ngx_get_phase() ~= "init" then + cur_level = ngx.config.subsystem == "http" and ngx_errlog.get_sys_filter_level() + end +end + + +function _M.new(prefix) + local m = {version = _M.version} + setmetatable(m, {__index = function(self, cmd) + local log_level = log_levels[cmd] + local method + update_log_level() + + if cur_level and (log_level > cur_level) + then + method = do_nothing + else + method = function(...) + return ngx_log(log_level, prefix, ...) + end + end + + -- cache the lazily generated method in our + -- module table + if ngx_get_phase() ~= "init" then + self[cmd] = method + end + + return method + end}) + + return m +end + + +setmetatable(_M, {__index = function(self, cmd) + local log_level = log_levels[cmd] + local method + update_log_level() + + if cur_level and (log_level > cur_level) + then + method = do_nothing + else + method = function(...) + return ngx_log(log_level, ...) 
+ end + end + + -- cache the lazily generated method in our + -- module table + if ngx_get_phase() ~= "init" then + self[cmd] = method + end + + return method +end}) + + +local delay_tab = setmetatable({ + func = function() end, + args = {}, + res = nil, + }, { + __tostring = function(self) + -- the `__tostring` will be called twice, the first to get the length and + -- the second to get the data + if self.res then + local res = self.res + -- avoid unexpected reference + self.res = nil + return res + end + + local res, err = self.func(unpack(self.args)) + if err then + ngx.log(ngx.WARN, "failed to exec: ", err) + end + + -- avoid unexpected reference + tab_clear(self.args) + self.res = tostring(res) + return self.res + end +}) + + +--- +-- Delayed execute log printing. +-- It works well with log.$level, eg: log.info(..., log.delay_exec(func, ...)) +-- Should not use it elsewhere. +-- +-- @function core.log.delay_exec +-- @tparam function func Functions that need to be delayed during log printing. +-- @treturn table The table with the res attribute overridden. +-- @usage +-- local function delay_func(param1, param2) +-- return param1 .. " " .. param2 +-- end +-- core.log.info("delay log print: ", core.log.delay_exec(delay_func, "hello", "world)) +-- -- then the log will be: "delay log print: hello world" +function _M.delay_exec(func, ...) + delay_tab.func = func + + tab_clear(delay_tab.args) + for i = 1, select('#', ...) do + delay_tab.args[i] = select(i, ...) + end + + delay_tab.res = nil + return delay_tab +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/lrucache.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/lrucache.lua new file mode 100644 index 0000000..5c81dd3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/lrucache.lua @@ -0,0 +1,193 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. 
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- LRU Caching Implementation. +-- +-- @module core.lrucache + +local lru_new = require("resty.lrucache").new +local resty_lock = require("resty.lock") +local log = require("apisix.core.log") +local tostring = tostring +local ngx = ngx +local get_phase = ngx.get_phase + + +local lock_shdict_name = "lrucache-lock" +if ngx.config.subsystem == "stream" then + lock_shdict_name = lock_shdict_name .. "-" .. 
ngx.config.subsystem +end + + +local can_yield_phases = { + ssl_session_fetch = true, + ssl_session_store = true, + rewrite = true, + access = true, + content = true, + timer = true +} + +local GLOBAL_ITEMS_COUNT = 1024 +local GLOBAL_TTL = 60 * 60 -- 60 min +local PLUGIN_TTL = 5 * 60 -- 5 min +local PLUGIN_ITEMS_COUNT = 8 +local global_lru_fun + + +local function fetch_valid_cache(lru_obj, invalid_stale, item_ttl, + item_release, key, version) + local obj, stale_obj = lru_obj:get(key) + if obj and obj.ver == version then + return obj + end + + if not invalid_stale and stale_obj and stale_obj.ver == version then + lru_obj:set(key, stale_obj, item_ttl) + return stale_obj + end + + if item_release and obj then + item_release(obj.val) + end + + return nil +end + + +local function new_lru_fun(opts) + local item_count, item_ttl + if opts and opts.type == 'plugin' then + item_count = opts.count or PLUGIN_ITEMS_COUNT + item_ttl = opts.ttl or PLUGIN_TTL + else + item_count = opts and opts.count or GLOBAL_ITEMS_COUNT + item_ttl = opts and opts.ttl or GLOBAL_TTL + end + + local item_release = opts and opts.release + local invalid_stale = opts and opts.invalid_stale + local serial_creating = opts and opts.serial_creating + local lru_obj = lru_new(item_count) + + return function (key, version, create_obj_fun, ...) + if not serial_creating or not can_yield_phases[get_phase()] then + local cache_obj = fetch_valid_cache(lru_obj, invalid_stale, + item_ttl, item_release, key, version) + if cache_obj then + return cache_obj.val + end + + local obj, err = create_obj_fun(...) + if obj ~= nil then + lru_obj:set(key, {val = obj, ver = version}, item_ttl) + end + + return obj, err + end + + local cache_obj = fetch_valid_cache(lru_obj, invalid_stale, item_ttl, + item_release, key, version) + if cache_obj then + return cache_obj.val + end + + local lock, err = resty_lock:new(lock_shdict_name) + if not lock then + return nil, "failed to create lock: " .. 
err + end + + local key_s = tostring(key) + log.info("try to lock with key ", key_s) + + local elapsed, err = lock:lock(key_s) + if not elapsed then + return nil, "failed to acquire the lock: " .. err + end + + cache_obj = fetch_valid_cache(lru_obj, invalid_stale, item_ttl, + nil, key, version) + if cache_obj then + lock:unlock() + log.info("unlock with key ", key_s) + return cache_obj.val + end + + local obj, err = create_obj_fun(...) + if obj ~= nil then + lru_obj:set(key, {val = obj, ver = version}, item_ttl) + end + lock:unlock() + log.info("unlock with key ", key_s) + + return obj, err + end +end + + +global_lru_fun = new_lru_fun() + + +local function plugin_ctx_key_and_ver(api_ctx, extra_key) + local key = api_ctx.conf_type .. "#" .. api_ctx.conf_id + + if extra_key then + key = key .. "#" .. extra_key + end + + return key, api_ctx.conf_version +end + +--- +-- Cache some objects for plugins to avoid duplicate resources creation. +-- +-- @function core.lrucache.plugin_ctx +-- @tparam table lrucache LRUCache object instance. +-- @tparam table api_ctx The request context. +-- @tparam string extra_key Additional parameters for generating the lrucache identification key. +-- @tparam function create_obj_func Functions for creating cache objects. +-- If the object does not exist in the lrucache, this function is +-- called to create it and cache it in the lrucache. +-- @treturn table The object cached in lrucache. +-- @usage +-- local function create_obj() { +-- -- create the object +-- -- return the object +-- } +-- local obj, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, create_obj) +-- -- obj is the object cached in lrucache +local function plugin_ctx(lrucache, api_ctx, extra_key, create_obj_func, ...) + local key, ver = plugin_ctx_key_and_ver(api_ctx, extra_key) + return lrucache(key, ver, create_obj_func, ...) +end + +local function plugin_ctx_id(api_ctx, extra_key) + local key, ver = plugin_ctx_key_and_ver(api_ctx, extra_key) + return key .. "#" .. 
ver +end + + +local _M = { + version = 0.1, + new = new_lru_fun, + global = global_lru_fun, + plugin_ctx = plugin_ctx, + plugin_ctx_id = plugin_ctx_id, +} + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/math.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/math.lua new file mode 100644 index 0000000..1514cf7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/math.lua @@ -0,0 +1,41 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+--
+
+--- Common library about math
+--
+-- @module core.math
+local _M = {}
+
+
+---
+-- Calculate the greatest common divisor (GCD) of two numbers
+--
+-- @function core.math.gcd
+-- @tparam number a
+-- @tparam number b
+-- @treturn number the GCD of a and b
+local function gcd(a, b)
+    -- iterative Euclidean algorithm: fold the remainder into the pair
+    -- until it reaches zero; the surviving operand is the GCD
+    while b ~= 0 do
+        a, b = b, a % b
+    end
+
+    return a
+end
+_M.gcd = gcd
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/os.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/os.lua
new file mode 100644
index 0000000..4a922d0
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/core/os.lua
@@ -0,0 +1,118 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+--- OS module.
+-- +-- @module core.os + +local ffi = require("ffi") +local ffi_str = ffi.string +local ffi_errno = ffi.errno +local C = ffi.C +local ceil = math.ceil +local floor = math.floor +local error = error +local tostring = tostring +local type = type + + +local _M = {} +local WNOHANG = 1 + + +ffi.cdef[[ + typedef int32_t pid_t; + typedef unsigned int useconds_t; + + int setenv(const char *name, const char *value, int overwrite); + char *strerror(int errnum); + + int usleep(useconds_t usec); + pid_t waitpid(pid_t pid, int *wstatus, int options); +]] + + +local function err() + return ffi_str(C.strerror(ffi_errno())) +end + +--- +-- Sets the value of the environment variable. +-- +-- @function core.os.setenv +-- @tparam string name The name of environment variable. +-- @tparam string value The value of environment variable. +-- @treturn boolean Results of setting environment variables, true on success. +-- @usage +-- local ok, err = core.os.setenv("foo", "bar") +function _M.setenv(name, value) + local tv = type(value) + if type(name) ~= "string" or (tv ~= "string" and tv ~= "number") then + return false, "invalid argument" + end + + value = tostring(value) + local ok = C.setenv(name, value, 1) == 0 + if not ok then + return false, err() + end + return true +end + + +--- +-- sleep blockingly in microseconds +-- +-- @function core.os.usleep +-- @tparam number us The number of microseconds. +local function usleep(us) + if ceil(us) ~= floor(us) then + error("bad microseconds: " .. 
us) + end + C.usleep(us) +end +_M.usleep = usleep + + +local function waitpid_nohang(pid) + local res = C.waitpid(pid, nil, WNOHANG) + if res == -1 then + return nil, err() + end + return res > 0 +end + + +function _M.waitpid(pid, timeout) + local count = 0 + local step = 1000 * 10 + local total = timeout * 1000 * 1000 + while step * count < total do + count = count + 1 + usleep(step) + local ok, err = waitpid_nohang(pid) + if err then + return nil, err + end + if ok then + return true + end + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/profile.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/profile.lua new file mode 100644 index 0000000..a5dcdc8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/profile.lua @@ -0,0 +1,67 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Profile module. +-- +-- @module core.profile + +local util = require("apisix.cli.util") + +local _M = { + version = 0.1, + profile = os.getenv("APISIX_PROFILE") or "", + apisix_home = (ngx and ngx.config.prefix()) or "" +} + +--- +-- Get yaml file path by filename under the `conf/`. +-- +-- @function core.profile.yaml_path +-- @tparam self self The profile module itself. 
+-- @tparam string file_name Name of the yaml file to search.
+-- @treturn string The path of yaml file searched.
+-- @usage
+-- local profile = require("apisix.core.profile")
+-- ......
+-- -- set the working directory of APISIX
+-- profile.apisix_home = env.apisix_home .. "/"
+-- local local_conf_path = profile:yaml_path("config")
+function _M.yaml_path(self, file_name)
+    -- "config-default" never gets a profile suffix; every other file does
+    -- whenever APISIX_PROFILE is set
+    local suffix = ""
+    if self.profile ~= "" and file_name ~= "config-default" then
+        suffix = "-" .. self.profile
+    end
+
+    return self.apisix_home .. "conf/" .. file_name .. suffix .. ".yaml"
+end
+
+
+-- Path of the marker file that records a user-chosen config location.
+-- NOTE(review): yaml_path() assumes apisix_home already ends with "/",
+-- while this concatenation adds its own slash — harmless on POSIX but
+-- inconsistent; confirm before normalising.
+function _M.customized_yaml_index(self)
+    return self.apisix_home .. "/conf/.customized_config_path"
+end
+
+
+-- Return the customized config path recorded in the index file, or nil
+-- when no such index exists.
+function _M.customized_yaml_path(self)
+    local index_file = self:customized_yaml_index()
+    if not util.file_exists(index_file) then
+        return nil
+    end
+    return util.read_file(index_file)
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/pubsub.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/pubsub.lua
new file mode 100644
index 0000000..5b36b0c
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/core/pubsub.lua
@@ -0,0 +1,238 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Extensible framework to support publish-and-subscribe scenarios +-- +-- @module core.pubsub + +local log = require("apisix.core.log") +local ws_server = require("resty.websocket.server") +local protoc = require("protoc") +local pb = require("pb") +local ngx = ngx +local setmetatable = setmetatable +local pcall = pcall + + +local _M = { version = 0.1 } +local mt = { __index = _M } + +local pb_state +local function init_pb_state() + -- clear current pb state + local old_pb_state = pb.state(nil) + + -- set int64 rule for pubsub module + pb.option("int64_as_string") + + -- initialize protoc compiler + protoc.reload() + local pubsub_protoc = protoc.new() + pubsub_protoc:addpath(ngx.config.prefix() .. "apisix/include/apisix/model") + local ok, err = pcall(pubsub_protoc.loadfile, pubsub_protoc, "pubsub.proto") + if not ok then + pubsub_protoc:reset() + pb.state(old_pb_state) + return "failed to load pubsub protocol: " .. err + end + + pb_state = pb.state(old_pb_state) +end + + +-- parse command name and parameters from client message +local function get_cmd(data) + -- There are sequence and command properties in the data, + -- select the handler according to the command value. 
+ local key = data.req + return key, data[key] +end + + +-- send generic response to client +local function send_resp(ws, sequence, data) + data.sequence = sequence + -- only restore state if it has changed + if pb_state ~= pb.state() then + pb.state(pb_state) + end + local ok, encoded = pcall(pb.encode, "PubSubResp", data) + if not ok or not encoded then + log.error("failed to encode response message, err: ", encoded) + return + end + + local _, err = ws:send_binary(encoded) + if err then + log.error("failed to send response to client, err: ", err) + end +end + + +-- send error response to client +local function send_error(ws, sequence, err_msg) + return send_resp(ws, sequence, { + error_resp = { + code = 0, + message = err_msg, + }, + }) +end + + +--- +-- Create pubsub module instance +-- +-- @function core.pubsub.new +-- @treturn pubsub module instance +-- @treturn string|nil error message if present +-- @usage +-- local pubsub, err = core.pubsub.new() +function _M.new() + if not pb_state then + local err = init_pb_state() + if err then + return nil, err + end + end + + local ws, err = ws_server:new() + if not ws then + return nil, err + end + + local obj = setmetatable({ + ws_server = ws, + cmd_handler = {}, + }, mt) + + -- add default ping handler + obj:on("cmd_ping", function (params) + return { pong_resp = params } + end) + + return obj +end + + +--- +-- Add command callbacks to pubsub module instances +-- +-- The callback function prototype: function (params) +-- The params in the parameters contain the data defined in the requested command. +-- Its first return value is the data, which needs to contain the data needed for +-- the particular resp, returns nil if an error exists. +-- Its second return value is a string type error message, no need to return when +-- no error exists. +-- +-- @function core.pubsub.on +-- @tparam string command The command to add callback. +-- @tparam func handler The callback function on receipt of command. 
+-- @usage +-- pubsub:on(command, function (params) +-- return data, err +-- end) +function _M.on(self, command, handler) + self.cmd_handler[command] = handler +end + + +--- +-- Put the pubsub instance into an event loop, waiting to process client commands +-- +-- @function core.pubsub.wait +-- @usage +-- local err = pubsub:wait() +function _M.wait(self) + local fatal_err + local ws = self.ws_server + while true do + -- read raw data frames from websocket connection + local raw_data, raw_type, err = ws:recv_frame() + if err then + -- terminate the event loop when a fatal error occurs + if ws.fatal then + fatal_err = err + break + end + + -- skip this loop for non-fatal errors + log.error("failed to receive websocket frame: ", err) + goto continue + end + + -- handle client close connection + if raw_type == "close" then + break + end + + -- the pubsub messages use binary, if the message is not + -- binary, skip this message + if raw_type ~= "binary" then + log.warn("pubsub server receive non-binary data, type: ", + raw_type, ", data: ", raw_data) + goto continue + end + + -- only recover state if it has changed + if pb.state() ~= pb_state then + pb.state(pb_state) + end + local data, err = pb.decode("PubSubReq", raw_data) + if not data then + log.error("pubsub server receives undecodable data, err: ", err) + send_error(ws, 0, "wrong command") + goto continue + end + + -- command sequence code + local sequence = data.sequence + + local cmd, params = get_cmd(data) + if not cmd and not params then + log.warn("pubsub server receives empty command") + goto continue + end + + -- find the handler for the current command + local handler = self.cmd_handler[cmd] + if not handler then + log.error("pubsub callback handler not registered for the", + " command, command: ", cmd) + send_error(ws, sequence, "unknown command") + goto continue + end + + -- call command handler to generate response data + local resp, err = handler(params) + if not resp then + send_error(ws, sequence, 
err) + goto continue + end + send_resp(ws, sequence, resp) + + ::continue:: + end + + if fatal_err then + log.error("fatal error in pubsub websocket server, err: ", fatal_err) + end + ws:send_close() +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/request.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/request.lua new file mode 100644 index 0000000..fef4bf1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/request.lua @@ -0,0 +1,382 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Get or set the information of the client request. 
+-- +-- @module core.request + +local lfs = require("lfs") +local log = require("apisix.core.log") +local json = require("apisix.core.json") +local io = require("apisix.core.io") +local req_add_header +if ngx.config.subsystem == "http" then + local ngx_req = require "ngx.req" + req_add_header = ngx_req.add_header +end +local is_apisix_or, a6_request = pcall(require, "resty.apisix.request") +local ngx = ngx +local get_headers = ngx.req.get_headers +local clear_header = ngx.req.clear_header +local tonumber = tonumber +local error = error +local type = type +local str_fmt = string.format +local str_lower = string.lower +local req_read_body = ngx.req.read_body +local req_get_body_data = ngx.req.get_body_data +local req_get_body_file = ngx.req.get_body_file +local req_get_post_args = ngx.req.get_post_args +local req_get_uri_args = ngx.req.get_uri_args +local req_set_uri_args = ngx.req.set_uri_args +local table_insert = table.insert +local req_set_header = ngx.req.set_header + + +local _M = {} + + +local function _headers(ctx) + if not ctx then + ctx = ngx.ctx.api_ctx + end + + if not is_apisix_or then + return get_headers() + end + + if a6_request.is_request_header_set() then + a6_request.clear_request_header() + ctx.headers = get_headers() + end + + local headers = ctx.headers + if not headers then + headers = get_headers() + ctx.headers = headers + end + + return headers +end + +local function _validate_header_name(name) + local tname = type(name) + if tname ~= "string" then + return nil, str_fmt("invalid header name %q: got %s, " .. + "expected string", name, tname) + end + + return name +end + +--- +-- Returns all headers of the current request. +-- The name and value of the header in return table is in lower case. +-- +-- @function core.request.headers +-- @tparam table ctx The context of the current request. 
+-- @treturn table all headers +-- @usage +-- local headers = core.request.headers(ctx) +_M.headers = _headers + +--- +-- Returns the value of the header with the specified name. +-- +-- @function core.request.header +-- @tparam table ctx The context of the current request. +-- @tparam string name The header name, example: "Content-Type". +-- @treturn string|nil the value of the header, or nil if not found. +-- @usage +-- -- You can use upper case for header "Content-Type" here to get the value. +-- local content_type = core.request.header(ctx, "Content-Type") -- "application/json" +function _M.header(ctx, name) + if not ctx then + ctx = ngx.ctx.api_ctx + end + + local value = _headers(ctx)[name] + return type(value) == "table" and value[1] or value +end + +local function modify_header(ctx, header_name, header_value, override) + if type(ctx) == "string" then + -- It would be simpler to keep compatibility if we put 'ctx' + -- after 'header_value', but the style is too ugly! + header_value = header_name + header_name = ctx + ctx = nil + + if override then + log.warn("DEPRECATED: use set_header(ctx, header_name, header_value) instead") + else + log.warn("DEPRECATED: use add_header(ctx, header_name, header_value) instead") + end + end + + local err + header_name, err = _validate_header_name(header_name) + if err then + error(err) + end + + local changed = false + if is_apisix_or then + changed = a6_request.is_request_header_set() + end + + if override then + req_set_header(header_name, header_value) + else + req_add_header(header_name, header_value) + end + + if ctx and ctx.var then + -- when the header is updated, clear cache of ctx.var + ctx.var["http_" .. 
str_lower(header_name)] = nil + end + + if is_apisix_or and not changed then + -- if the headers are not changed before, + -- we can only update part of the cache instead of invalidating the whole + a6_request.clear_request_header() + if ctx and ctx.headers then + if override or not ctx.headers[header_name] then + ctx.headers[header_name] = header_value + else + local values = ctx.headers[header_name] + if type(values) == "table" then + table_insert(values, header_value) + else + ctx.headers[header_name] = {values, header_value} + end + end + end + end +end + +function _M.set_header(ctx, header_name, header_value) + modify_header(ctx, header_name, header_value, true) +end + +function _M.add_header(ctx, header_name, header_value) + modify_header(ctx, header_name, header_value, false) +end + +-- return the remote address of client which directly connecting to APISIX. +-- so if there is a load balancer between downstream client and APISIX, +-- this function will return the ip of load balancer. +function _M.get_ip(ctx) + if not ctx then + ctx = ngx.ctx.api_ctx + end + return ctx.var.realip_remote_addr or ctx.var.remote_addr or '' +end + + +-- get remote address of downstream client, +-- in cases there is a load balancer between downstream client and APISIX. 
+function _M.get_remote_client_ip(ctx) + if not ctx then + ctx = ngx.ctx.api_ctx + end + return ctx.var.remote_addr or '' +end + + +function _M.get_remote_client_port(ctx) + if not ctx then + ctx = ngx.ctx.api_ctx + end + return tonumber(ctx.var.remote_port) +end + + +function _M.get_uri_args(ctx) + if not ctx then + ctx = ngx.ctx.api_ctx + end + + if not ctx.req_uri_args then + -- use 0 to avoid truncated result and keep the behavior as the + -- same as other platforms + local args = req_get_uri_args(0) + ctx.req_uri_args = args + end + + return ctx.req_uri_args +end + + +function _M.set_uri_args(ctx, args) + if not ctx then + ctx = ngx.ctx.api_ctx + end + + ctx.req_uri_args = nil + return req_set_uri_args(args) +end + + +function _M.get_post_args(ctx) + if not ctx then + ctx = ngx.ctx.api_ctx + end + + if not ctx.req_post_args then + req_read_body() + + -- use 0 to avoid truncated result and keep the behavior as the + -- same as other platforms + local args, err = req_get_post_args(0) + if not args then + -- do we need a way to handle huge post forms? + log.error("the post form is too large: ", err) + args = {} + end + ctx.req_post_args = args + end + + return ctx.req_post_args +end + + +local function check_size(size, max_size) + if max_size and size > max_size then + return nil, "request size " .. size .. " is greater than the " + .. "maximum size " .. max_size .. " allowed" + end + + return true +end + + +local function test_expect(var) + local expect = var.http_expect + return expect and str_lower(expect) == "100-continue" +end + + +function _M.get_body(max_size, ctx) + if max_size then + local var = ctx and ctx.var or ngx.var + local content_length = tonumber(var.http_content_length) + if content_length then + local ok, err = check_size(content_length, max_size) + if not ok then + -- When client_max_body_size is exceeded, Nginx will set r->expect_tested = 1 to + -- avoid sending the 100 CONTINUE. + -- We use trick below to imitate this behavior. 
+ if test_expect(var) then + clear_header("expect") + end + + return nil, err + end + end + end + + -- check content-length header for http2/http3 + do + local var = ctx and ctx.var or ngx.var + local content_length = tonumber(var.http_content_length) + if (var.server_protocol == "HTTP/2.0" or var.server_protocol == "HTTP/3.0") + and not content_length then + return nil, "HTTP2/HTTP3 request without a Content-Length header" + end + end + req_read_body() + + local req_body = req_get_body_data() + if req_body then + local ok, err = check_size(#req_body, max_size) + if not ok then + return nil, err + end + + return req_body + end + + local file_name = req_get_body_file() + if not file_name then + return nil + end + + log.info("attempt to read body from file: ", file_name) + + if max_size then + local size, err = lfs.attributes (file_name, "size") + if not size then + return nil, err + end + + local ok, err = check_size(size, max_size) + if not ok then + return nil, err + end + end + + local req_body, err = io.get_file(file_name) + return req_body, err +end + + +function _M.get_json_request_body_table() + local body, err = _M.get_body() + if not body then + return nil, { message = "could not get body: " .. (err or "request body is empty") } + end + + local body_tab, err = json.decode(body) + if not body_tab then + return nil, { message = "could not get parse JSON request body: " .. 
err } + end + + return body_tab +end + + +function _M.get_scheme(ctx) + if not ctx then + ctx = ngx.ctx.api_ctx + end + return ctx.var.scheme or '' +end + + +function _M.get_host(ctx) + if not ctx then + ctx = ngx.ctx.api_ctx + end + return ctx.var.host or '' +end + + +function _M.get_port(ctx) + if not ctx then + ctx = ngx.ctx.api_ctx + end + return tonumber(ctx.var.server_port) +end + + +_M.get_http_version = ngx.req.http_version + + +_M.get_method = ngx.req.get_method + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/resolver.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/resolver.lua new file mode 100644 index 0000000..3568a97 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/resolver.lua @@ -0,0 +1,96 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Domain Resolver. 
+-- +-- @module core.resolver + +local json = require("apisix.core.json") +local log = require("apisix.core.log") +local utils = require("apisix.core.utils") +local dns_utils = require("resty.dns.utils") +local config_local = require("apisix.core.config_local") + + +local HOSTS_IP_MATCH_CACHE = {} + + +local _M = {} + + +local function init_hosts_ip() + local hosts, err = dns_utils.parseHosts() + if not hosts then + return hosts, err + end + HOSTS_IP_MATCH_CACHE = hosts +end + + +function _M.init_resolver(args) + -- initialize /etc/hosts + init_hosts_ip() + + local dns_resolver = args and args["dns_resolver"] + utils.set_resolver(dns_resolver) + log.info("dns resolver ", json.delay_encode(dns_resolver, true)) +end + +--- +-- Resolve domain name to ip. +-- +-- @function core.resolver.parse_domain +-- @tparam string host Domain name that need to be resolved. +-- @treturn string The IP of the domain name after being resolved. +-- @usage +-- local ip, err = core.resolver.parse_domain("apache.org") -- "198.18.10.114" +function _M.parse_domain(host) + local rev = HOSTS_IP_MATCH_CACHE[host] + local enable_ipv6 = config_local.local_conf().apisix.enable_ipv6 + if rev then + -- use ipv4 in high priority + local ip = rev["ipv4"] + if enable_ipv6 and not ip then + ip = rev["ipv6"] + end + if ip then + -- meet test case + log.info("dns resolve ", host, ", result: ", json.delay_encode(ip)) + log.info("dns resolver domain: ", host, " to ", ip) + return ip + end + end + + local ip_info, err = utils.dns_parse(host) + if not ip_info then + log.error("failed to parse domain: ", host, ", error: ",err) + return nil, err + end + + log.info("parse addr: ", json.delay_encode(ip_info)) + log.info("resolver: ", json.delay_encode(utils.get_resolver())) + log.info("host: ", host) + if ip_info.address then + log.info("dns resolver domain: ", host, " to ", ip_info.address) + return ip_info.address + end + + return nil, "failed to parse domain" +end + + +return _M diff --git 
a/CloudronPackages/APISIX/apisix-source/apisix/core/response.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/response.lua new file mode 100644 index 0000000..baee977 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/response.lua @@ -0,0 +1,231 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Get the information form upstream response, or set the information to client response. +-- +-- @module core.response + +local encode_json = require("cjson.safe").encode +local ngx = ngx +local arg = ngx.arg +local ngx_print = ngx.print +local ngx_header = ngx.header +local ngx_add_header +if ngx.config.subsystem == "http" then + local ngx_resp = require "ngx.resp" + ngx_add_header = ngx_resp.add_header +end + +local error = error +local select = select +local type = type +local ngx_exit = ngx.exit +local concat_tab = table.concat +local str_sub = string.sub +local tonumber = tonumber +local clear_tab = require("table.clear") +local pairs = pairs + +local _M = {version = 0.1} + + +local resp_exit +do + local t = {} + local idx = 1 + +function resp_exit(code, ...) 
+ clear_tab(t) + idx = 0 + + if code and type(code) ~= "number" then + idx = idx + 1 + t[idx] = code + code = nil + end + + if code then + ngx.status = code + end + + for i = 1, select('#', ...) do + local v = select(i, ...) + if type(v) == "table" then + local body, err = encode_json(v) + if err then + error("failed to encode data: " .. err, -2) + else + idx = idx + 1 + t[idx] = body + idx = idx + 1 + t[idx] = "\n" + end + + elseif v ~= nil then + idx = idx + 1 + t[idx] = v + end + end + + if idx > 0 then + ngx_print(t) + end + + if code then + return ngx_exit(code) + end +end + +end -- do +_M.exit = resp_exit + + +function _M.say(...) + resp_exit(nil, ...) +end + + +local function set_header(append, ...) + if ngx.headers_sent then + error("headers have already been sent", 2) + end + + local count = select('#', ...) + if count == 1 then + local headers = select(1, ...) + if type(headers) ~= "table" then + -- response.set_header(name, nil) + ngx_header[headers] = nil + return + end + + for k, v in pairs(headers) do + if append then + ngx_add_header(k, v) + else + ngx_header[k] = v + end + end + + return + end + + for i = 1, count, 2 do + if append then + ngx_add_header(select(i, ...), select(i + 1, ...)) + else + ngx_header[select(i, ...)] = select(i + 1, ...) + end + end +end + + +function _M.set_header(...) + set_header(false, ...) +end + +--- +-- Add a header to the client response. +-- +-- @function core.response.add_header +-- @usage +-- core.response.add_header("Apisix-Plugins", "no plugin") +function _M.add_header(...) + set_header(true, ...) 
+end + + +function _M.get_upstream_status(ctx) + -- $upstream_status maybe including multiple status, only need the last one + return tonumber(str_sub(ctx.var.upstream_status or "", -3)) +end + + +function _M.clear_header_as_body_modified() + ngx.header.content_length = nil + -- in case of upstream content is compressed content + ngx.header.content_encoding = nil + + -- clear cache identifier + ngx.header.last_modified = nil + ngx.header.etag = nil +end + + +-- Hold body chunks and return the final body once all chunks have been read. +-- Usage: +-- function _M.body_filter(conf, ctx) +-- local final_body = core.response.hold_body_chunk(ctx) +-- if not final_body then +-- return +-- end +-- final_body = transform(final_body) +-- ngx.arg[1] = final_body +-- ... +function _M.hold_body_chunk(ctx, hold_the_copy, max_resp_body_bytes) + local body_buffer + local chunk, eof = arg[1], arg[2] + + if not ctx._body_buffer then + ctx._body_buffer = {} + end + + if type(chunk) == "string" and chunk ~= "" then + body_buffer = ctx._body_buffer[ctx._plugin_name] + if not body_buffer then + body_buffer = { + chunk, + n = 1 + } + ctx._body_buffer[ctx._plugin_name] = body_buffer + ctx._resp_body_bytes = #chunk + else + local n = body_buffer.n + 1 + body_buffer.n = n + body_buffer[n] = chunk + ctx._resp_body_bytes = ctx._resp_body_bytes + #chunk + end + if max_resp_body_bytes and ctx._resp_body_bytes >= max_resp_body_bytes then + local body_data = concat_tab(body_buffer, "", 1, body_buffer.n) + body_data = str_sub(body_data, 1, max_resp_body_bytes) + return body_data + end + end + + if eof then + body_buffer = ctx._body_buffer[ctx._plugin_name] + if not body_buffer then + if max_resp_body_bytes and #chunk >= max_resp_body_bytes then + chunk = str_sub(chunk, 1, max_resp_body_bytes) + end + return chunk + end + + local body_data = concat_tab(body_buffer, "", 1, body_buffer.n) + ctx._body_buffer[ctx._plugin_name] = nil + return body_data + end + + if not hold_the_copy then + -- flush the 
origin body chunk + arg[1] = nil + end + return nil +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/schema.lua new file mode 100644 index 0000000..9ce6a55 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/schema.lua @@ -0,0 +1,71 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Json schema validation module. 
+-- +-- @module core.schema + +local jsonschema = require('jsonschema') +local lrucache = require("apisix.core.lrucache") +local cached_validator = lrucache.new({count = 1000, ttl = 0}) +local pcall = pcall + +local _M = { + version = 0.3, + + TYPE_CONSUMER = 1, + TYPE_METADATA = 2, +} + + +local function create_validator(schema) + -- local code = jsonschema.generate_validator_code(schema, opts) + -- local file2=io.output("/tmp/2.txt") + -- file2:write(code) + -- file2:close() + local ok, res = pcall(jsonschema.generate_validator, schema) + if ok then + return res + end + + return nil, res -- error message +end + +local function get_validator(schema) + local validator, err = cached_validator(schema, nil, + create_validator, schema) + + if not validator then + return nil, err + end + + return validator, nil +end + +function _M.check(schema, json) + local validator, err = get_validator(schema) + + if not validator then + return false, err + end + + return validator(json) +end + +_M.valid = get_validator + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/string.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/string.lua new file mode 100644 index 0000000..5951d33 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/string.lua @@ -0,0 +1,136 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Wrapped string module. +-- +-- @module core.string + +local error = error +local type = type +local str_byte = string.byte +local str_find = string.find +local ffi = require("ffi") +local C = ffi.C +local ffi_cast = ffi.cast +local ngx = ngx +local ngx_decode_args = ngx.decode_args +local ngx_encode_args = ngx.encode_args + + +ffi.cdef[[ + int memcmp(const void *s1, const void *s2, size_t n); +]] + + +local _M = { + version = 0.1, +} + + +setmetatable(_M, {__index = string}) + + +-- find a needle from a haystack in the plain text way +-- note: Make sure that the haystack is 'string' type, otherwise an exception will be thrown. +function _M.find(haystack, needle, from) + return str_find(haystack, needle, from or 1, true) +end + +--- +-- Tests whether the string s begins with prefix. +-- +-- @function core.string.has_prefix +-- @tparam string s The string being tested. +-- @tparam string prefix Specify the prefix. +-- @treturn boolean Test result, true means the string s begins with prefix. +-- @usage +-- local res = core.string.has_prefix("/apisix/admin/routes", "/apisix/") -- true +function _M.has_prefix(s, prefix) + if type(s) ~= "string" or type(prefix) ~= "string" then + error("unexpected type: s:" .. type(s) .. ", prefix:" .. type(prefix)) + end + if #s < #prefix then + return false + end + local rc = C.memcmp(s, prefix, #prefix) + return rc == 0 +end + + +function _M.has_suffix(s, suffix) + if type(s) ~= "string" or type(suffix) ~= "string" then + error("unexpected type: s:" .. type(s) .. ", suffix:" .. 
type(suffix)) + end + if #s < #suffix then + return false + end + local rc = C.memcmp(ffi_cast("char *", s) + #s - #suffix, suffix, #suffix) + return rc == 0 +end + + +function _M.rfind_char(s, ch, idx) + local b = str_byte(ch) + for i = idx or #s, 1, -1 do + if str_byte(s, i, i) == b then + return i + end + end + return nil +end + + +-- reduce network consumption by compressing string indentation +-- this method should be used with caution +-- it will remove the spaces at the beginning of each line +-- and remove the spaces after `,` character +function _M.compress_script(s) + s = ngx.re.gsub(s, [[^\s+]], "", "mjo") + s = ngx.re.gsub(s, [[,\s+]], ",", "mjo") + return s +end + + +--- +-- Decodes a URI encoded query-string into a Lua table. +-- All request arguments received will be decoded by default. +-- +-- @function core.string.decode_args +-- @tparam string args A URI encoded query-string. +-- @treturn table the value of decoded query-string. +-- @usage +-- local args, err = core.string.decode_args("a=1&b=2") -- {a=1, b=2} +function _M.decode_args(args) + -- use 0 to avoid truncated result and keep the behavior as the + -- same as other platforms + return ngx_decode_args(args, 0) +end + + +--- +-- Encode the Lua table to a query args string according to the URI encoded rules. +-- +-- @function core.string.encode_args +-- @tparam table args The query args Lua table. +-- @treturn string the value of query args string. +-- @usage +-- local str = core.string.encode_args({a=1, b=2}) -- "a=1&b=2" +function _M.encode_args(args) + return ngx_encode_args(args) +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/table.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/table.lua new file mode 100644 index 0000000..ed9450a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/table.lua @@ -0,0 +1,287 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. 
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Wrapped table module. +-- +-- @module core.table + +local newproxy = newproxy +local getmetatable = getmetatable +local setmetatable = setmetatable +local select = select +local tostring = tostring +local new_tab = require("table.new") +local nkeys = require("table.nkeys") +local ipairs = ipairs +local pairs = pairs +local type = type +local ngx_re = require("ngx.re") + + +local _M = { + version = 0.2, + new = new_tab, + clear = require("table.clear"), + nkeys = nkeys, + insert = table.insert, + concat = table.concat, + sort = table.sort, + clone = require("table.clone"), + isarray = require("table.isarray"), + isempty = require("table.isempty"), +} + + +setmetatable(_M, {__index = table}) + + +function _M.insert_tail(tab, ...) + local idx = #tab + for i = 1, select('#', ...) do + idx = idx + 1 + tab[idx] = select(i, ...) + end + + return idx +end + + +function _M.set(tab, ...) + for i = 1, select('#', ...) do + tab[i] = select(i, ...) + end +end + + +function _M.try_read_attr(tab, ...) + local count = select('#', ...) + + for i = 1, count do + local attr = select(i, ...) + if type(tab) ~= "table" then + return nil + end + + tab = tab[attr] + end + + return tab +end + +--- +-- Test if an element exists in an array. 
+-- +-- @function core.table.array_find +-- @tparam table array The tested array. +-- @tparam string val The tested value. +-- @treturn number The index of tested value. +-- @usage +-- local arr = {"a", "b", "c"} +-- local idx = core.table.array_find(arr, "b") -- idx = 2 +local function array_find(array, val) + if type(array) ~= "table" then + return nil + end + + for i, v in ipairs(array) do + if v == val then + return i + end + end + + return nil +end +_M.array_find = array_find + + +-- only work under lua51 or luajit +function _M.setmt__gc(t, mt) + local prox = newproxy(true) + getmetatable(prox).__gc = function() mt.__gc(t) end + t[prox] = true + return setmetatable(t, mt) +end + + +local deepcopy +do + local function _deepcopy(orig, copied, parent, opts) + -- If the array-like table contains nil in the middle, + -- the len might be smaller than the expected. + -- But it doesn't affect the correctness. + local len = #orig + local copy = new_tab(len, nkeys(orig) - len) + -- prevent infinite loop when a field refers its parent + copied[orig] = copy + for orig_key, orig_value in pairs(orig) do + local path = parent .. "." .. 
tostring(orig_key) + if opts and array_find(opts.shallows, path) then + copy[orig_key] = orig_value + else + if type(orig_value) == "table" then + if copied[orig_value] then + copy[orig_key] = copied[orig_value] + else + copy[orig_key] = _deepcopy(orig_value, copied, path, opts) + end + else + copy[orig_key] = orig_value + end + end + end + + local mt = getmetatable(orig) + if mt ~= nil then + setmetatable(copy, mt) + end + + return copy + end + + + local copied_recorder = {} + + function deepcopy(orig, opts) + local orig_type = type(orig) + if orig_type ~= 'table' then + return orig + end + + local res = _deepcopy(orig, copied_recorder, "self", opts) + _M.clear(copied_recorder) + return res + end +end +_M.deepcopy = deepcopy + + +local ngx_null = ngx.null +local function merge(origin, extend) + for k,v in pairs(extend) do + if type(v) == "table" then + if type(origin[k] or false) == "table" then + if _M.nkeys(origin[k]) ~= #origin[k] then + merge(origin[k] or {}, extend[k] or {}) + else + origin[k] = v + end + else + origin[k] = v + end + elseif v == ngx_null then + origin[k] = nil + else + origin[k] = v + end + end + + return origin +end +_M.merge = merge + + +local function patch(node_value, sub_path, conf) + local sub_value = node_value + local sub_paths = ngx_re.split(sub_path, "/") + for i = 1, #sub_paths - 1 do + local sub_name = sub_paths[i] + if sub_value[sub_name] == nil then + sub_value[sub_name] = {} + end + + sub_value = sub_value[sub_name] + + if type(sub_value) ~= "table" then + return 400, "invalid sub-path: /" + .. _M.concat(sub_paths, 1, i) + end + end + + if type(sub_value) ~= "table" then + return 400, "invalid sub-path: /" .. 
sub_path + end + + local sub_name = sub_paths[#sub_paths] + if sub_name and sub_name ~= "" then + sub_value[sub_name] = conf + else + node_value = conf + end + + return nil, nil, node_value +end +_M.patch = patch + + +-- Compare two tables as if they are sets (only compare the key part) +function _M.set_eq(a, b) + if nkeys(a) ~= nkeys(b) then + return false + end + + for k in pairs(a) do + if b[k] == nil then + return false + end + end + + return true +end + + +-- Compare two elements, including their descendants +local function deep_eq(a, b) + local type_a = type(a) + local type_b = type(b) + + if type_a ~= 'table' or type_b ~= 'table' then + return a == b + end + + local n_a = nkeys(a) + local n_b = nkeys(b) + if n_a ~= n_b then + return false + end + + for k, v_a in pairs(a) do + local v_b = b[k] + local eq = deep_eq(v_a, v_b) + if not eq then + return false + end + end + + return true +end +_M.deep_eq = deep_eq + + +-- pick takes the given attributes out of object +function _M.pick(obj, attrs) + local data = {} + for k, v in pairs(obj) do + if attrs[k] ~= nil then + data[k] = v + end + end + + return data +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/timer.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/timer.lua new file mode 100644 index 0000000..7cd3c53 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/timer.lua @@ -0,0 +1,108 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Wrapped timer module, can cancel the running timers. +-- +-- @module core.timer + +local log = require("apisix.core.log") +local sleep = require("apisix.core.utils").sleep +local timer_every = ngx.timer.every +local timer_at = ngx.timer.at +local update_time = ngx.update_time +local now = ngx.now +local pcall = pcall + + +local _M = { + version = 0.1, +} + + +local function _internal(timer) + timer.start_time = now() + + repeat + local ok, err = pcall(timer.callback_fun) + if not ok then + log.error("failed to run the timer: ", timer.name, " err: ", err) + + if timer.sleep_fail > 0 then + sleep(timer.sleep_fail) + end + + elseif timer.sleep_succ > 0 then + sleep(timer.sleep_succ) + end + + update_time() + until timer.each_ttl <= 0 or now() >= timer.start_time + timer.each_ttl +end + +local function run_timer(premature, self) + if self.running or premature then + return + end + + self.running = true + + local ok, err = pcall(_internal, self) + if not ok then + log.error("failed to run timer[", self.name, "] err: ", err) + end + + self.running = false +end + + +function _M.new(name, callback_fun, opts) + if not name then + return nil, "missing argument: name" + end + + if not callback_fun then + return nil, "missing argument: callback_fun" + end + + opts = opts or {} + local timer = { + name = name, + each_ttl = opts.each_ttl or 1, + sleep_succ = opts.sleep_succ or 1, + sleep_fail = opts.sleep_fail or 5, + start_time = 0, + + callback_fun = callback_fun, + running = false, + } + + local hdl, err = timer_every(opts.check_interval or 1, + 
run_timer, timer) + if not hdl then + return nil, err + end + + hdl, err = timer_at(0, run_timer, timer) + if not hdl then + return nil, err + end + + return timer +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/core/utils.lua b/CloudronPackages/APISIX/apisix-source/apisix/core/utils.lua new file mode 100644 index 0000000..cfea756 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/core/utils.lua @@ -0,0 +1,465 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- Collection of util functions. 
+-- +-- @module core.utils + +local config_local = require("apisix.core.config_local") +local core_str = require("apisix.core.string") +local rfind_char = core_str.rfind_char +local table = require("apisix.core.table") +local log = require("apisix.core.log") +local string = require("apisix.core.string") +local dns_client = require("apisix.core.dns.client") +local ngx_re = require("ngx.re") +local ipmatcher = require("resty.ipmatcher") +local ffi = require("ffi") +local base = require("resty.core.base") +local open = io.open +local sub_str = string.sub +local str_byte = string.byte +local tonumber = tonumber +local tostring = tostring +local re_gsub = ngx.re.gsub +local re_match = ngx.re.match +local re_gmatch = ngx.re.gmatch +local type = type +local io_popen = io.popen +local C = ffi.C +local ffi_string = ffi.string +local get_string_buf = base.get_string_buf +local exiting = ngx.worker.exiting +local ngx_sleep = ngx.sleep +local ipairs = ipairs + +local hostname +local dns_resolvers +local current_inited_resolvers +local current_dns_client +local max_sleep_interval = 1 + +ffi.cdef[[ + int ngx_escape_uri(char *dst, const char *src, + size_t size, int type); +]] + + +local _M = { + version = 0.2, + parse_ipv4 = ipmatcher.parse_ipv4, + parse_ipv6 = ipmatcher.parse_ipv6, +} + + +function _M.get_seed_from_urandom() + local frandom, err = open("/dev/urandom", "rb") + if not frandom then + return nil, 'failed to open /dev/urandom: ' .. 
err + end + + local str = frandom:read(8) + frandom:close() + if not str then + return nil, 'failed to read data from /dev/urandom' + end + + local seed = 0 + for i = 1, 8 do + seed = 256 * seed + str:byte(i) + end + + return seed +end + + +function _M.split_uri(uri) + return ngx_re.split(uri, "/") +end + + +local function dns_parse(domain, selector) + if dns_resolvers ~= current_inited_resolvers then + local local_conf = config_local.local_conf() + local valid = table.try_read_attr(local_conf, "apisix", "dns_resolver_valid") + local enable_resolv_search_opt = table.try_read_attr(local_conf, "apisix", + "enable_resolv_search_opt") + local opts = { + nameservers = table.clone(dns_resolvers), + order = {"last", "A", "AAAA", "CNAME"}, -- avoid querying SRV + } + + opts.validTtl = valid + + if not enable_resolv_search_opt then + opts.search = {} + end + + local client, err = dns_client.new(opts) + if not client then + return nil, "failed to init the dns client: " .. err + end + + current_dns_client = client + current_inited_resolvers = dns_resolvers + end + + return current_dns_client:resolve(domain, selector) +end +_M.dns_parse = dns_parse + + +local function set_resolver(resolvers) + dns_resolvers = resolvers +end +_M.set_resolver = set_resolver + + +function _M.get_resolver(resolvers) + return dns_resolvers +end + + +local function _parse_ipv4_or_host(addr) + local pos = rfind_char(addr, ":", #addr - 1) + if not pos then + return addr, nil + end + + local host = sub_str(addr, 1, pos - 1) + local port = sub_str(addr, pos + 1) + return host, tonumber(port) +end + + +local function _parse_ipv6_without_port(addr) + return addr +end + + +-- parse_addr parses 'addr' into the host and the port parts. If the 'addr' +-- doesn't have a port, nil is used to return. +-- For IPv6 literal host with brackets, like [::1], the square brackets will be kept. +-- For malformed 'addr', the returned value can be anything. This method doesn't validate +-- if the input is valid. 
+function _M.parse_addr(addr) + if str_byte(addr, 1) == str_byte("[") then + -- IPv6 format, with brackets, maybe with port + local right_bracket = str_byte("]") + local len = #addr + if str_byte(addr, len) == right_bracket then + -- addr in [ip:v6] format + return addr, nil + else + local pos = rfind_char(addr, ":", #addr - 1) + if not pos or str_byte(addr, pos - 1) ~= right_bracket then + -- malformed addr + return addr, nil + end + + -- addr in [ip:v6]:port format + local host = sub_str(addr, 1, pos - 1) + local port = sub_str(addr, pos + 1) + return host, tonumber(port) + end + + else + -- When we reach here, the input can be: + -- 1. IPv4 + -- 2. IPv4, with port + -- 3. IPv6, like "2001:db8::68" or "::ffff:192.0.2.1" + -- 4. Malformed input + -- 5. Host, like "test.com" or "localhost" + -- 6. Host with port + local colon = str_byte(":") + local colon_counter = 0 + local dot = str_byte(".") + for i = 1, #addr do + local ch = str_byte(addr, i, i) + if ch == dot then + return _parse_ipv4_or_host(addr) + elseif ch == colon then + colon_counter = colon_counter + 1 + if colon_counter == 2 then + return _parse_ipv6_without_port(addr) + end + end + end + + return _parse_ipv4_or_host(addr) + end +end + + +function _M.uri_safe_encode(uri) + local count_escaped = C.ngx_escape_uri(nil, uri, #uri, 0) + local len = #uri + 2 * count_escaped + local buf = get_string_buf(len) + C.ngx_escape_uri(buf, uri, #uri, 0) + + return ffi_string(buf, len) +end + + +function _M.validate_header_field(field) + for i = 1, #field do + local b = str_byte(field, i, i) + -- '!' 
- '~', excluding ':' + if not (32 < b and b < 127) or b == 58 then + return false + end + end + return true +end + + +function _M.validate_header_value(value) + if type(value) ~= "string" then + return true + end + + for i = 1, #value do + local b = str_byte(value, i, i) + -- control characters + if b < 32 or b >= 127 then + return false + end + end + return true +end + + +--- +-- Returns the standard host name of the local host. +-- only use this method in init/init_worker phase. +-- +-- @function core.utils.gethostname +-- @treturn string The host name of the local host. +-- @usage +-- local hostname = core.utils.gethostname() -- "localhost" +function _M.gethostname() + if hostname then + return hostname + end + + local hd = io_popen("/bin/hostname") + local data, err = hd:read("*a") + if err == nil then + hostname = data + if string.has_suffix(hostname, "\r\n") then + hostname = sub_str(hostname, 1, -3) + elseif string.has_suffix(hostname, "\n") then + hostname = sub_str(hostname, 1, -2) + end + + else + hostname = "unknown" + log.error("failed to read output of \"/bin/hostname\": ", err) + end + + return hostname +end + + +local function sleep(sec) + if sec <= max_sleep_interval then + return ngx_sleep(sec) + end + ngx_sleep(max_sleep_interval) + if exiting() then + return + end + sec = sec - max_sleep_interval + return sleep(sec) +end + + +_M.sleep = sleep + + +local resolve_var +do + local _ctx + local n_resolved + local pat = [[(? 8 then + log.warn("missing valid end flag in file ", debug_yaml_path) + end + return + end + + f:seek('set') + local yaml_config = f:read("*a") + f:close() + + local debug_yaml_new = yaml.load(yaml_config) + if not debug_yaml_new then + log.error("failed to parse the content of file " .. 
debug_yaml_path) + return + end + + debug_yaml_new.hooks = debug_yaml_new.hooks or {} + debug_yaml = debug_yaml_new + debug_yaml_ctime = last_change_time + + -- validate the debug yaml config + local validator = jsonschema.generate_validator(config_schema) + local ok, err = validator(debug_yaml) + if not ok then + log.error("failed to validate debug config " .. err) + return + end + + return true +end + + +local sync_debug_hooks +do + local pre_mtime + local enabled_hooks = {} + +local function apply_new_fun(module, fun_name, file_path, hook_conf) + local log_level = hook_conf.log_level or "warn" + + if not module or type(module[fun_name]) ~= "function" then + log.error("failed to find function [", fun_name, + "] in module:", file_path) + return + end + + local fun = module[fun_name] + local fun_org + if enabled_hooks[fun] then + fun_org = enabled_hooks[fun].org + enabled_hooks[fun] = nil + else + fun_org = fun + end + + local t = {fun_org = fun_org} + local mt = {} + + function mt.__call(self, ...) 
+ local arg = {...} + local http_filter = debug_yaml.http_filter + local api_ctx = ngx.ctx.api_ctx + local enable_by_hook = not (http_filter and http_filter.enable) + local enable_by_header_filter = (http_filter and http_filter.enable) + and (api_ctx and api_ctx.enable_dynamic_debug) + if hook_conf.is_print_input_args then + if enable_by_hook or enable_by_header_filter then + log[log_level]("call require(\"", file_path, "\").", fun_name, + "() args:", inspect(arg)) + end + end + + local ret = {self.fun_org(...)} + if hook_conf.is_print_return_value then + if enable_by_hook or enable_by_header_filter then + log[log_level]("call require(\"", file_path, "\").", fun_name, + "() return:", inspect(ret)) + end + end + return unpack(ret) + end + + setmetatable(t, mt) + enabled_hooks[t] = { + org = fun_org, new = t, mod = module, + fun_name = fun_name + } + module[fun_name] = t +end + + +function sync_debug_hooks() + if not debug_yaml_ctime or debug_yaml_ctime == pre_mtime then + return + end + + for _, hook in pairs(enabled_hooks) do + local m = hook.mod + local name = hook.fun_name + m[name] = hook.org + end + + enabled_hooks = {} + + local hook_conf = debug_yaml.hook_conf + if not hook_conf.enable then + pre_mtime = debug_yaml_ctime + return + end + + local hook_name = hook_conf.name or "" + local hooks = debug_yaml[hook_name] + if not hooks then + pre_mtime = debug_yaml_ctime + return + end + + for file_path, fun_names in pairs(hooks) do + local ok, module = pcall(require, file_path) + if not ok then + log.error("failed to load module [", file_path, "]: ", module) + + else + for _, fun_name in ipairs(fun_names) do + apply_new_fun(module, fun_name, file_path, hook_conf) + end + end + end + + pre_mtime = debug_yaml_ctime +end + +end --do + + +local function sync_debug_status(premature) + if premature then + return + end + + if not read_debug_yaml() then + return + end + + sync_debug_hooks() +end + + +local function check() + if not debug_yaml or not debug_yaml.http_filter 
then + return false + end + + local http_filter = debug_yaml.http_filter + if not http_filter or not http_filter.enable_header_name or not http_filter.enable then + return false + end + + return true +end + +function _M.dynamic_debug(api_ctx) + if not check() then + return + end + + if get_headers()[debug_yaml.http_filter.enable_header_name] then + api_ctx.enable_dynamic_debug = true + end +end + + +function _M.enable_debug() + if not debug_yaml or not debug_yaml.basic then + return false + end + + return debug_yaml.basic.enable +end + + +function _M.init_worker() + local process = require("ngx.process") + if process.type() ~= "worker" then + return + end + + sync_debug_status() + ngx.timer.every(1, sync_debug_status) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul/init.lua new file mode 100644 index 0000000..4d3c0e4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul/init.lua @@ -0,0 +1,691 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local require = require +local local_conf = require("apisix.core.config_local").local_conf() +local core = require("apisix.core") +local core_sleep = require("apisix.core.utils").sleep +local resty_consul = require('resty.consul') +local http = require('resty.http') +local util = require("apisix.cli.util") +local ipairs = ipairs +local error = error +local ngx = ngx +local unpack = unpack +local tonumber = tonumber +local pairs = pairs +local ngx_timer_at = ngx.timer.at +local ngx_timer_every = ngx.timer.every +local log = core.log +local json_delay_encode = core.json.delay_encode +local ngx_worker_id = ngx.worker.id +local exiting = ngx.worker.exiting +local thread_spawn = ngx.thread.spawn +local thread_wait = ngx.thread.wait +local thread_kill = ngx.thread.kill +local math_random = math.random +local pcall = pcall +local null = ngx.null +local type = type +local next = next + +local all_services = core.table.new(0, 5) +local default_service +local default_weight +local sort_type +local skip_service_map = core.table.new(0, 1) +local dump_params + +local events +local events_list +local consul_services + +local default_skip_services = {"consul"} +local default_random_range = 5 +local default_catalog_error_index = -1 +local default_health_error_index = -2 +local watch_type_catalog = 1 +local watch_type_health = 2 +local max_retry_time = 256 + +local _M = { + version = 0.3, +} + + +local function discovery_consul_callback(data, event, source, pid) + all_services = data + log.notice("update local variable all_services, event is: ", event, + "source: ", source, "server pid:", pid, + ", all services: ", json_delay_encode(all_services, true)) +end + + +function _M.all_nodes() + return all_services +end + + +function _M.nodes(service_name) + if not all_services then + log.error("all_services is nil, failed to fetch nodes for : ", service_name) + return + end + + local resp_list = all_services[service_name] + + if not resp_list then + log.error("fetch nodes failed by 
", service_name, ", return default service") + return default_service and {default_service} + end + + log.info("process id: ", ngx_worker_id(), ", all_services[", service_name, "] = ", + json_delay_encode(resp_list, true)) + + return resp_list +end + + +local function update_all_services(consul_server_url, up_services) + -- clean old unused data + local old_services = consul_services[consul_server_url] or {} + for k, _ in pairs(old_services) do + all_services[k] = nil + end + core.table.clear(old_services) + + for k, v in pairs(up_services) do + all_services[k] = v + end + consul_services[consul_server_url] = up_services + + log.info("update all services: ", json_delay_encode(all_services, true)) +end + + +local function read_dump_services() + local data, err = util.read_file(dump_params.path) + if not data then + log.error("read dump file get error: ", err) + return + end + + log.info("read dump file: ", data) + data = util.trim(data) + if #data == 0 then + log.error("dump file is empty") + return + end + + local entity, err = core.json.decode(data) + if not entity then + log.error("decoded dump data got error: ", err, ", file content: ", data) + return + end + + if not entity.services or not entity.last_update then + log.warn("decoded dump data miss fields, file content: ", data) + return + end + + local now_time = ngx.time() + log.info("dump file last_update: ", entity.last_update, ", dump_params.expire: ", + dump_params.expire, ", now_time: ", now_time) + if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then + log.warn("dump file: ", dump_params.path, " had expired, ignored it") + return + end + + all_services = entity.services + log.info("load dump file into memory success") +end + + +local function write_dump_services() + local entity = { + services = all_services, + last_update = ngx.time(), + expire = dump_params.expire, -- later need handle it + } + local data = core.json.encode(entity) + local succ, err = 
util.write_file(dump_params.path, data) + if not succ then + log.error("write dump into file got error: ", err) + end +end + + +local function show_dump_file() + if not dump_params then + return 503, "dump params is nil" + end + + local data, err = util.read_file(dump_params.path) + if not data then + return 503, err + end + + return 200, data +end + + +local function get_retry_delay(retry_delay) + if not retry_delay or retry_delay >= max_retry_time then + retry_delay = 1 + else + retry_delay = retry_delay * 4 + end + + return retry_delay +end + + +local function get_opts(consul_server, is_catalog) + local opts = { + host = consul_server.host, + port = consul_server.port, + connect_timeout = consul_server.connect_timeout, + read_timeout = consul_server.read_timeout, + default_args = { + token = consul_server.token, + } + } + if not consul_server.keepalive then + return opts + end + + opts.default_args.wait = consul_server.wait_timeout --blocked wait!=0; unblocked by wait=0 + + if is_catalog then + opts.default_args.index = consul_server.catalog_index + else + opts.default_args.index = consul_server.health_index + end + + return opts +end + + +local function watch_catalog(consul_server) + local client = resty_consul:new(get_opts(consul_server, true)) + + ::RETRY:: + local watch_result, watch_err = client:get(consul_server.consul_watch_catalog_url) + local watch_error_info = (watch_err ~= nil and watch_err) + or ((watch_result ~= nil and watch_result.status ~= 200) + and watch_result.status) + if watch_error_info then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_catalog_url, + ", got watch result: ", json_delay_encode(watch_result), + ", with error: ", watch_error_info) + + return watch_type_catalog, default_catalog_error_index + end + + if consul_server.catalog_index > 0 + and consul_server.catalog_index == tonumber(watch_result.headers['X-Consul-Index']) then + local random_delay = 
math_random(default_random_range) + log.info("watch catalog has no change, re-watch consul after ", random_delay, " seconds") + core_sleep(random_delay) + goto RETRY + end + + return watch_type_catalog, watch_result.headers['X-Consul-Index'] +end + + +local function watch_health(consul_server) + local client = resty_consul:new(get_opts(consul_server, false)) + + ::RETRY:: + local watch_result, watch_err = client:get(consul_server.consul_watch_health_url) + local watch_error_info = (watch_err ~= nil and watch_err) + or ((watch_result ~= nil and watch_result.status ~= 200) + and watch_result.status) + if watch_error_info then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_health_url, + ", got watch result: ", json_delay_encode(watch_result), + ", with error: ", watch_error_info) + + return watch_type_health, default_health_error_index + end + + if consul_server.health_index > 0 + and consul_server.health_index == tonumber(watch_result.headers['X-Consul-Index']) then + local random_delay = math_random(default_random_range) + log.info("watch health has no change, re-watch consul after ", random_delay, " seconds") + core_sleep(random_delay) + goto RETRY + end + + return watch_type_health, watch_result.headers['X-Consul-Index'] +end + + +local function check_keepalive(consul_server, retry_delay) + if consul_server.keepalive and not exiting() then + local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay) + if not ok then + log.error("create ngx_timer_at got error: ", err) + return + end + end +end + + +local function update_index(consul_server, catalog_index, health_index) + local c_index = 0 + local h_index = 0 + if catalog_index ~= nil then + c_index = tonumber(catalog_index) + end + + if health_index ~= nil then + h_index = tonumber(health_index) + end + + if c_index > 0 then + consul_server.catalog_index = c_index + end + + if h_index > 0 then + consul_server.health_index = h_index + end 
+end + + +local function is_not_empty(value) + if value == nil or value == null + or (type(value) == "table" and not next(value)) + or (type(value) == "string" and value == "") + then + return false + end + + return true +end + + +local function watch_result_is_valid(watch_type, index, catalog_index, health_index) + if index <= 0 then + return false + end + + if watch_type == watch_type_catalog then + if index == catalog_index then + return false + end + else + if index == health_index then + return false + end + end + + return true +end + + +local function combine_sort_nodes_cmp(left, right) + if left.host ~= right.host then + return left.host < right.host + end + + return left.port < right.port +end + + +local function port_sort_nodes_cmp(left, right) + return left.port < right.port +end + + +local function host_sort_nodes_cmp(left, right) + return left.host < right.host +end + + +function _M.connect(premature, consul_server, retry_delay) + if premature then + return + end + + local catalog_thread, spawn_catalog_err = thread_spawn(watch_catalog, consul_server) + if not catalog_thread then + local random_delay = math_random(default_random_range) + log.error("failed to spawn thread watch catalog: ", spawn_catalog_err, + ", retry connecting consul after ", random_delay, " seconds") + core_sleep(random_delay) + + check_keepalive(consul_server, retry_delay) + return + end + + local health_thread, err = thread_spawn(watch_health, consul_server) + if not health_thread then + thread_kill(catalog_thread) + local random_delay = math_random(default_random_range) + log.error("failed to spawn thread watch health: ", err, ", retry connecting consul after ", + random_delay, " seconds") + core_sleep(random_delay) + + check_keepalive(consul_server, retry_delay) + return + end + + local thread_wait_ok, watch_type, index = thread_wait(catalog_thread, health_thread) + thread_kill(catalog_thread) + thread_kill(health_thread) + if not thread_wait_ok then + local random_delay = 
math_random(default_random_range) + log.error("failed to wait thread: ", watch_type, ", retry connecting consul after ", + random_delay, " seconds") + core_sleep(random_delay) + + check_keepalive(consul_server, retry_delay) + return + end + + -- double check index has changed + if not watch_result_is_valid(tonumber(watch_type), + tonumber(index), consul_server.catalog_index, consul_server.health_index) then + retry_delay = get_retry_delay(retry_delay) + log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds") + core_sleep(retry_delay) + + check_keepalive(consul_server, retry_delay) + return + end + + local consul_client = resty_consul:new({ + host = consul_server.host, + port = consul_server.port, + connect_timeout = consul_server.connect_timeout, + read_timeout = consul_server.read_timeout, + default_args = { + token = consul_server.token + } + }) + local catalog_success, catalog_res, catalog_err = pcall(function() + return consul_client:get(consul_server.consul_watch_catalog_url) + end) + if not catalog_success then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_catalog_url, + ", got catalog result: ", json_delay_encode(catalog_res)) + check_keepalive(consul_server, retry_delay) + return + end + local catalog_error_info = (catalog_err ~= nil and catalog_err) + or ((catalog_res ~= nil and catalog_res.status ~= 200) + and catalog_res.status) + if catalog_error_info then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_catalog_url, + ", got catalog result: ", json_delay_encode(catalog_res), + ", with error: ", catalog_error_info) + + retry_delay = get_retry_delay(retry_delay) + log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds") + core_sleep(retry_delay) + + check_keepalive(consul_server, retry_delay) + return + end + + -- get health index + local success, health_res, 
health_err = pcall(function() + return consul_client:get(consul_server.consul_watch_health_url) + end) + if not success then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_health_url, + ", got health result: ", json_delay_encode(health_res)) + check_keepalive(consul_server, retry_delay) + return + end + local health_error_info = (health_err ~= nil and health_err) + or ((health_res ~= nil and health_res.status ~= 200) + and health_res.status) + if health_error_info then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_health_url, + ", got health result: ", json_delay_encode(health_res), + ", with error: ", health_error_info) + + retry_delay = get_retry_delay(retry_delay) + log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds") + core_sleep(retry_delay) + + check_keepalive(consul_server, retry_delay) + return + end + + log.info("connect consul: ", consul_server.consul_server_url, + ", catalog_result status: ", catalog_res.status, + ", catalog_result.headers.index: ", catalog_res.headers['X-Consul-Index'], + ", consul_server.index: ", consul_server.index, + ", consul_server: ", json_delay_encode(consul_server)) + + -- if the current index is different from the last index, then update the service + if (consul_server.catalog_index ~= tonumber(catalog_res.headers['X-Consul-Index'])) + or (consul_server.health_index ~= tonumber(health_res.headers['X-Consul-Index'])) then + local up_services = core.table.new(0, #catalog_res.body) + for service_name, _ in pairs(catalog_res.body) do + -- check if the service_name is 'skip service' + if skip_service_map[service_name] then + goto CONTINUE + end + + -- get node from service + local svc_url = consul_server.consul_sub_url .. "/" .. 
service_name + local svc_success, result, get_err = pcall(function() + return consul_client:get(svc_url, {passing = true}) + end) + local error_info = (get_err ~= nil and get_err) or + ((result ~= nil and result.status ~= 200) and result.status) + if not svc_success or error_info then + log.error("connect consul: ", consul_server.consul_server_url, + ", by service url: ", svc_url, ", with error: ", error_info) + goto CONTINUE + end + + -- decode body, decode json, update service, error handling + -- check result body is not nil and not empty + if is_not_empty(result.body) then + -- add services to table + local nodes = up_services[service_name] + local nodes_uniq = {} + for _, node in ipairs(result.body) do + if not node.Service then + goto CONTINUE + end + + local svc_address, svc_port = node.Service.Address, node.Service.Port + -- Handle nil or 0 port case - default to 80 for HTTP services + if not svc_port or svc_port == 0 then + svc_port = 80 + end + -- if nodes is nil, new nodes table and set to up_services + if not nodes then + nodes = core.table.new(1, 0) + up_services[service_name] = nodes + end + -- not store duplicate service IDs. + local service_id = svc_address .. ":" .. 
svc_port + if not nodes_uniq[service_id] then + -- add node to nodes table + core.table.insert(nodes, { + host = svc_address, + port = tonumber(svc_port), + weight = default_weight, + }) + nodes_uniq[service_id] = true + end + end + if nodes then + if sort_type == "port_sort" then + core.table.sort(nodes, port_sort_nodes_cmp) + + elseif sort_type == "host_sort" then + core.table.sort(nodes, host_sort_nodes_cmp) + + elseif sort_type == "combine_sort" then + core.table.sort(nodes, combine_sort_nodes_cmp) + + end + end + up_services[service_name] = nodes + end + :: CONTINUE :: + end + + update_all_services(consul_server.consul_server_url, up_services) + + --update events + local post_ok, post_err = events:post(events_list._source, + events_list.updating, all_services) + if not post_ok then + log.error("post_event failure with ", events_list._source, + ", update all services error: ", post_err) + end + + if dump_params then + ngx_timer_at(0, write_dump_services) + end + + update_index(consul_server, + catalog_res.headers['X-Consul-Index'], + health_res.headers['X-Consul-Index']) + end + + check_keepalive(consul_server, retry_delay) +end + + +local function format_consul_params(consul_conf) + local consul_server_list = core.table.new(0, #consul_conf.servers) + + for _, v in pairs(consul_conf.servers) do + local scheme, host, port, path = unpack(http.parse_uri(nil, v)) + if scheme ~= "http" then + return nil, "only support consul http schema address, eg: http://address:port" + elseif path ~= "/" or core.string.has_suffix(v, '/') then + return nil, "invalid consul server address, the valid format: http://address:port" + end + core.table.insert(consul_server_list, { + host = host, + port = port, + token = consul_conf.token, + connect_timeout = consul_conf.timeout.connect, + read_timeout = consul_conf.timeout.read, + wait_timeout = consul_conf.timeout.wait, + consul_watch_catalog_url = "/catalog/services", + consul_sub_url = "/health/service", + consul_watch_health_url = 
"/health/state/any", + consul_server_url = v .. "/v1", + weight = consul_conf.weight, + keepalive = consul_conf.keepalive, + health_index = 0, + catalog_index = 0, + fetch_interval = consul_conf.fetch_interval -- fetch interval to next connect consul + }) + end + return consul_server_list, nil +end + + +function _M.init_worker() + local consul_conf = local_conf.discovery.consul + + if consul_conf.dump then + local dump = consul_conf.dump + dump_params = dump + + if dump.load_on_init then + read_dump_services() + end + end + + events = require("apisix.events") + events_list = events:event_list( + "discovery_consul_update_all_services", + "updating" + ) + + if 0 ~= ngx_worker_id() then + events:register(discovery_consul_callback, events_list._source, events_list.updating) + return + end + + log.notice("consul_conf: ", json_delay_encode(consul_conf, true)) + default_weight = consul_conf.weight + sort_type = consul_conf.sort_type + -- set default service, used when the server node cannot be found + if consul_conf.default_service then + default_service = consul_conf.default_service + default_service.weight = default_weight + end + if consul_conf.skip_services then + skip_service_map = core.table.new(0, #consul_conf.skip_services) + for _, v in ipairs(consul_conf.skip_services) do + skip_service_map[v] = true + end + end + -- set up default skip service + for _, v in ipairs(default_skip_services) do + skip_service_map[v] = true + end + + local consul_servers_list, err = format_consul_params(consul_conf) + if err then + error("format consul config got error: " .. err) + end + log.info("consul_server_list: ", json_delay_encode(consul_servers_list, true)) + + consul_services = core.table.new(0, 1) + -- success or failure + for _, server in ipairs(consul_servers_list) do + local ok, err = ngx_timer_at(0, _M.connect, server) + if not ok then + error("create consul got error: " .. 
err) + end + + if server.keepalive == false then + ngx_timer_every(server.fetch_interval, _M.connect, server) + end + end +end + + +function _M.dump_data() + return {config = local_conf.discovery.consul, services = all_services } +end + + +function _M.control_api() + return { + { + methods = {"GET"}, + uris = {"/show_dump_file"}, + handler = show_dump_file, + } + } +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul/schema.lua new file mode 100644 index 0000000..5d6fc64 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul/schema.lua @@ -0,0 +1,92 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+--
+-- JSON-Schema for the `discovery.consul` section of config.yaml
+-- (validated by apisix/discovery/consul/init.lua at worker init).
+return {
+    type = "object",
+    properties = {
+        -- list of consul server addresses, e.g. "http://127.0.0.1:8500"
+        servers = {
+            type = "array",
+            minItems = 1,
+            items = {
+                type = "string",
+            }
+        },
+        token = {type = "string", default = ""},
+        -- polling interval (seconds) used when keepalive is disabled
+        fetch_interval = {type = "integer", minimum = 1, default = 3},
+        keepalive = {
+            type = "boolean",
+            default = true
+        },
+        weight = {type = "integer", minimum = 1, default = 1},
+        timeout = {
+            type = "object",
+            properties = {
+                -- NOTE(review): connect/read defaults (2000) look like milliseconds
+                -- while wait (60) is the Consul blocking-query wait, presumably in
+                -- seconds -- TODO confirm units against resty.consul
+                connect = {type = "integer", minimum = 1, default = 2000},
+                read = {type = "integer", minimum = 1, default = 2000},
+                wait = {type = "integer", minimum = 1, default = 60}
+            },
+            default = {
+                connect = 2000,
+                read = 2000,
+                wait = 60,
+            }
+        },
+        sort_type = {
+            type = "string",
+            enum = {"origin", "host_sort", "port_sort", "combine_sort"},
+            default = "origin",
+        },
+        -- service names that discovery should ignore
+        skip_services = {
+            type = "array",
+            minItems = 1,
+            items = {
+                type = "string",
+            }
+        },
+        -- optional on-disk dump of discovered services; `path` is required
+        -- whenever a `dump` object is present
+        dump = {
+            type = "object",
+            properties = {
+                path = {type = "string", minLength = 1},
+                load_on_init = {type = "boolean", default = true},
+                -- expire = 0 means the dump file never expires
+                expire = {type = "integer", default = 0},
+            },
+            required = {"path"},
+        },
+        -- fallback node returned when a service cannot be found
+        default_service = {
+            type = "object",
+            properties = {
+                host = {type = "string"},
+                port = {type = "integer"},
+                metadata = {
+                    type = "object",
+                    properties = {
+                        fail_timeout = {type = "integer", default = 1},
+                        weight = {type = "integer", default = 1},
+                        max_fails = {type = "integer", default = 1}
+                    },
+                    default = {
+                        fail_timeout = 1,
+                        weight = 1,
+                        max_fails = 1
+                    }
+                }
+            }
+        }
+    },
+
+    required = {"servers"}
+}
+
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul_kv/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul_kv/init.lua
new file mode 100644
index 0000000..bf60654
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul_kv/init.lua
@@ -0,0 +1,439 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.
See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- consul_kv discovery: watches a Consul KV prefix and maps keys of the form
+-- <prefix>/<service>/<host>:<port> to upstream node lists.
+local require = require
+local local_conf = require("apisix.core.config_local").local_conf()
+local core = require("apisix.core")
+local core_sleep = require("apisix.core.utils").sleep
+local resty_consul = require('resty.consul')
+local cjson = require('cjson')
+local http = require('resty.http')
+local util = require("apisix.cli.util")
+-- localize globals once for LuaJIT performance
+-- (fix: `local ipairs = ipairs` was previously declared twice)
+local ipairs = ipairs
+local error = error
+local ngx = ngx
+local unpack = unpack
+local ngx_re_match = ngx.re.match
+local tonumber = tonumber
+local pairs = pairs
+local ngx_timer_at = ngx.timer.at
+local ngx_timer_every = ngx.timer.every
+local log = core.log
+local ngx_decode_base64 = ngx.decode_base64
+local json_delay_encode = core.json.delay_encode
+local cjson_null = cjson.null
+
+-- worker-local cache: service name -> list of {host, port, weight} nodes
+local applications = core.table.new(0, 5)
+local default_service          -- fallback node when a service is unknown
+local default_weight           -- weight used when node metadata carries none
+local default_prefix_rule      -- regex splitting a KV key into (service, host, port)
+local skip_keys_map = core.table.new(0, 1)
+local dump_params              -- on-disk dump config, nil when dumping is disabled
+
+-- worker-event plumbing used to broadcast updates to sibling workers
+local events
+local events_list
+local consul_apps
+
+local _M = {
+    version = 0.3,
+}
+
+
+-- Event callback run in non-zero workers: replaces the local `applications`
+-- snapshot with the one posted by the watcher worker.
+local function discovery_consul_callback(data, event, source, pid)
+    applications = data
+    log.notice("update local variable application, event is: ", event,
+        "source: ", source, "server pid:", pid,
+        ", application: ", core.json.encode(applications, true))
+end
+
+
+function _M.all_nodes() + return applications +end + + +function _M.nodes(service_name) + if not applications then + log.error("application is nil, failed to fetch nodes for : ", service_name) + return + end + + local resp_list = applications[service_name] + + if not resp_list then + log.error("fetch nodes failed by ", service_name, ", return default service") + return default_service and {default_service} + end + + log.info("process id: ", ngx.worker.id(), ", applications[", service_name, "] = ", + json_delay_encode(resp_list, true)) + + return resp_list +end + + +local function parse_instance(node, server_name_prefix) + local key = node.Key + + if key == cjson_null or not key or #key == 0 then + log.error("consul_key_empty, server_name_prefix: ", server_name_prefix, + ", node: ", json_delay_encode(node, true)) + return false + end + + local result = ngx_re_match(key, default_prefix_rule, "jo") + if not result then + log.error("server name parse error, server_name_prefix: ", server_name_prefix, + ", node: ", json_delay_encode(node, true)) + return false + end + + local sn, host, port = result[1], result[2], result[3] + + -- if exist, skip special kesy + if sn and skip_keys_map[sn] then + return false + end + + -- base64 value = "IHsid2VpZ2h0IjogMTIwLCAibWF4X2ZhaWxzIjogMiwgImZhaWxfdGltZW91dCI6IDJ9" + -- ori value = "{"weight": 120, "max_fails": 2, "fail_timeout": 2}" + local metadataBase64 = node.Value + if metadataBase64 == cjson_null or not metadataBase64 or #metadataBase64 == 0 then + log.error("error: consul_value_empty, server_name_prefix: ", server_name_prefix, + ", node: ", json_delay_encode(node, true)) + return false + end + + local metadata, err = core.json.decode(ngx_decode_base64(metadataBase64)) + if err then + log.error("invalid upstream value, server_name_prefix: ", server_name_prefix, + ",err: ", err, ", node: ", json_delay_encode(node, true)) + return false + elseif metadata.check_status == false or metadata.check_status == "false" then + 
log.error("server node unhealthy, server_name_prefix: ", server_name_prefix, + ", node: ", json_delay_encode(node, true)) + return false + end + + return true, host, tonumber(port), metadata, sn +end + + +local function update_application(server_name_prefix, data) + local sn + local up_apps = core.table.new(0, #data) + local weight = default_weight + + for _, node in ipairs(data) do + local succ, ip, port, metadata, server_name = parse_instance(node, server_name_prefix) + if succ then + sn = server_name_prefix .. server_name + local nodes = up_apps[sn] + if not nodes then + nodes = core.table.new(1, 0) + up_apps[sn] = nodes + end + core.table.insert(nodes, { + host = ip, + port = port, + weight = metadata and metadata.weight or weight, + }) + end + end + + -- clean old unused data + local old_apps = consul_apps[server_name_prefix] or {} + for k, _ in pairs(old_apps) do + applications[k] = nil + end + core.table.clear(old_apps) + + for k, v in pairs(up_apps) do + applications[k] = v + end + consul_apps[server_name_prefix] = up_apps + + log.info("update applications: ", core.json.encode(applications)) +end + + +local function read_dump_srvs() + local data, err = util.read_file(dump_params.path) + if not data then + log.notice("read dump file get error: ", err) + return + end + + log.info("read dump file: ", data) + data = util.trim(data) + if #data == 0 then + log.error("dump file is empty") + return + end + + local entity, err = core.json.decode(data) + if not entity then + log.error("decoded dump data got error: ", err, ", file content: ", data) + return + end + + if not entity.services or not entity.last_update then + log.warn("decoded dump data miss fields, file content: ", data) + return + end + + local now_time = ngx.time() + log.info("dump file last_update: ", entity.last_update, ", dump_params.expire: ", + dump_params.expire, ", now_time: ", now_time) + if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then + log.warn("dump 
file: ", dump_params.path, " had expired, ignored it") + return + end + + applications = entity.services + log.info("load dump file into memory success") +end + + +local function write_dump_srvs() + local entity = { + services = applications, + last_update = ngx.time(), + expire = dump_params.expire, -- later need handle it + } + local data = core.json.encode(entity) + local succ, err = util.write_file(dump_params.path, data) + if not succ then + log.error("write dump into file got error: ", err) + end +end + + +local function show_dump_file() + if not dump_params then + return 503, "dump params is nil" + end + + local data, err = util.read_file(dump_params.path) + if not data then + return 503, err + end + + return 200, data +end + + +function _M.connect(premature, consul_server, retry_delay) + if premature then + return + end + + local consul_client = resty_consul:new({ + host = consul_server.host, + port = consul_server.port, + connect_timeout = consul_server.connect_timeout, + read_timeout = consul_server.read_timeout, + default_args = consul_server.default_args, + }) + + log.info("consul_server: ", json_delay_encode(consul_server, true)) + local result, err = consul_client:get(consul_server.consul_key) + local error_info = (err ~= nil and err) + or ((result ~= nil and result.status ~= 200) + and result.status) + if error_info then + log.error("connect consul: ", consul_server.server_name_key, + " by key: ", consul_server.consul_key, + ", got result: ", json_delay_encode(result, true), + ", with error: ", error_info) + + if not retry_delay then + retry_delay = 1 + else + retry_delay = retry_delay * 4 + end + + log.warn("retry connecting consul after ", retry_delay, " seconds") + core_sleep(retry_delay) + + goto ERR + end + + log.info("connect consul: ", consul_server.server_name_key, + ", result status: ", result.status, + ", result.headers.index: ", result.headers['X-Consul-Index'], + ", result body: ", json_delay_encode(result.body)) + + -- if current index 
different last index then update application + if consul_server.index ~= result.headers['X-Consul-Index'] then + consul_server.index = result.headers['X-Consul-Index'] + -- only long connect type use index + if consul_server.keepalive then + consul_server.default_args.index = result.headers['X-Consul-Index'] + end + + -- decode body, decode json, update application, error handling + if result.body and #result.body ~= 0 then + log.notice("server_name: ", consul_server.server_name_key, + ", header: ", core.json.encode(result.headers, true), + ", body: ", core.json.encode(result.body, true)) + + update_application(consul_server.server_name_key, result.body) + --update events + local ok, err = events:post(events_list._source, events_list.updating, applications) + if not ok then + log.error("post_event failure with ", events_list._source, + ", update application error: ", err) + end + + if dump_params then + ngx_timer_at(0, write_dump_srvs) + end + end + end + + :: ERR :: + local keepalive = consul_server.keepalive + if keepalive then + local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay) + if not ok then + log.error("create ngx_timer_at got error: ", err) + return + end + end +end + + +local function format_consul_params(consul_conf) + local consul_server_list = core.table.new(0, #consul_conf.servers) + local args = { + token = consul_conf.token, + recurse = true + } + + if consul_conf.keepalive then + args.wait = consul_conf.timeout.wait --blocked wait!=0; unblocked by wait=0 + args.index = 0 + end + + for _, v in pairs(consul_conf.servers) do + local scheme, host, port, path = unpack(http.parse_uri(nil, v)) + if scheme ~= "http" then + return nil, "only support consul http schema address, eg: http://address:port" + elseif path ~= "/" or core.string.has_suffix(v, '/') then + return nil, "invalid consul server address, the valid format: http://address:port" + end + + core.table.insert(consul_server_list, { + host = host, + port = port, + 
connect_timeout = consul_conf.timeout.connect, + read_timeout = consul_conf.timeout.read, + consul_key = "/kv/" .. consul_conf.prefix, + server_name_key = v .. "/v1/kv/", + weight = consul_conf.weight, + keepalive = consul_conf.keepalive, + default_args = args, + index = 0, + fetch_interval = consul_conf.fetch_interval -- fetch interval to next connect consul + }) + end + + return consul_server_list +end + + +function _M.init_worker() + local consul_conf = local_conf.discovery.consul_kv + + if consul_conf.dump then + local dump = consul_conf.dump + dump_params = dump + + if dump.load_on_init then + read_dump_srvs() + end + end + + events = require("apisix.events") + events_list = events:event_list( + "discovery_consul_update_application", + "updating" + ) + + if 0 ~= ngx.worker.id() then + events:register(discovery_consul_callback, events_list._source, events_list.updating) + return + end + + log.notice("consul_conf: ", core.json.encode(consul_conf)) + default_weight = consul_conf.weight + -- set default service, used when the server node cannot be found + if consul_conf.default_service then + default_service = consul_conf.default_service + default_service.weight = default_weight + end + default_prefix_rule = "(" .. consul_conf.prefix .. "/.*/)([a-zA-Z0-9.]+):([0-9]+)" + log.info("default params, default_weight: ", default_weight, + ", default_prefix_rule: ", default_prefix_rule) + if consul_conf.skip_keys then + skip_keys_map = core.table.new(0, #consul_conf.skip_keys) + for _, v in ipairs(consul_conf.skip_keys) do + skip_keys_map[v] = true + end + end + + local consul_servers_list, err = format_consul_params(consul_conf) + if err then + error(err) + return + end + log.info("consul_server_list: ", core.json.encode(consul_servers_list)) + + consul_apps = core.table.new(0, 1) + -- success or failure + for _, server in ipairs(consul_servers_list) do + local ok, err = ngx_timer_at(0, _M.connect, server) + if not ok then + error("create consul_kv got error: " .. 
err) + return + end + + if server.keepalive == false then + ngx_timer_every(server.fetch_interval, _M.connect, server) + end + end +end + + +function _M.dump_data() + return {config = local_conf.discovery.consul_kv, services = applications} +end + + +function _M.control_api() + return { + { + methods = {"GET"}, + uris = {"/show_dump_file"}, + handler = show_dump_file, + } + } +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul_kv/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul_kv/schema.lua new file mode 100644 index 0000000..4c02b2c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/consul_kv/schema.lua @@ -0,0 +1,88 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +return { + type = "object", + properties = { + servers = { + type = "array", + minItems = 1, + items = { + type = "string", + } + }, + token = {type = "string", default = ""}, + fetch_interval = {type = "integer", minimum = 1, default = 3}, + keepalive = { + type = "boolean", + default = true + }, + prefix = {type = "string", default = "upstreams"}, + weight = {type = "integer", minimum = 1, default = 1}, + timeout = { + type = "object", + properties = { + connect = {type = "integer", minimum = 1, default = 2000}, + read = {type = "integer", minimum = 1, default = 2000}, + wait = {type = "integer", minimum = 1, default = 60} + }, + default = { + connect = 2000, + read = 2000, + wait = 60, + } + }, + skip_keys = { + type = "array", + minItems = 1, + items = { + type = "string", + } + }, + dump = { + type = "object", + properties = { + path = {type = "string", minLength = 1}, + load_on_init = {type = "boolean", default = true}, + expire = {type = "integer", default = 0}, + }, + required = {"path"}, + }, + default_service = { + type = "object", + properties = { + host = {type = "string"}, + port = {type = "integer"}, + metadata = { + type = "object", + properties = { + fail_timeout = {type = "integer", default = 1}, + weight = {type = "integer", default = 1}, + max_fails = {type = "integer", default = 1} + }, + default = { + fail_timeout = 1, + weight = 1, + max_fails = 1 + } + } + } + } + }, + + required = {"servers"} +} + diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/dns/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/dns/init.lua new file mode 100644 index 0000000..601de0e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/dns/init.lua @@ -0,0 +1,89 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local config_local = require("apisix.core.config_local") +local is_http = ngx.config.subsystem == "http" +local ipairs = ipairs +local error = error + + +local dns_client +local _M = {} + + +function _M.nodes(service_name) + local host, port = core.utils.parse_addr(service_name) + core.log.info("discovery dns with host ", host, ", port ", port) + + local records, err = dns_client:resolve(host, core.dns_client.RETURN_ALL) + if not records then + return nil, err + end + + local nodes = core.table.new(#records, 0) + local index = 1 + for _, r in ipairs(records) do + if r.address then + local node_port = port + if not node_port and r.port ~= 0 then + -- if the port is zero, fallback to use the default + node_port = r.port + end + + -- ignore zero port when subsystem is stream + if node_port or is_http then + nodes[index] = {host = r.address, weight = r.weight or 1, port = node_port} + if r.priority then + -- for SRV record, nodes with lower priority are chosen first + nodes[index].priority = -r.priority + end + index = index + 1 + end + end + end + + return nodes +end + + +function _M.init_worker() + local local_conf = config_local.local_conf() + local servers = local_conf.discovery.dns.servers + local resolv_conf = local_conf.discovery.dns.resolv_conf + local default_order = {"last", "SRV", "A", "AAAA", "CNAME"} + local order = 
core.table.try_read_attr(local_conf, "discovery", "dns", "order") + order = order or default_order + + local opts = { + hosts = {}, + resolvConf = resolv_conf, + nameservers = servers, + order = order, + } + + local client, err = core.dns_client.new(opts) + if not client then + error("failed to init the dns client: ", err) + return + end + + dns_client = client +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/dns/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/dns/schema.lua new file mode 100644 index 0000000..03c7934 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/dns/schema.lua @@ -0,0 +1,48 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +return { + type = "object", + properties = { + servers = { + type = "array", + minItems = 1, + items = { + type = "string", + }, + }, + resolv_conf = { + type = "string", + }, + order = { + type = "array", + minItems = 1, + maxItems = 5, + uniqueItems = true, + items = { + enum = {"last", "SRV", "A", "AAAA", "CNAME"} + }, + }, + }, + oneOf = { + { + required = {"servers"}, + }, + { + required = {"resolv_conf"}, + } + } +} diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/eureka/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/eureka/init.lua new file mode 100644 index 0000000..df72a52 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/eureka/init.lua @@ -0,0 +1,223 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local local_conf = require("apisix.core.config_local").local_conf() +local http = require("resty.http") +local core = require("apisix.core") +local ipmatcher = require("resty.ipmatcher") +local ipairs = ipairs +local tostring = tostring +local type = type +local math_random = math.random +local ngx = ngx +local ngx_timer_at = ngx.timer.at +local ngx_timer_every = ngx.timer.every +local string_sub = string.sub +local str_find = core.string.find +local log = core.log + +local default_weight +local applications + + +local _M = { + version = 0.1, +} + + +local function service_info() + local host = local_conf.discovery and + local_conf.discovery.eureka and local_conf.discovery.eureka.host + if not host then + log.error("do not set eureka.host") + return + end + + local basic_auth + -- TODO Add health check to get healthy nodes. + local url = host[math_random(#host)] + local auth_idx = str_find(url, "@") + if auth_idx then + local protocol_idx = str_find(url, "://") + local protocol = string_sub(url, 1, protocol_idx + 2) + local user_and_password = string_sub(url, protocol_idx + 3, auth_idx - 1) + local other = string_sub(url, auth_idx + 1) + url = protocol .. other + basic_auth = "Basic " .. ngx.encode_base64(user_and_password) + end + if local_conf.discovery.eureka.prefix then + url = url .. local_conf.discovery.eureka.prefix + end + if string_sub(url, #url) ~= "/" then + url = url .. "/" + end + + return url, basic_auth +end + + +local function request(request_uri, basic_auth, method, path, query, body) + log.info("eureka uri:", request_uri, ".") + local url = request_uri .. path + local headers = core.table.new(0, 5) + headers['Connection'] = 'Keep-Alive' + headers['Accept'] = 'application/json' + + if basic_auth then + headers['Authorization'] = basic_auth + end + + if body and 'table' == type(body) then + local err + body, err = core.json.encode(body) + if not body then + return nil, 'invalid body : ' .. 
err + end + -- log.warn(method, url, body) + headers['Content-Type'] = 'application/json' + end + + local httpc = http.new() + local timeout = local_conf.discovery.eureka.timeout + local connect_timeout = timeout and timeout.connect or 2000 + local send_timeout = timeout and timeout.send or 2000 + local read_timeout = timeout and timeout.read or 5000 + log.info("connect_timeout:", connect_timeout, ", send_timeout:", send_timeout, + ", read_timeout:", read_timeout, ".") + httpc:set_timeouts(connect_timeout, send_timeout, read_timeout) + return httpc:request_uri(url, { + version = 1.1, + method = method, + headers = headers, + query = query, + body = body, + ssl_verify = false, + }) +end + + +local function parse_instance(instance) + local status = instance.status + local overridden_status = instance.overriddenstatus or instance.overriddenStatus + if overridden_status and overridden_status ~= "UNKNOWN" then + status = overridden_status + end + + if status ~= "UP" then + return + end + local port + if tostring(instance.port["@enabled"]) == "true" and instance.port["$"] then + port = instance.port["$"] + -- secure = false + end + if tostring(instance.securePort["@enabled"]) == "true" and instance.securePort["$"] then + port = instance.securePort["$"] + -- secure = true + end + local ip = instance.ipAddr + if not ipmatcher.parse_ipv4(ip) and + not ipmatcher.parse_ipv6(ip) then + log.error(instance.app, " service ", instance.hostName, " node IP ", ip, + " is invalid(must be IPv4 or IPv6).") + return + end + return ip, port, instance.metadata +end + + +local function fetch_full_registry(premature) + if premature then + return + end + + local request_uri, basic_auth = service_info() + if not request_uri then + return + end + + local res, err = request(request_uri, basic_auth, "GET", "apps") + if not res then + log.error("failed to fetch registry", err) + return + end + + if not res.body or res.status ~= 200 then + log.error("failed to fetch registry, status = ", 
res.status) + return + end + + local json_str = res.body + local data, err = core.json.decode(json_str) + if not data then + log.error("invalid response body: ", json_str, " err: ", err) + return + end + local apps = data.applications.application + local up_apps = core.table.new(0, #apps) + for _, app in ipairs(apps) do + for _, instance in ipairs(app.instance) do + local ip, port, metadata = parse_instance(instance) + if ip and port then + local nodes = up_apps[app.name] + if not nodes then + nodes = core.table.new(#app.instance, 0) + up_apps[app.name] = nodes + end + core.table.insert(nodes, { + host = ip, + port = port, + weight = metadata and metadata.weight or default_weight, + metadata = metadata, + }) + if metadata then + -- remove useless data + metadata.weight = nil + end + end + end + end + applications = up_apps +end + + +function _M.nodes(service_name) + if not applications then + log.error("failed to fetch nodes for : ", service_name) + return + end + + return applications[service_name] +end + + +function _M.init_worker() + default_weight = local_conf.discovery.eureka.weight or 100 + log.info("default_weight:", default_weight, ".") + local fetch_interval = local_conf.discovery.eureka.fetch_interval or 30 + log.info("fetch_interval:", fetch_interval, ".") + ngx_timer_at(0, fetch_full_registry) + ngx_timer_every(fetch_interval, fetch_full_registry) +end + + +function _M.dump_data() + return {config = local_conf.discovery.eureka, services = applications or {}} +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/eureka/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/eureka/schema.lua new file mode 100644 index 0000000..1966b8e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/eureka/schema.lua @@ -0,0 +1,40 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. 
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +return { + type = "object", + properties = { + host = { + type = "array", + minItems = 1, + items = { + type = "string", + }, + }, + fetch_interval = {type = "integer", minimum = 1, default = 30}, + prefix = {type = "string"}, + weight = {type = "integer", minimum = 0}, + timeout = { + type = "object", + properties = { + connect = {type = "integer", minimum = 1, default = 2000}, + send = {type = "integer", minimum = 1, default = 2000}, + read = {type = "integer", minimum = 1, default = 5000}, + } + }, + }, + required = {"host"} +} diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/init.lua new file mode 100644 index 0000000..10e7aa1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/init.lua @@ -0,0 +1,43 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local log = require("apisix.core.log") +local local_conf = require("apisix.core.config_local").local_conf() +local pairs = pairs + +local discovery_type = local_conf.discovery +local discovery = {} + +if discovery_type then + for discovery_name, _ in pairs(discovery_type) do + log.info("use discovery: ", discovery_name) + discovery[discovery_name] = require("apisix.discovery." .. discovery_name) + end +end + +function discovery.init_worker() + if discovery_type then + for discovery_name, _ in pairs(discovery_type) do + discovery[discovery_name].init_worker() + end + end +end + +return { + version = 0.1, + discovery = discovery +} diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/kubernetes/informer_factory.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/kubernetes/informer_factory.lua new file mode 100644 index 0000000..fd434c0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/kubernetes/informer_factory.lua @@ -0,0 +1,377 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local ngx = ngx +local ipairs = ipairs +local string = string +local math = math +local type = type +local core = require("apisix.core") +local http = require("resty.http") + +local function list_query(informer) + local arguments = { + limit = informer.limit, + } + + if informer.continue and informer.continue ~= "" then + arguments.continue = informer.continue + end + + if informer.label_selector and informer.label_selector ~= "" then + arguments.labelSelector = informer.label_selector + end + + if informer.field_selector and informer.field_selector ~= "" then + arguments.fieldSelector = informer.field_selector + end + + return ngx.encode_args(arguments) +end + + +local function list(httpc, apiserver, informer) + local response, err = httpc:request({ + path = informer.path, + query = list_query(informer), + headers = { + ["Host"] = apiserver.host .. ":" .. apiserver.port, + ["Authorization"] = "Bearer " .. 
apiserver.token, + ["Accept"] = "application/json", + ["Connection"] = "keep-alive" + } + }) + + core.log.info("--raw=", informer.path, "?", list_query(informer)) + + if not response then + return false, "RequestError", err or "" + end + + if response.status ~= 200 then + return false, response.reason, response:read_body() or "" + end + + local body, err = response:read_body() + if err then + return false, "ReadBodyError", err + end + + local data = core.json.decode(body) + if not data or data.kind ~= informer.list_kind then + return false, "UnexpectedBody", body + end + + informer.version = data.metadata.resourceVersion + + if informer.on_added then + for _, item in ipairs(data.items or {}) do + informer:on_added(item, "list") + end + end + + informer.continue = data.metadata.continue + if informer.continue and informer.continue ~= "" then + list(httpc, apiserver, informer) + end + + return true +end + + +local function watch_query(informer) + local arguments = { + watch = "true", + allowWatchBookmarks = "true", + timeoutSeconds = informer.overtime, + } + + if informer.version and informer.version ~= "" then + arguments.resourceVersion = informer.version + end + + if informer.label_selector and informer.label_selector ~= "" then + arguments.labelSelector = informer.label_selector + end + + if informer.field_selector and informer.field_selector ~= "" then + arguments.fieldSelector = informer.field_selector + end + + return ngx.encode_args(arguments) +end + + +local function split_event (body, callback, ...) 
+ local gmatch_iterator, err = ngx.re.gmatch(body, "{\"type\":.*}\n", "jao") + if not gmatch_iterator then + return false, nil, "GmatchError", err + end + + local captures + local captured_size = 0 + local ok, reason + while true do + captures, err = gmatch_iterator() + + if err then + return false, nil, "GmatchError", err + end + + if not captures then + break + end + + captured_size = captured_size + #captures[0] + + ok, reason, err = callback(captures[0], ...) + if not ok then + return false, nil, reason, err + end + end + + local remainder_body + if captured_size == #body then + remainder_body = "" + elseif captured_size == 0 then + remainder_body = body + elseif captured_size < #body then + remainder_body = string.sub(body, captured_size + 1) + end + + return true, remainder_body +end + + +local function dispatch_event(event_string, informer) + local event = core.json.decode(event_string) + + if not event or not event.type or not event.object then + return false, "UnexpectedBody", event_string + end + + local tp = event.type + + if tp == "ERROR" then + if event.object.code == 410 then + return false, "ResourceGone", nil + end + return false, "UnexpectedBody", event_string + end + + local object = event.object + informer.version = object.metadata.resourceVersion + + if tp == "ADDED" then + if informer.on_added then + informer:on_added(object, "watch") + end + elseif tp == "DELETED" then + if informer.on_deleted then + informer:on_deleted(object) + end + elseif tp == "MODIFIED" then + if informer.on_modified then + informer:on_modified(object) + end + -- elseif type == "BOOKMARK" then + -- do nothing + end + + return true +end + + +local function watch(httpc, apiserver, informer) + local watch_times = 8 + for _ = 1, watch_times do + local watch_seconds = 1800 + math.random(9, 999) + informer.overtime = watch_seconds + local http_seconds = watch_seconds + 120 + httpc:set_timeouts(2000, 3000, http_seconds * 1000) + + local response, err = httpc:request({ + path = 
informer.path, + query = watch_query(informer), + headers = { + ["Host"] = apiserver.host .. ":" .. apiserver.port, + ["Authorization"] = "Bearer " .. apiserver.token, + ["Accept"] = "application/json", + ["Connection"] = "keep-alive" + } + }) + + core.log.info("--raw=", informer.path, "?", watch_query(informer)) + + if err then + return false, "RequestError", err + end + + if response.status ~= 200 then + return false, response.reason, response:read_body() or "" + end + + local ok + local remainder_body + local body + local reason + + while true do + body, err = response.body_reader() + if err then + return false, "ReadBodyError", err + end + + if not body then + break + end + + if remainder_body and #remainder_body > 0 then + body = remainder_body .. body + end + + ok, remainder_body, reason, err = split_event(body, dispatch_event, informer) + if not ok then + if reason == "ResourceGone" then + return true + end + return false, reason, err + end + end + end + + return true +end + + +local function list_watch(informer, apiserver) + local ok + local reason, message + local httpc = http.new() + + informer.continue = "" + informer.version = "" + + informer.fetch_state = "connecting" + core.log.info("begin to connect ", apiserver.host, ":", apiserver.port) + + ok, message = httpc:connect({ + scheme = apiserver.schema, + host = apiserver.host, + port = apiserver.port, + ssl_verify = false + }) + + if not ok then + informer.fetch_state = "connect failed" + core.log.error("connect apiserver failed, apiserver.host: ", apiserver.host, + ", apiserver.port: ", apiserver.port, ", message : ", message) + return false + end + + core.log.info("begin to list ", informer.kind) + informer.fetch_state = "listing" + if informer.pre_List then + informer:pre_list() + end + + ok, reason, message = list(httpc, apiserver, informer) + if not ok then + informer.fetch_state = "list failed" + core.log.error("list failed, kind: ", informer.kind, + ", reason: ", reason, ", message : ", message) 
+ return false + end + + informer.fetch_state = "list finished" + if informer.post_List then + informer:post_list() + end + + core.log.info("begin to watch ", informer.kind) + informer.fetch_state = "watching" + ok, reason, message = watch(httpc, apiserver, informer) + if not ok then + informer.fetch_state = "watch failed" + core.log.error("watch failed, kind: ", informer.kind, + ", reason: ", reason, ", message : ", message) + return false + end + + informer.fetch_state = "watch finished" + + return true +end + +local _M = { +} + +function _M.new(group, version, kind, plural, namespace) + local tp + tp = type(group) + if tp ~= "nil" and tp ~= "string" then + return nil, "group should set to string or nil type but " .. tp + end + + tp = type(namespace) + if tp ~= "nil" and tp ~= "string" then + return nil, "namespace should set to string or nil type but " .. tp + end + + tp = type(version) + if tp ~= "string" or version == "" then + return nil, "version should set to non-empty string" + end + + tp = type(kind) + if tp ~= "string" or kind == "" then + return nil, "kind should set to non-empty string" + end + + tp = type(plural) + if tp ~= "string" or plural == "" then + return nil, "plural should set to non-empty string" + end + + local path = "" + if group == nil or group == "" then + path = path .. "/api/" .. version + else + path = path .. "/apis/" .. group .. "/" .. version + end + + if namespace and namespace ~= "" then + path = path .. "/namespaces/" .. namespace + end + path = path .. "/" .. plural + + return { + kind = kind, + list_kind = kind .. 
"List", + plural = plural, + path = path, + limit = 120, + label_selector = "", + field_selector = "", + overtime = "1800", + version = "", + continue = "", + list_watch = list_watch + } +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/kubernetes/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/kubernetes/init.lua new file mode 100644 index 0000000..39fa69e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/kubernetes/init.lua @@ -0,0 +1,694 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local ngx = ngx +local ipairs = ipairs +local pairs = pairs +local string = string +local tonumber = tonumber +local tostring = tostring +local os = os +local error = error +local pcall = pcall +local setmetatable = setmetatable +local is_http = ngx.config.subsystem == "http" +local process = require("ngx.process") +local core = require("apisix.core") +local util = require("apisix.cli.util") +local local_conf = require("apisix.core.config_local").local_conf() +local informer_factory = require("apisix.discovery.kubernetes.informer_factory") + + +local ctx + +local endpoint_lrucache = core.lrucache.new({ + ttl = 300, + count = 1024 +}) + +local endpoint_buffer = {} + +local function sort_nodes_cmp(left, right) + if left.host ~= right.host then + return left.host < right.host + end + + return left.port < right.port +end + +local function on_endpoint_slices_modified(handle, endpoint) + if handle.namespace_selector and + not handle:namespace_selector(endpoint.metadata.namespace) then + return + end + + core.log.debug(core.json.delay_encode(endpoint)) + core.table.clear(endpoint_buffer) + + local endpointslices = endpoint.endpoints + for _, endpointslice in ipairs(endpointslices or {}) do + if endpointslice.addresses then + local addresses = endpointslices.addresses + for _, port in ipairs(endpoint.ports or {}) do + local port_name + if port.name then + port_name = port.name + elseif port.targetPort then + port_name = tostring(port.targetPort) + else + port_name = tostring(port.port) + end + + if endpointslice.conditions and endpointslice.condition.ready then + local nodes = endpoint_buffer[port_name] + if nodes == nil then + nodes = core.table.new(0, #endpointslices * #addresses) + endpoint_buffer[port_name] = nodes + end + + for _, address in ipairs(endpointslices.addresses) do + core.table.insert(nodes, { + host = address.ip, + port = port.port, + weight = handle.default_weight + }) + end + end + end + end + end + + for _, ports in pairs(endpoint_buffer) do + 
for _, nodes in pairs(ports) do + core.table.sort(nodes, sort_nodes_cmp) + end + end + local endpoint_key = endpoint.metadata.namespace .. "/" .. endpoint.metadata.name + local endpoint_content = core.json.encode(endpoint_buffer, true) + local endpoint_version = ngx.crc32_long(endpoint_content) + + local _, err + _, err = handle.endpoint_dict:safe_set(endpoint_key .. "#version", endpoint_version) + if err then + core.log.error("set endpoint version into discovery DICT failed, ", err) + return + end + _, err = handle.endpoint_dict:safe_set(endpoint_key, endpoint_content) + if err then + core.log.error("set endpoint into discovery DICT failed, ", err) + handle.endpoint_dict:delete(endpoint_key .. "#version") + end +end + +local function on_endpoint_modified(handle, endpoint) + if handle.namespace_selector and + not handle:namespace_selector(endpoint.metadata.namespace) then + return + end + + core.log.debug(core.json.delay_encode(endpoint)) + core.table.clear(endpoint_buffer) + + local subsets = endpoint.subsets + for _, subset in ipairs(subsets or {}) do + if subset.addresses then + local addresses = subset.addresses + for _, port in ipairs(subset.ports or {}) do + local port_name + if port.name then + port_name = port.name + elseif port.targetPort then + port_name = tostring(port.targetPort) + else + port_name = tostring(port.port) + end + + local nodes = endpoint_buffer[port_name] + if nodes == nil then + nodes = core.table.new(0, #subsets * #addresses) + endpoint_buffer[port_name] = nodes + end + + for _, address in ipairs(subset.addresses) do + core.table.insert(nodes, { + host = address.ip, + port = port.port, + weight = handle.default_weight + }) + end + end + end + end + + for _, ports in pairs(endpoint_buffer) do + for _, nodes in pairs(ports) do + core.table.sort(nodes, sort_nodes_cmp) + end + end + + local endpoint_key = endpoint.metadata.namespace .. "/" .. 
endpoint.metadata.name + local endpoint_content = core.json.encode(endpoint_buffer, true) + local endpoint_version = ngx.crc32_long(endpoint_content) + + local _, err + _, err = handle.endpoint_dict:safe_set(endpoint_key .. "#version", endpoint_version) + if err then + core.log.error("set endpoint version into discovery DICT failed, ", err) + return + end + _, err = handle.endpoint_dict:safe_set(endpoint_key, endpoint_content) + if err then + core.log.error("set endpoint into discovery DICT failed, ", err) + handle.endpoint_dict:delete(endpoint_key .. "#version") + end +end + + +local function on_endpoint_deleted(handle, endpoint) + if handle.namespace_selector and + not handle:namespace_selector(endpoint.metadata.namespace) then + return + end + + core.log.debug(core.json.delay_encode(endpoint)) + local endpoint_key = endpoint.metadata.namespace .. "/" .. endpoint.metadata.name + handle.endpoint_dict:delete(endpoint_key .. "#version") + handle.endpoint_dict:delete(endpoint_key) +end + + +local function pre_list(handle) + handle.endpoint_dict:flush_all() +end + + +local function post_list(handle) + handle.endpoint_dict:flush_expired() +end + + +local function setup_label_selector(conf, informer) + informer.label_selector = conf.label_selector +end + + +local function setup_namespace_selector(conf, informer) + local ns = conf.namespace_selector + if ns == nil then + informer.namespace_selector = nil + return + end + + if ns.equal then + informer.field_selector = "metadata.namespace=" .. ns.equal + informer.namespace_selector = nil + return + end + + if ns.not_equal then + informer.field_selector = "metadata.namespace!=" .. 
ns.not_equal + informer.namespace_selector = nil + return + end + + if ns.match then + informer.namespace_selector = function(self, namespace) + local match = conf.namespace_selector.match + local m, err + for _, v in ipairs(match) do + m, err = ngx.re.match(namespace, v, "jo") + if m and m[0] == namespace then + return true + end + if err then + core.log.error("ngx.re.match failed: ", err) + end + end + return false + end + return + end + + if ns.not_match then + informer.namespace_selector = function(self, namespace) + local not_match = conf.namespace_selector.not_match + local m, err + for _, v in ipairs(not_match) do + m, err = ngx.re.match(namespace, v, "jo") + if m and m[0] == namespace then + return false + end + if err then + return false + end + end + return true + end + return + end + + return +end + + +local function read_env(key) + if #key > 3 then + local first, second = string.byte(key, 1, 2) + if first == string.byte('$') and second == string.byte('{') then + local last = string.byte(key, #key) + if last == string.byte('}') then + local env = string.sub(key, 3, #key - 1) + local value = os.getenv(env) + if not value then + return nil, "not found environment variable " .. env + end + return value + end + end + end + return key +end + +local function read_token(token_file) + local token, err = util.read_file(token_file) + if err then + return nil, err + end + + -- remove possible extra whitespace + return util.trim(token) +end + +local function get_apiserver(conf) + local apiserver = { + schema = "", + host = "", + port = "", + } + + apiserver.schema = conf.service.schema + if apiserver.schema ~= "http" and apiserver.schema ~= "https" then + return nil, "service.schema should set to one of [http,https] but " .. 
apiserver.schema + end + + local err + apiserver.host, err = read_env(conf.service.host) + if err then + return nil, err + end + + if apiserver.host == "" then + return nil, "service.host should set to non-empty string" + end + + local port + port, err = read_env(conf.service.port) + if err then + return nil, err + end + + apiserver.port = tonumber(port) + if not apiserver.port or apiserver.port <= 0 or apiserver.port > 65535 then + return nil, "invalid port value: " .. apiserver.port + end + + if conf.client.token then + local token, err = read_env(conf.client.token) + if err then + return nil, err + end + apiserver.token = util.trim(token) + elseif conf.client.token_file and conf.client.token_file ~= "" then + setmetatable(apiserver, { + __index = function(_, key) + if key ~= "token" then + return + end + + local token_file, err = read_env(conf.client.token_file) + if err then + core.log.error("failed to read token file path: ", err) + return + end + + local token, err = read_token(token_file) + if err then + core.log.error("failed to read token from file: ", err) + return + end + core.log.debug("re-read the token value") + return token + end + }) + else + return nil, "one of [client.token,client.token_file] should be set but none" + end + + if apiserver.schema == "https" and apiserver.token == "" then + return nil, "apiserver.token should set to non-empty string when service.schema is https" + end + + return apiserver +end + +local function create_endpoint_lrucache(endpoint_dict, endpoint_key, endpoint_port) + local endpoint_content = endpoint_dict:get_stale(endpoint_key) + if not endpoint_content then + core.log.error("get empty endpoint content from discovery DIC, this should not happen ", + endpoint_key) + return nil + end + + local endpoint = core.json.decode(endpoint_content) + if not endpoint then + core.log.error("decode endpoint content failed, this should not happen, content: ", + endpoint_content) + return nil + end + + return endpoint[endpoint_port] 
+end + + +local _M = { + version = "0.0.1" +} + + +local function start_fetch(handle) + local timer_runner + timer_runner = function(premature) + if premature then + return + end + + local ok, status = pcall(handle.list_watch, handle, handle.apiserver) + + local retry_interval = 0 + if not ok then + core.log.error("list_watch failed, kind: ", handle.kind, + ", reason: ", "RuntimeException", ", message : ", status) + retry_interval = 40 + elseif not status then + retry_interval = 40 + end + + ngx.timer.at(retry_interval, timer_runner) + end + ngx.timer.at(0, timer_runner) +end + +local function get_endpoint_dict(id) + local shm = "kubernetes" + + if id and #id > 0 then + shm = shm .. "-" .. id + end + + if not is_http then + shm = shm .. "-stream" + end + + return ngx.shared[shm] +end + + +local function single_mode_init(conf) + local endpoint_dict = get_endpoint_dict() + + if not endpoint_dict then + error("failed to get lua_shared_dict: ngx.shared.kubernetes, " .. + "please check your APISIX version") + end + + if process.type() ~= "privileged agent" then + ctx = endpoint_dict + return + end + + local apiserver, err = get_apiserver(conf) + if err then + error(err) + return + end + + local default_weight = conf.default_weight + local endpoints_informer, err + if conf.watch_endpoint_slices_schema then + endpoints_informer, err = informer_factory.new("discovery.k8s.io", "v1", + "EndpointSlice", "endpointslices", "") + else + endpoints_informer, err = informer_factory.new("", "v1", "Endpoints", "endpoints", "") + end + if err then + error(err) + return + end + + setup_namespace_selector(conf, endpoints_informer) + setup_label_selector(conf, endpoints_informer) + + if conf.watch_endpoint_slices_schema then + endpoints_informer.on_added = on_endpoint_slices_modified + endpoints_informer.on_modified = on_endpoint_slices_modified + else + endpoints_informer.on_added = on_endpoint_modified + endpoints_informer.on_modified = on_endpoint_modified + end + 
endpoints_informer.on_deleted = on_endpoint_deleted + endpoints_informer.pre_list = pre_list + endpoints_informer.post_list = post_list + + ctx = setmetatable({ + endpoint_dict = endpoint_dict, + apiserver = apiserver, + default_weight = default_weight + }, { __index = endpoints_informer }) + + start_fetch(ctx) +end + + +local function single_mode_nodes(service_name) + local pattern = "^(.*):(.*)$" -- namespace/name:port_name + local match = ngx.re.match(service_name, pattern, "jo") + if not match then + core.log.error("get unexpected upstream service_name: ", service_name) + return nil + end + + local endpoint_dict = ctx + local endpoint_key = match[1] + local endpoint_port = match[2] + local endpoint_version = endpoint_dict:get_stale(endpoint_key .. "#version") + if not endpoint_version then + core.log.info("get empty endpoint version from discovery DICT ", endpoint_key) + return nil + end + + return endpoint_lrucache(service_name, endpoint_version, + create_endpoint_lrucache, endpoint_dict, endpoint_key, endpoint_port) +end + + +local function multiple_mode_worker_init(confs) + for _, conf in ipairs(confs) do + + local id = conf.id + if ctx[id] then + error("duplicate id value") + end + + local endpoint_dict = get_endpoint_dict(id) + if not endpoint_dict then + error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. + "please check your APISIX version") + end + + ctx[id] = endpoint_dict + end +end + + +local function multiple_mode_init(confs) + ctx = core.table.new(#confs, 0) + + if process.type() ~= "privileged agent" then + multiple_mode_worker_init(confs) + return + end + + for _, conf in ipairs(confs) do + local id = conf.id + + if ctx[id] then + error("duplicate id value") + end + + local endpoint_dict = get_endpoint_dict(id) + if not endpoint_dict then + error(string.format("failed to get lua_shared_dict: ngx.shared.kubernetes-%s, ", id) .. 
+ "please check your APISIX version") + end + + local apiserver, err = get_apiserver(conf) + if err then + error(err) + return + end + + local default_weight = conf.default_weight + + local endpoints_informer, err + if conf.watch_endpoint_slices_schema then + endpoints_informer, err = informer_factory.new("discovery.k8s.io", "v1", + "EndpointSlice", "endpointslices", "") + else + endpoints_informer, err = informer_factory.new("", "v1", "Endpoints", "endpoints", "") + end + if err then + error(err) + return + end + + setup_namespace_selector(conf, endpoints_informer) + setup_label_selector(conf, endpoints_informer) + + if conf.watch_endpoint_slices_schema then + endpoints_informer.on_added = on_endpoint_slices_modified + endpoints_informer.on_modified = on_endpoint_slices_modified + else + endpoints_informer.on_added = on_endpoint_modified + endpoints_informer.on_modified = on_endpoint_modified + end + endpoints_informer.on_deleted = on_endpoint_deleted + endpoints_informer.pre_list = pre_list + endpoints_informer.post_list = post_list + + ctx[id] = setmetatable({ + endpoint_dict = endpoint_dict, + apiserver = apiserver, + default_weight = default_weight + }, { __index = endpoints_informer }) + end + + for _, item in pairs(ctx) do + start_fetch(item) + end +end + + +local function multiple_mode_nodes(service_name) + local pattern = "^(.*)/(.*/.*):(.*)$" -- id/namespace/name:port_name + local match = ngx.re.match(service_name, pattern, "jo") + if not match then + core.log.error("get unexpected upstream service_name: ", service_name) + return nil + end + + local id = match[1] + local endpoint_dict = ctx[id] + if not endpoint_dict then + core.log.error("id not exist") + return nil + end + + local endpoint_key = match[2] + local endpoint_port = match[3] + local endpoint_version = endpoint_dict:get_stale(endpoint_key .. 
"#version") + if not endpoint_version then + core.log.info("get empty endpoint version from discovery DICT ", endpoint_key) + return nil + end + + return endpoint_lrucache(service_name, endpoint_version, + create_endpoint_lrucache, endpoint_dict, endpoint_key, endpoint_port) +end + + +function _M.init_worker() + local discovery_conf = local_conf.discovery.kubernetes + core.log.info("kubernetes discovery conf: ", core.json.delay_encode(discovery_conf)) + if #discovery_conf == 0 then + _M.nodes = single_mode_nodes + single_mode_init(discovery_conf) + else + _M.nodes = multiple_mode_nodes + multiple_mode_init(discovery_conf) + end +end + + +local function dump_endpoints_from_dict(endpoint_dict) + local keys, err = endpoint_dict:get_keys(0) + if err then + core.log.error("get keys from discovery dict failed: ", err) + return + end + + if not keys or #keys == 0 then + return + end + + local endpoints = {} + for i = 1, #keys do + local key = keys[i] + -- skip key with suffix #version + if key:sub(-#"#version") ~= "#version" then + local value = endpoint_dict:get(key) + core.table.insert(endpoints, { + name = key, + value = value + }) + end + end + + return endpoints +end + +function _M.dump_data() + local discovery_conf = local_conf.discovery.kubernetes + local eps = {} + + if #discovery_conf == 0 then + -- Single mode: discovery_conf is a single configuration object + local endpoint_dict = get_endpoint_dict() + local endpoints = dump_endpoints_from_dict(endpoint_dict) + if endpoints then + core.table.insert(eps, { + endpoints = endpoints + }) + end + else + -- Multiple mode: discovery_conf is an array of configuration objects + for _, conf in ipairs(discovery_conf) do + local endpoint_dict = get_endpoint_dict(conf.id) + local endpoints = dump_endpoints_from_dict(endpoint_dict) + if endpoints then + core.table.insert(eps, { + id = conf.id, + endpoints = endpoints + }) + end + end + end + + return {config = discovery_conf, endpoints = eps} +end + + +return _M diff --git 
a/CloudronPackages/APISIX/apisix-source/apisix/discovery/kubernetes/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/kubernetes/schema.lua new file mode 100644 index 0000000..e18d06f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/kubernetes/schema.lua @@ -0,0 +1,217 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local host_patterns = { + { pattern = [[^\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] }, + { pattern = [[^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$]] }, +} + +local port_patterns = { + { pattern = [[^\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] }, + { pattern = [[^(([1-9]\d{0,3}|[1-5]\d{4}|6[0-4]\d{3}|65[0-4]\d{2}|655[0-2]\d|6553[0-5]))$]] }, +} + +local schema_schema = { + type = "string", + enum = { "http", "https" }, + default = "https", +} + +local token_patterns = { + { pattern = [[\${[_A-Za-z]([_A-Za-z0-9]*[_A-Za-z])*}$]] }, + { pattern = [[^[A-Za-z0-9+\/._=-]{0,4096}$]] }, +} + +local token_schema = { + type = "string", + oneOf = token_patterns, +} + +local token_file_schema = { + type = "string", + pattern = [[^[^\:*?"<>|]*$]], + minLength = 1, + maxLength = 500, +} + +local namespace_pattern = [[^[a-z0-9]([-a-z0-9_.]*[a-z0-9])?$]] + +local namespace_regex_pattern = [[^[\x21-\x7e]*$]] + +local namespace_selector_schema = { + type = "object", + properties = { + equal = { + type = "string", + pattern = namespace_pattern, + }, + not_equal = { + type = "string", + pattern = namespace_pattern, + }, + match = { + type = "array", + items = { + type = "string", + pattern = namespace_regex_pattern + }, + minItems = 1 + }, + not_match = { + type = "array", + items = { + type = "string", + pattern = namespace_regex_pattern + }, + minItems = 1 + }, + }, + oneOf = { + { required = {} }, + { required = { "equal" } }, + { required = { "not_equal" } }, + { required = { "match" } }, + { required = { "not_match" } } + }, +} + +local label_selector_schema = { + type = "string", +} + +local default_weight_schema = { + type = "integer", + default = 50, + minimum = 0, +} + +local shared_size_schema = { + type = "string", + pattern = [[^[1-9][0-9]*m$]], + default = "1m", +} + +local watch_endpoint_slices_schema = { + type = "boolean", + default = false, +} + +return { + anyOf = { + { + type = "object", + properties = { + service = { + type = "object", + 
properties = { + schema = schema_schema, + host = { + type = "string", + oneOf = host_patterns, + default = "${KUBERNETES_SERVICE_HOST}", + }, + port = { + type = "string", + oneOf = port_patterns, + default = "${KUBERNETES_SERVICE_PORT}", + }, + }, + default = { + schema = "https", + host = "${KUBERNETES_SERVICE_HOST}", + port = "${KUBERNETES_SERVICE_PORT}", + } + }, + client = { + type = "object", + properties = { + token = token_schema, + token_file = token_file_schema, + }, + default = { + token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token" + }, + ["if"] = { + ["not"] = { + anyOf = { + { required = { "token" } }, + { required = { "token_file" } }, + } + } + }, + ["then"] = { + properties = { + token_file = { + default = "/var/run/secrets/kubernetes.io/serviceaccount/token" + } + } + } + }, + namespace_selector = namespace_selector_schema, + label_selector = label_selector_schema, + default_weight = default_weight_schema, + shared_size = shared_size_schema, + watch_endpoint_slices = watch_endpoint_slices_schema, + }, + }, + { + type = "array", + minItems = 1, + items = { + type = "object", + properties = { + id = { + type = "string", + pattern = [[^[a-z0-9]{1,8}$]] + }, + service = { + type = "object", + properties = { + schema = schema_schema, + host = { + type = "string", + oneOf = host_patterns, + }, + port = { + type = "string", + oneOf = port_patterns, + }, + }, + required = { "host", "port" } + }, + client = { + type = "object", + properties = { + token = token_schema, + token_file = token_file_schema, + }, + oneOf = { + { required = { "token" } }, + { required = { "token_file" } }, + }, + }, + namespace_selector = namespace_selector_schema, + label_selector = label_selector_schema, + default_weight = default_weight_schema, + shared_size = shared_size_schema, + watch_endpoint_slices = watch_endpoint_slices_schema, + }, + required = { "id", "service", "client" } + }, + } + } +} diff --git 
a/CloudronPackages/APISIX/apisix-source/apisix/discovery/nacos/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/nacos/init.lua new file mode 100644 index 0000000..d4fec79 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/nacos/init.lua @@ -0,0 +1,392 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local require = require +local local_conf = require('apisix.core.config_local').local_conf() +local http = require('resty.http') +local core = require('apisix.core') +local ipairs = ipairs +local pairs = pairs +local type = type +local math = math +local math_random = math.random +local ngx = ngx +local ngx_re = require('ngx.re') +local ngx_timer_at = ngx.timer.at +local ngx_timer_every = ngx.timer.every +local string = string +local string_sub = string.sub +local str_byte = string.byte +local str_find = core.string.find +local log = core.log + +local default_weight +local nacos_dict = ngx.shared.nacos --key: namespace_id.group_name.service_name +if not nacos_dict then + error("lua_shared_dict \"nacos\" not configured") +end + +local auth_path = 'auth/login' +local instance_list_path = 'ns/instance/list?healthyOnly=true&serviceName=' +local default_namespace_id = "public" +local default_group_name = "DEFAULT_GROUP" +local access_key +local secret_key + + +local _M = {} + +local function get_key(namespace_id, group_name, service_name) + return namespace_id .. '.' .. group_name .. '.' .. service_name +end + +local function request(request_uri, path, body, method, basic_auth) + local url = request_uri .. path + log.info('request url:', url) + local headers = {} + headers['Accept'] = 'application/json' + + if basic_auth then + headers['Authorization'] = basic_auth + end + + if body and 'table' == type(body) then + local err + body, err = core.json.encode(body) + if not body then + return nil, 'invalid body : ' .. 
err + end + headers['Content-Type'] = 'application/json' + end + + local httpc = http.new() + local timeout = local_conf.discovery.nacos.timeout + local connect_timeout = timeout.connect + local send_timeout = timeout.send + local read_timeout = timeout.read + log.info('connect_timeout:', connect_timeout, ', send_timeout:', send_timeout, + ', read_timeout:', read_timeout) + httpc:set_timeouts(connect_timeout, send_timeout, read_timeout) + local res, err = httpc:request_uri(url, { + method = method, + headers = headers, + body = body, + ssl_verify = true, + }) + if not res then + return nil, err + end + + if not res.body or res.status ~= 200 then + return nil, 'status = ' .. res.status + end + + local json_str = res.body + local data, err = core.json.decode(json_str) + if not data then + return nil, err + end + return data +end + + +local function get_url(request_uri, path) + return request(request_uri, path, nil, 'GET', nil) +end + + +local function post_url(request_uri, path, body) + return request(request_uri, path, body, 'POST', nil) +end + + +local function get_token_param(base_uri, username, password) + if not username or not password then + return '' + end + + local args = { username = username, password = password} + local data, err = post_url(base_uri, auth_path .. '?' .. ngx.encode_args(args), nil) + if err then + log.error('nacos login fail:', username, ' ', password, ' desc:', err) + return nil, err + end + return '&accessToken=' .. data.accessToken +end + + +local function get_namespace_param(namespace_id) + local param = '' + if namespace_id then + local args = {namespaceId = namespace_id} + param = '&' .. ngx.encode_args(args) + end + return param +end + + +local function get_group_name_param(group_name) + local param = '' + if group_name then + local args = {groupName = group_name} + param = '&' .. 
ngx.encode_args(args) + end + return param +end + + +local function get_signed_param(group_name, service_name) + local param = '' + if access_key ~= '' and secret_key ~= '' then + local str_to_sign = ngx.now() * 1000 .. '@@' .. group_name .. '@@' .. service_name + local args = { + ak = access_key, + data = str_to_sign, + signature = ngx.encode_base64(ngx.hmac_sha1(secret_key, str_to_sign)) + } + param = '&' .. ngx.encode_args(args) + end + return param +end + + +local function get_base_uri() + local host = local_conf.discovery.nacos.host + -- TODO Add health check to get healthy nodes. + local url = host[math_random(#host)] + local auth_idx = core.string.rfind_char(url, '@') + local username, password + if auth_idx then + local protocol_idx = str_find(url, '://') + local protocol = string_sub(url, 1, protocol_idx + 2) + local user_and_password = string_sub(url, protocol_idx + 3, auth_idx - 1) + local arr = ngx_re.split(user_and_password, ':') + if #arr == 2 then + username = arr[1] + password = arr[2] + end + local other = string_sub(url, auth_idx + 1) + url = protocol .. other + end + + if local_conf.discovery.nacos.prefix then + url = url .. local_conf.discovery.nacos.prefix + end + + if str_byte(url, #url) ~= str_byte('/') then + url = url .. 
-- Return true if an identical (namespace, group, service, scheme) tuple
-- has already been collected into `services`.
local function de_duplication(services, namespace_id, group_name, service_name, scheme)
    for _, svc in ipairs(services) do
        local same = svc.namespace_id == namespace_id
                and svc.group_name == group_name
                and svc.service_name == service_name
                and svc.scheme == scheme
        if same then
            return true
        end
    end
    return false
end


-- Walk one etcd value set (upstreams/routes/services/stream routes) and
-- collect every distinct nacos-backed service reference into `services`.
local function iter_and_add_service(services, values)
    if not values then
        return
    end

    for _, value in core.config_util.iterate_values(values) do
        local conf = value.value
        if conf then
            -- the upstream may be embedded in the object or be the object itself
            local up = conf.upstream or conf
            local args = up.discovery_args
            local namespace_id = (args and args.namespace_id) or default_namespace_id
            local group_name = (args and args.group_name) or default_group_name

            if up.discovery_type == 'nacos'
               and not de_duplication(services, namespace_id, group_name,
                                      up.service_name, up.scheme)
            then
                core.table.insert(services, {
                    service_name = up.service_name,
                    namespace_id = namespace_id,
                    group_name = group_name,
                    scheme = up.scheme,
                })
            end
        end
    end
end


-- Collect nacos service references from every config source that can
-- carry an upstream.
local function get_nacos_services()
    local services = {}

    -- lazy require to work around the circular dependency on these modules
    local get_upstreams = require('apisix.upstream').upstreams
    local get_routes = require('apisix.router').http_routes
    local get_stream_routes = require('apisix.router').stream_routes
    local get_services = require('apisix.http.service').services

    iter_and_add_service(services, get_upstreams())
    iter_and_add_service(services, get_routes())
    iter_and_add_service(services, get_services())
    iter_and_add_service(services, get_stream_routes())

    return services
end


-- grpc/grpcs upstreams may advertise a dedicated gRPC port in metadata.
local function is_grpc(scheme)
    return scheme == 'grpc' or scheme == 'grpcs'
end


-- keys written to the shared dict during the previous refresh cycle
local curr_service_in_use = {}

-- Timer handler: pull the instance list of every referenced service from
-- nacos and mirror it into the shared dict; entries for services no longer
-- referenced by any route/upstream are evicted.
local function fetch_full_registry(premature)
    if premature then
        return
    end

    local base_uri, username, password = get_base_uri()
    local token_param, err = get_token_param(base_uri, username, password)
    if err then
        log.error('get_token_param error:', err)
        return
    end

    local infos = get_nacos_services()
    if #infos == 0 then
        return
    end

    local service_names = {}
    for _, info in ipairs(infos) do
        local scheme = info.scheme or ''
        local query_path = instance_list_path .. info.service_name
                           .. token_param
                           .. get_namespace_param(info.namespace_id)
                           .. get_group_name_param(info.group_name)
                           .. get_signed_param(info.group_name, info.service_name)

        local data, fetch_err = get_url(base_uri, query_path)
        if fetch_err then
            log.error('get_url:', query_path, ' err:', fetch_err)
            goto CONTINUE
        end

        do
            local nodes = {}
            local key = get_key(info.namespace_id, info.group_name, info.service_name)
            service_names[key] = true

            for _, host in ipairs(data.hosts) do
                local node = {
                    host = host.ip,
                    port = host.port,
                    weight = host.weight or default_weight,
                }
                -- grpc-spring-boot-starter exposes the real gRPC port via
                -- metadata: https://github.com/yidongnan/grpc-spring-boot-starter/pull/496
                if is_grpc(scheme) and host.metadata and host.metadata.gRPC_port then
                    node.port = host.metadata.gRPC_port
                end

                core.table.insert(nodes, node)
            end

            if #nodes > 0 then
                nacos_dict:set(key, core.json.encode(nodes))
            end
        end
        ::CONTINUE::
    end

    -- evict services that disappeared since the previous cycle
    for key in pairs(curr_service_in_use) do
        if not service_names[key] then
            nacos_dict:delete(key)
        end
    end
    curr_service_in_use = service_names
end


-- Look up the cached node list for a service. Returns nil (and logs an
-- error) when the service has not been fetched into the shared dict.
function _M.nodes(service_name, discovery_args)
    local namespace_id = (discovery_args and discovery_args.namespace_id)
                         or default_namespace_id
    local group_name = (discovery_args and discovery_args.group_name)
                       or default_group_name

    local value = nacos_dict:get(get_key(namespace_id, group_name, service_name))
    if not value then
        core.log.error("nacos service not found: ", service_name)
        return nil
    end

    return core.json.decode(value)
end


function _M.init_worker()
    local nacos_conf = local_conf.discovery.nacos

    default_weight = nacos_conf.weight
    log.info('default_weight:', default_weight)

    local fetch_interval = nacos_conf.fetch_interval
    log.info('fetch_interval:', fetch_interval)

    access_key = nacos_conf.access_key
    secret_key = nacos_conf.secret_key

    -- fetch once immediately, then refresh periodically
    ngx_timer_at(0, fetch_full_registry)
    ngx_timer_every(fetch_interval, fetch_full_registry)
end


-- Control-API helper: decode every cached service into a plain table.
function _M.dump_data()
    local applications = {}
    for _, key in ipairs(nacos_dict:get_keys(0)) do
        local value = nacos_dict:get(key)
        local nodes = value and core.json.decode(value)
        if nodes then
            applications[key] = {
                nodes = nodes,
            }
        end
    end
    return {services = applications}
end


return _M
+-- +local host_pattern = [[^http(s)?:\/\/([a-zA-Z0-9-_.]+:.+\@)?[a-zA-Z0-9-_.:]+$]] +local prefix_pattern = [[^[\/a-zA-Z0-9-_.]+$]] + + +return { + type = 'object', + properties = { + host = { + type = 'array', + minItems = 1, + items = { + type = 'string', + pattern = host_pattern, + minLength = 2, + maxLength = 100, + }, + }, + fetch_interval = {type = 'integer', minimum = 1, default = 30}, + prefix = { + type = 'string', + pattern = prefix_pattern, + maxLength = 100, + default = '/nacos/v1/' + }, + weight = {type = 'integer', minimum = 1, default = 100}, + timeout = { + type = 'object', + properties = { + connect = {type = 'integer', minimum = 1, default = 2000}, + send = {type = 'integer', minimum = 1, default = 2000}, + read = {type = 'integer', minimum = 1, default = 5000}, + }, + default = { + connect = 2000, + send = 2000, + read = 5000, + } + }, + access_key = {type = 'string', default = ''}, + secret_key = {type = 'string', default = ''}, + }, + required = {'host'} +} diff --git a/CloudronPackages/APISIX/apisix-source/apisix/discovery/tars/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/discovery/tars/init.lua new file mode 100644 index 0000000..17bb275 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/discovery/tars/init.lua @@ -0,0 +1,367 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-- TARS service discovery: periodically mirrors servant -> endpoint lists
-- from the TARS registry database (MySQL) into an nginx shared dict.
local ngx = ngx
local format = string.format
local ipairs = ipairs
local error = error
local tonumber = tonumber
local local_conf = require("apisix.core.config_local").local_conf()
local core = require("apisix.core")
local mysql = require("resty.mysql")
local is_http = ngx.config.subsystem == "http"
local process = require("ngx.process")

-- shared dict holding servant -> json node list, plus "<servant>#version" keys
local endpoint_dict

local full_query_sql = [[ select servant, group_concat(endpoint order by endpoint) as endpoints
from t_server_conf left join t_adapter_conf tac using (application, server_name, node_name)
where setting_state = 'active' and present_state = 'active'
group by servant ]]

local incremental_query_sql = [[
select servant, (setting_state = 'active' and present_state = 'active') activated,
group_concat(endpoint order by endpoint) endpoints
from t_server_conf left join t_adapter_conf tac using (application, server_name, node_name)
where (application, server_name) in
(
select application, server_name from t_server_conf
where registry_timestamp > now() - interval %d second
union
select application, server_name from t_adapter_conf
where registry_timestamp > now() - interval %d second
)
group by servant, activated order by activated desc ]]

local _M = {
    version = 0.1,
}

local default_weight

local last_fetch_full_time = 0
local last_db_error

-- per-worker cache of decoded endpoint lists, invalidated via "#version"
local endpoint_lrucache = core.lrucache.new({
    ttl = 300,
    count = 1024
})

local activated_buffer = core.table.new(10, 0)
local nodes_buffer = core.table.new(0, 5)


--[[
endpoints are stored like:
    tcp -h 172.16.1.1 -p 11 -t 6000 -e 0,tcp -e 0 -p 12 -h 172.16.1.1,tcp -p 13 -h 172.16.1.1
the pattern below extracts the -h/-p (host/port) values in either order
--]]
local endpoints_pattern = core.table.concat(
    { [[tcp(\s*-[te]\s*(\S+)){0,2}\s*-([hpHP])\s*(\S+)(\s*-[teTE]\s*(\S+))]],
      [[{0,2}\s*-([hpHP])\s*(\S+)(\s*-[teTE]\s*(\S+)){0,2}\s*(,|$)]] }
)


-- Store a servant's node list in the shared dict, writing its version key
-- first so readers can detect changes; the version is rolled back when the
-- value itself cannot be stored.
local function update_endpoint(servant, nodes)
    local endpoint_content = core.json.encode(nodes, true)
    local endpoint_version = ngx.crc32_long(endpoint_content)
    core.log.debug("set servant ", servant, endpoint_content)

    local _, err = endpoint_dict:safe_set(servant .. "#version", endpoint_version)
    if err then
        core.log.error("set endpoint version into nginx shared dict failed, ", err)
        return
    end

    _, err = endpoint_dict:safe_set(servant, endpoint_content)
    if err then
        core.log.error("set endpoint into nginx shared dict failed, ", err)
        -- keep value and version consistent: drop the version just written
        endpoint_dict:delete(servant .. "#version")
    end
end


-- Remove a servant (value + version key) from the shared dict.
local function delete_endpoint(servant)
    core.log.info("delete servant ", servant)
    endpoint_dict:delete(servant .. "#version")
    endpoint_dict:delete(servant)
end


-- lrucache loader: decode the shared-dict value for a servant.
-- get_stale() is required because fetch_full() may be between flush_all()
-- and flush_expired() (see get_endpoint below).
local function add_endpoint_to_lrucache(servant)
    local endpoint_content, err = endpoint_dict:get_stale(servant)
    if not endpoint_content then
        core.log.error("get empty endpoint content, servant: ", servant, ", err: ", err)
        return nil
    end

    local endpoint, decode_err = core.json.decode(endpoint_content)
    if not endpoint then
        core.log.error("decode json failed, content: ", endpoint_content,
                       ", err: ", decode_err)
        return nil
    end

    return endpoint
end


local function get_endpoint(servant)
    --[[
    fetch_full() works in three steps:
      1: endpoint_dict:flush_all()
      2: write servant:nodes pairs into endpoint_dict
      3: endpoint_dict:flush_expired()

    this function may run during step 2, so values must be read with
    endpoint_dict:get_stale() instead of endpoint_dict:get()
    --]]
    local endpoint_version, err = endpoint_dict:get_stale(servant .. "#version")
    if not endpoint_version then
        if err then
            core.log.error("get empty endpoint version, servant: ", servant, ", err: ", err)
        end
        return nil
    end
    return endpoint_lrucache(servant, endpoint_version, add_endpoint_to_lrucache, servant)
end


-- Translate one query result set into shared-dict updates.
-- activated == 1 rows are (re)written; activated == 0 rows are deleted
-- unless an activated row for the same servant was already seen in this
-- batch (result sets are ordered "activated desc").
local function extract_endpoint(query_result)
    for _, row in ipairs(query_result) do
        repeat  -- repeat/until true gives a "continue" via break
            local servant = row.servant

            if servant == ngx.null then
                break
            end

            if row.activated == 1 then
                activated_buffer[servant] = ngx.null
            elseif row.activated == 0 then
                if activated_buffer[servant] == nil then
                    delete_endpoint(servant)
                end
                break
            end

            core.table.clear(nodes_buffer)
            local iterator = ngx.re.gmatch(row.endpoints, endpoints_pattern, "jao")
            while true do
                local captures, err = iterator()
                if err then
                    core.log.error("gmatch failed, error: ", err, " , endpoints: ", row.endpoints)
                    break
                end

                if not captures then
                    break
                end

                -- -h/-p can appear in either order; captures[3] tells which
                local host, port
                if captures[3] == "h" or captures[3] == "H" then
                    host = captures[4]
                    port = tonumber(captures[8])
                else
                    host = captures[8]
                    port = tonumber(captures[4])
                end

                core.table.insert(nodes_buffer, {
                    host = host,
                    port = port,
                    weight = default_weight,
                })
            end
            update_endpoint(servant, nodes_buffer)
        until true
    end
end


-- Rebuild the whole shared dict from the registry database.
-- flush_all() marks every existing key expired; extract_endpoint() then
-- rewrites the live servants; flush_expired() finally drops the leftovers,
-- so servants that vanished from the DB are removed as a side effect.
local function fetch_full(db_cli)
    local res, err, errcode, sqlstate = db_cli:query(full_query_sql)
    if not res then
        core.log.error("query failed, error: ", err, ", ", errcode, " ", sqlstate)
        return err
    end

    endpoint_dict:flush_all()
    extract_endpoint(res)

    -- the driver signals further result sets with err == "again"
    while err == "again" do
        res, err, errcode, sqlstate = db_cli:read_result()
        if not res then
            if err then
                core.log.error("read result failed, error: ", err, ", ", errcode, " ", sqlstate)
            end
            return err
        end
        extract_endpoint(res)
    end
    endpoint_dict:flush_expired()

    return nil
end


-- Apply only the rows that changed within the backtrack window.
-- For each servant: activated rows are upserted; deactivated rows are
-- deleted unless an activated row for the same servant exists in the batch.
local function fetch_incremental(db_cli)
    local res, err, errcode, sqlstate = db_cli:query(incremental_query_sql)
    if not res then
        core.log.error("query failed, error: ", err, ", ", errcode, " ", sqlstate)
        return err
    end

    core.table.clear(activated_buffer)
    extract_endpoint(res)

    while err == "again" do
        res, err, errcode, sqlstate = db_cli:read_result()
        if not res then
            if err then
                core.log.error("read result failed, error: ", err, ", ", errcode, " ", sqlstate)
            end
            return err
        end
        extract_endpoint(res)
    end

    return nil
end


-- Timer handler: connect to the registry DB and run either a full or an
-- incremental sync. A full sync runs every full_fetch_interval seconds and
-- right after any DB error (an incremental sync cannot be trusted then).
local function fetch_endpoint(premature, conf)
    if premature then
        return
    end

    local db_cli, err = mysql:new()
    if not db_cli then
        core.log.error("failed to instantiate mysql: ", err)
        return
    end
    db_cli:set_timeout(3000)

    local ok, conn_err, errcode, sqlstate = db_cli:connect(conf.db_conf)
    if not ok then
        core.log.error("failed to connect mysql: ", conn_err, ", ", errcode, ", ", sqlstate)
        return
    end

    local now = ngx.time()

    if last_db_error or last_fetch_full_time + conf.full_fetch_interval <= now then
        last_fetch_full_time = now
        last_db_error = fetch_full(db_cli)
    else
        last_db_error = fetch_incremental(db_cli)
    end

    if not last_db_error then
        db_cli:set_keepalive(120 * 1000, 1)
    end
end


-- Public: nodes for a servant, or nil when unknown.
function _M.nodes(servant)
    return get_endpoint(servant)
end


-- The shared dict name depends on the subsystem (http vs stream).
local function get_endpoint_dict()
    local shm = "tars"

    if not is_http then
        shm = shm .. "-stream"
    end

    return ngx.shared[shm]
end


function _M.init_worker()
    endpoint_dict = get_endpoint_dict()
    if not endpoint_dict then
        error("failed to get lua_shared_dict: tars, please check your APISIX version")
    end

    -- only the privileged agent talks to the database
    if process.type() ~= "privileged agent" then
        return
    end

    local conf = local_conf.discovery.tars
    default_weight = conf.default_weight

    core.log.info("conf ", core.json.delay_encode(conf))
    -- look a little further back than the fetch interval so updates that
    -- landed while the previous query was running are not missed
    local backtrack_time = conf.incremental_fetch_interval + 5
    incremental_query_sql = format(incremental_query_sql, backtrack_time, backtrack_time)

    ngx.timer.at(0, fetch_endpoint, conf)
    ngx.timer.every(conf.incremental_fetch_interval, fetch_endpoint, conf)
end


return _M
-- JSON-schema for the `discovery.tars` section of config.yaml.
local host_pattern = [[^([a-zA-Z0-9-_.]+:.+\@)?[a-zA-Z0-9-_.:]+$]]

return {
    type = 'object',
    properties = {
        -- connection settings for the TARS registry MySQL database
        db_conf = {
            type = 'object',
            properties = {
                host = {type = 'string', minLength = 1, maxLength = 500, pattern = host_pattern},
                port = {type = 'integer', minimum = 1, maximum = 65535, default = 3306},
                database = {type = 'string', minLength = 1, maxLength = 64},
                user = {type = 'string', minLength = 1, maxLength = 64},
                password = {type = 'string', minLength = 1, maxLength = 64},
            },
            required = {'host', 'database', 'user', 'password'}
        },
        -- seconds between two full re-syncs of the registry
        full_fetch_interval = {
            type = 'integer', minimum = 90, maximum = 3600, default = 300,
        },
        -- seconds between two incremental syncs
        incremental_fetch_interval = {
            type = 'integer', minimum = 5, maximum = 60, default = 15,
        },
        -- weight assigned to every discovered node
        default_weight = {
            type = 'integer', minimum = 0, maximum = 100, default = 100,
        },
    },
    required = {'db_conf'}
}
-- Unified wrapper over the two event libraries APISIX can use for
-- cross-worker communication: lua-resty-worker-events (the default) and
-- lua-resty-events (opt-in via the apisix.events.module config key).
local require = require
local error = error
local assert = assert
local tostring = tostring
local pairs = pairs
local setmetatable = setmetatable
local ngx = ngx
local core = require("apisix.core")

local _M = {
    events_module = nil,  -- set by init_worker()
}

_M.EVENTS_MODULE_LUA_RESTY_WORKER_EVENTS = 'lua-resty-worker-events'
_M.EVENTS_MODULE_LUA_RESTY_EVENTS = 'lua-resty-events'


-- Configure lua-resty-worker-events on its per-subsystem shared dict.
local function init_resty_worker_events()
    _M.events_module = _M.EVENTS_MODULE_LUA_RESTY_WORKER_EVENTS

    local we = require("resty.worker.events")
    local shm = ngx.config.subsystem == "http" and "worker-events" or "worker-events-stream"
    local ok, err = we.configure({shm = shm, interval = 0.1})
    if not ok then
        error("failed to init worker event: " .. err)
    end

    return we
end


-- Configure lua-resty-events; the broker listens on a per-subsystem
-- unix socket under the nginx prefix.
local function init_resty_events()
    _M.events_module = _M.EVENTS_MODULE_LUA_RESTY_EVENTS

    local sock_name
    if ngx.config.subsystem == "http" then
        sock_name = "worker_events.sock"
    else
        sock_name = "stream_worker_events.sock"
    end
    local listening = "unix:" .. ngx.config.prefix() .. "logs/" .. sock_name
    core.log.info("subsystem: " .. ngx.config.subsystem .. " listening sock: " .. listening)

    local opts = {
        unique_timeout = 5,     -- life time of unique event data in lrucache
        broker_id = 0,          -- broker server runs in nginx worker #0
        listening = listening,  -- unix socket for broker listening
    }

    local we = require("resty.events.compat")
    assert(we.configure(opts))
    assert(we.configured())

    return we
end


-- Pick and initialize the configured event module; idempotent per worker.
function _M.init_worker()
    if _M.inited then
        -- prevent duplicate initializations in the same worker to
        -- avoid potentially unexpected behavior
        return
    end

    _M.inited = true

    local conf = core.config.local_conf()
    local module_name = core.table.try_read_attr(conf, "apisix", "events", "module")
                        or _M.EVENTS_MODULE_LUA_RESTY_WORKER_EVENTS

    if module_name == _M.EVENTS_MODULE_LUA_RESTY_EVENTS then
        -- lua-resty-events was selected via apisix.events.module
        _M.worker_events = init_resty_events()
    else
        -- lua-resty-worker-events remains the default
        _M.worker_events = init_resty_worker_events()
    end
end


-- Delegation to the underlying module (method-call style: events:register(...)).
function _M.register(self, ...)
    return self.worker_events.register(...)
end


function _M.event_list(self, source, ...)
    -- lua-resty-events has no event_list; emulate it with the same table
    -- shape lua-resty-worker-events builds (unknown names raise an error)
    if self.events_module == _M.EVENTS_MODULE_LUA_RESTY_EVENTS then
        local events = { _source = source }
        for _, event in pairs({...}) do
            events[event] = event
        end
        return setmetatable(events, {
            __index = function(_, key)
                error("event '"..tostring(key).."' is an unknown event", 2)
            end
        })
    end

    -- lua-resty-worker-events ships a built-in event_list
    return self.worker_events.event_list(source, ...)
end


function _M.post(self, ...)
    return self.worker_events.post(...)
end


-- Module name healthcheck libraries should require for event support.
-- NOTE: "modele" is a historical typo kept for interface compatibility.
function _M.get_healthcheck_events_modele(self)
    if self.events_module == _M.EVENTS_MODULE_LUA_RESTY_EVENTS then
        return "resty.events"
    end
    return "resty.worker.events"
end


return _M
-- Return the current snapshot of global rules together with its
-- configuration version (nil, nil before init_worker has run).
function _M.global_rules()
    if not global_rules then
        return nil, nil
    end
    return global_rules.values, global_rules.conf_version
end


-- Previous etcd modified index of the /global_rules subtree, or nil
-- before initialization.
function _M.get_pre_index()
    if not global_rules then
        return nil
    end
    return global_rules.prev_index
end

return _M
-- Builders and matchers for the HTTP radixtree uri routers.
local require = require
local radixtree = require("resty.radixtree")
local router = require("apisix.utils.router")
local service_fetch = require("apisix.http.service").get
local core = require("apisix.core")
local expr = require("resty.expr.v1")
local plugin_checker = require("apisix.plugin").plugin_checker
local event = require("apisix.core.event")
local ipairs = ipairs
local type = type
local error = error
local loadstring = loadstring


local _M = {}


-- Build a radixtree router from `routes`, filling `uri_routes` with the
-- translated entries. `with_parameter` selects resty.radixtree (supports
-- path parameters) over the wrapped apisix router.
function _M.create_radixtree_uri_router(routes, uri_routes, with_parameter)
    routes = routes or {}

    core.table.clear(uri_routes)

    for _, route in ipairs(routes) do
        if type(route) == "table" then
            repeat  -- repeat/until true gives a "continue" via break
                -- status == 0 means the route is disabled
                local status = core.table.try_read_attr(route, "value", "status")
                if status and status == 0 then
                    break
                end

                local filter_fun, err
                if route.value.filter_func then
                    filter_fun, err = loadstring(
                        "return " .. route.value.filter_func,
                        "router#" .. route.value.id)
                    if not filter_fun then
                        core.log.error("failed to load filter function: ", err,
                                       " route id: ", route.value.id)
                        break
                    end

                    filter_fun = filter_fun()
                end

                local hosts = route.value.hosts or route.value.host
                if not hosts and route.value.service_id then
                    local service = service_fetch(route.value.service_id)
                    if not service then
                        core.log.error("failed to fetch service configuration by ",
                                       "id: ", route.value.service_id)
                        -- a missing service must not break route matching
                    else
                        hosts = service.value.hosts
                    end
                end

                core.log.info("insert uri route: ",
                              core.json.delay_encode(route.value, true))
                core.table.insert(uri_routes, {
                    paths = route.value.uris or route.value.uri,
                    methods = route.value.methods,
                    priority = route.value.priority,
                    hosts = hosts,
                    remote_addrs = route.value.remote_addrs
                                   or route.value.remote_addr,
                    vars = route.value.vars,
                    filter_fun = filter_fun,
                    handler = function (api_ctx, match_opts)
                        api_ctx.matched_params = nil
                        api_ctx.matched_route = route
                        api_ctx.curr_req_matched = match_opts.matched
                    end
                })
            until true
        end
    end

    event.push(event.CONST.BUILD_ROUTER, routes)
    core.log.info("route items: ", core.json.delay_encode(uri_routes, true))

    if with_parameter then
        return radixtree.new(uri_routes)
    end
    return router.new(uri_routes)
end


-- Dispatch api_ctx.var.uri against a previously built router; the
-- pooled match_opts tables are released before returning.
function _M.match_uri(uri_router, api_ctx)
    local match_opts = core.tablepool.fetch("route_match_opts", 0, 4)
    match_opts.method = api_ctx.var.request_method
    match_opts.host = api_ctx.var.host
    match_opts.remote_addr = api_ctx.var.remote_addr
    match_opts.vars = api_ctx.var
    match_opts.matched = core.tablepool.fetch("matched_route_record", 0, 4)

    local ok = uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts)
    core.tablepool.release("route_match_opts", match_opts)
    return ok
end


-- Additional checks for a synced route configuration, run after the
-- schema check.
local function check_route(route)
    local ok, err = plugin_checker(route)
    if not ok then
        return nil, err
    end

    if route.vars then
        ok, err = expr.new(route.vars)
        if not ok then
            return nil, "failed to validate the 'vars' expression: " .. err
        end
    end

    return true
end


-- Watch /routes in etcd; `filter` post-processes each synced item.
function _M.init_worker(filter)
    local user_routes, err = core.config.new("/routes", {
        automatic = true,
        item_schema = core.schema.route,
        checker = check_route,
        filter = filter,
    })
    if not user_routes then
        error("failed to create etcd instance for fetching /routes : " .. err)
    end

    return user_routes
end


return _M
-- radixtree_host_uri router: match first on the reversed host (so wildcard
-- domains become radixtree prefixes), then on uri within that host.
local require = require
local router = require("apisix.utils.router")
local core = require("apisix.core")
local event = require("apisix.core.event")
local get_services = require("apisix.http.service").services
local service_fetch = require("apisix.http.service").get
local ipairs = ipairs
local type = type
local tab_insert = table.insert
local loadstring = loadstring
local pairs = pairs
local cached_router_version
local cached_service_version
local host_router
local only_uri_router


local _M = {version = 0.1}


-- Translate one route into a radixtree entry and file it under each of
-- its (reversed) hosts, or into only_uri_routes when it has no host.
local function push_host_router(route, host_routes, only_uri_routes)
    if type(route) ~= "table" then
        return
    end

    local filter_fun, err
    if route.value.filter_func then
        filter_fun, err = loadstring(
            "return " .. route.value.filter_func,
            "router#" .. route.value.id)
        if not filter_fun then
            core.log.error("failed to load filter function: ", err,
                           " route id: ", route.value.id)
            return
        end

        filter_fun = filter_fun()
    end

    local hosts = route.value.hosts
    if not hosts then
        if route.value.host then
            hosts = {route.value.host}
        elseif route.value.service_id then
            local service = service_fetch(route.value.service_id)
            if not service then
                core.log.error("failed to fetch service configuration by ",
                               "id: ", route.value.service_id)
                -- a missing service must not break route matching
            else
                hosts = service.value.hosts
            end
        end
    end

    local radixtree_route = {
        paths = route.value.uris or route.value.uri,
        methods = route.value.methods,
        priority = route.value.priority,
        remote_addrs = route.value.remote_addrs
                       or route.value.remote_addr,
        vars = route.value.vars,
        filter_fun = filter_fun,
        handler = function (api_ctx, match_opts)
            api_ctx.matched_params = nil
            api_ctx.matched_route = route
            api_ctx.curr_req_matched = match_opts.matched
            api_ctx.real_curr_req_matched_path = match_opts.matched._path
        end
    }

    if hosts == nil then
        core.table.insert(only_uri_routes, radixtree_route)
        return
    end

    for _, host in ipairs(hosts) do
        local host_rev = host:reverse()
        local bucket = host_routes[host_rev]
        if bucket then
            tab_insert(bucket, radixtree_route)
        else
            host_routes[host_rev] = {radixtree_route}
        end
    end
end


-- Rebuild both routers from the current route list; disabled routes
-- (status == 0) are skipped.
local function create_radixtree_router(routes)
    local host_routes = {}
    local only_uri_routes = {}
    host_router = nil
    routes = routes or {}

    for _, route in ipairs(routes) do
        local status = core.table.try_read_attr(route, "value", "status")
        if not status or status == 1 then
            push_host_router(route, host_routes, only_uri_routes)
        end
    end

    -- outer router keyed by reversed host; each entry dispatches into a
    -- per-host uri sub-router via filter_fun
    local host_router_routes = {}
    for host_rev, routes_of_host in pairs(host_routes) do
        local sub_router = router.new(routes_of_host)

        core.table.insert(host_router_routes, {
            paths = host_rev,
            filter_fun = function(vars, opts, ...)
                return sub_router:dispatch(vars.uri, opts, ...)
            end,
            handler = function (api_ctx, match_opts)
                api_ctx.real_curr_req_matched_host = match_opts.matched._path
            end
        })
    end

    event.push(event.CONST.BUILD_ROUTER, routes)

    if #host_router_routes > 0 then
        host_router = router.new(host_router_routes)
    end

    only_uri_router = router.new(only_uri_routes)
    return true
end


-- Rebuild the routers when either the route snapshot or the service
-- snapshot changed, then match the request.
function _M.match(api_ctx)
    local user_routes = _M.user_routes
    local _, service_version = get_services()
    if not cached_router_version or cached_router_version ~= user_routes.conf_version
        or not cached_service_version or cached_service_version ~= service_version
    then
        create_radixtree_router(user_routes.values)
        cached_router_version = user_routes.conf_version
        cached_service_version = service_version
    end

    return _M.matching(api_ctx)
end


-- Try the host-aware router first, falling back to host-less routes.
function _M.matching(api_ctx)
    core.log.info("route match mode: radixtree_host_uri")

    local match_opts = core.tablepool.fetch("route_match_opts", 0, 16)
    match_opts.method = api_ctx.var.request_method
    match_opts.remote_addr = api_ctx.var.remote_addr
    match_opts.vars = api_ctx.var
    match_opts.host = api_ctx.var.host
    match_opts.matched = core.tablepool.fetch("matched_route_record", 0, 4)

    if host_router then
        local host_uri = api_ctx.var.host
        local ok = host_router:dispatch(host_uri:reverse(), match_opts, api_ctx, match_opts)
        if ok then
            -- the nested dispatch recorded path/host into temporary slots;
            -- move them into curr_req_matched (host un-reversed)
            if api_ctx.real_curr_req_matched_path then
                api_ctx.curr_req_matched._path = api_ctx.real_curr_req_matched_path
                api_ctx.real_curr_req_matched_path = nil
            end
            if api_ctx.real_curr_req_matched_host then
                api_ctx.curr_req_matched._host = api_ctx.real_curr_req_matched_host:reverse()
                api_ctx.real_curr_req_matched_host = nil
            end
            core.tablepool.release("route_match_opts", match_opts)
            return true
        end
    end

    local ok = only_uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts)
    core.tablepool.release("route_match_opts", match_opts)
    return ok
end


return _M
+-- +local require = require +local core = require("apisix.core") +local base_router = require("apisix.http.route") +local get_services = require("apisix.http.service").services +local cached_router_version +local cached_service_version + + +local _M = {version = 0.2} + + + local uri_routes = {} + local uri_router +function _M.match(api_ctx) + local user_routes = _M.user_routes + local _, service_version = get_services() + if not cached_router_version or cached_router_version ~= user_routes.conf_version + or not cached_service_version or cached_service_version ~= service_version + then + uri_router = base_router.create_radixtree_uri_router(user_routes.values, + uri_routes, false) + cached_router_version = user_routes.conf_version + cached_service_version = service_version + end + + if not uri_router then + core.log.error("failed to fetch valid `uri` router: ") + return true + end + + return _M.matching(api_ctx) +end + + +function _M.matching(api_ctx) + core.log.info("route match mode: radixtree_uri") + return base_router.match_uri(uri_router, api_ctx) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/http/router/radixtree_uri_with_parameter.lua b/CloudronPackages/APISIX/apisix-source/apisix/http/router/radixtree_uri_with_parameter.lua new file mode 100644 index 0000000..3f10f4f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/http/router/radixtree_uri_with_parameter.lua @@ -0,0 +1,57 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local require = require +local core = require("apisix.core") +local base_router = require("apisix.http.route") +local get_services = require("apisix.http.service").services +local cached_router_version +local cached_service_version + + +local _M = {} + + + local uri_routes = {} + local uri_router +function _M.match(api_ctx) + local user_routes = _M.user_routes + local _, service_version = get_services() + if not cached_router_version or cached_router_version ~= user_routes.conf_version + or not cached_service_version or cached_service_version ~= service_version + then + uri_router = base_router.create_radixtree_uri_router(user_routes.values, + uri_routes, true) + cached_router_version = user_routes.conf_version + cached_service_version = service_version + end + + if not uri_router then + core.log.error("failed to fetch valid `uri_with_parameter` router: ") + return true + end + + return _M.matching(api_ctx) +end + + +function _M.matching(api_ctx) + core.log.info("route match mode: radixtree_uri_with_parameter") + return base_router.match_uri(uri_router, api_ctx) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/http/service.lua b/CloudronPackages/APISIX/apisix-source/apisix/http/service.lua new file mode 100644 index 0000000..97b224d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/http/service.lua @@ -0,0 +1,70 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. 
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local apisix_upstream = require("apisix.upstream") +local plugin_checker = require("apisix.plugin").plugin_checker +local services +local error = error + + +local _M = { + version = 0.2, +} + + +function _M.get(service_id) + return services:get(service_id) +end + + +function _M.services() + if not services then + return nil, nil + end + + return services.values, services.conf_version +end + + +local function filter(service) + service.has_domain = false + if not service.value then + return + end + + apisix_upstream.filter_upstream(service.value.upstream, service) + + core.log.info("filter service: ", core.json.delay_encode(service, true)) +end + + +function _M.init_worker() + local err + services, err = core.config.new("/services", { + automatic = true, + item_schema = core.schema.service, + checker = plugin_checker, + filter = filter, + }) + if not services then + error("failed to create etcd instance for fetching /services: " .. 
err) + return + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/include/apisix/model/pubsub.proto b/CloudronPackages/APISIX/apisix-source/apisix/include/apisix/model/pubsub.proto new file mode 100644 index 0000000..e5459e6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/include/apisix/model/pubsub.proto @@ -0,0 +1,143 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +option java_package = "org.apache.apisix.api.pubsub"; +option java_outer_classname = "PubSubProto"; +option java_multiple_files = true; +option go_package = "github.com/apache/apisix/api/pubsub;pubsub"; + +/** + * Ping command, used to keep the websocket connection alive + * + * The state field is used to pass some non-specific information, + * which will be returned in the pong response as is. + */ +message CmdPing { + bytes state = 1; +} + +/** + * An empty command, a placeholder for testing purposes only + */ +message CmdEmpty {} + +/** + * Get the offset of the specified topic partition from Apache Kafka. 
+ */ +message CmdKafkaListOffset { + string topic = 1; + int32 partition = 2; + int64 timestamp = 3; +} + +/** + * Fetch messages of the specified topic partition from Apache Kafka. + */ +message CmdKafkaFetch { + string topic = 1; + int32 partition = 2; + int64 offset = 3; +} + +/** + * Client request definition for pubsub scenarios + * + * The sequence field is used to associate requests and responses. + * Apache APISIX will set a consistent sequence for the associated + * requests and responses, and the client can explicitly know the + * response corresponding to any of the requests. + * + * The req field is the command data sent by the client, and its + * type will be chosen from any of the lists in the definition. + * + * Field numbers 1 to 30 in the definition are used to define basic + * information and future extensions, and numbers after 30 are used + * to define commands. + */ +message PubSubReq { + int64 sequence = 1; + oneof req { + CmdEmpty cmd_empty = 31; + CmdPing cmd_ping = 32; + CmdKafkaFetch cmd_kafka_fetch = 33; + CmdKafkaListOffset cmd_kafka_list_offset = 34; + }; +} + +/** + * The response body of the service when an error occurs, + * containing the error code and the error message. + */ +message ErrorResp { + int32 code = 1; + string message = 2; +} + +/** + * Pong response, the state field will pass through the + * value in the Ping command field. + */ +message PongResp { + bytes state = 1; +} + +/** + * The definition of a message in Kafka with the current message + * offset, production timestamp, Key, and message content. + */ +message KafkaMessage { + int64 offset = 1; + int64 timestamp = 2; + bytes key = 3; + bytes value = 4; +} + +/** + * The response of Fetch messages from Apache Kafka. + */ +message KafkaFetchResp { + repeated KafkaMessage messages = 1; +} + +/** + * The response of list offset from Apache Kafka. 
+ */ +message KafkaListOffsetResp { + int64 offset = 1; +} + +/** + * Server response definition for pubsub scenarios + * + * The sequence field will be the same as the value in the + * request, which is used to associate the associated request + * and response. + * + * The resp field is the response data sent by the server, and + * its type will be chosen from any of the lists in the definition. + */ +message PubSubResp { + int64 sequence = 1; + oneof resp { + ErrorResp error_resp = 31; + PongResp pong_resp = 32; + KafkaFetchResp kafka_fetch_resp = 33; + KafkaListOffsetResp kafka_list_offset_resp = 34; + }; +} diff --git a/CloudronPackages/APISIX/apisix-source/apisix/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/init.lua new file mode 100644 index 0000000..b5ee018 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/init.lua @@ -0,0 +1,1253 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local require = require +-- set the JIT options before any code, to prevent error "changing jit stack size is not +-- allowed when some regexs have already been compiled and cached" +if require("ffi").os == "Linux" then + require("ngx.re").opt("jit_stack_size", 200 * 1024) +end + +require("jit.opt").start("minstitch=2", "maxtrace=4000", + "maxrecord=8000", "sizemcode=64", + "maxmcode=4000", "maxirconst=1000") + +require("apisix.patch").patch() +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local plugin_config = require("apisix.plugin_config") +local consumer_group = require("apisix.consumer_group") +local script = require("apisix.script") +local service_fetch = require("apisix.http.service").get +local admin_init = require("apisix.admin.init") +local get_var = require("resty.ngxvar").fetch +local router = require("apisix.router") +local apisix_upstream = require("apisix.upstream") +local apisix_secret = require("apisix.secret") +local set_upstream = apisix_upstream.set_by_route +local apisix_ssl = require("apisix.ssl") +local apisix_global_rules = require("apisix.global_rules") +local upstream_util = require("apisix.utils.upstream") +local xrpc = require("apisix.stream.xrpc") +local ctxdump = require("resty.ctxdump") +local debug = require("apisix.debug") +local pubsub_kafka = require("apisix.pubsub.kafka") +local ngx = ngx +local get_method = ngx.req.get_method +local ngx_exit = ngx.exit +local math = math +local ipairs = ipairs +local ngx_now = ngx.now +local ngx_var = ngx.var +local re_split = require("ngx.re").split +local str_byte = string.byte +local str_sub = string.sub +local tonumber = tonumber +local type = type +local pairs = pairs +local tostring = tostring +local ngx_re_match = ngx.re.match +local control_api_router + +local is_http = false +if ngx.config.subsystem == "http" then + is_http = true + control_api_router = require("apisix.control.router") +end + +local ok, apisix_base_flags = pcall(require, 
"resty.apisix.patch") +if not ok then + apisix_base_flags = {} +end + +local load_balancer +local local_conf +local ver_header = "APISIX/" .. core.version.VERSION + +local has_mod, apisix_ngx_client = pcall(require, "resty.apisix.client") + +local _M = {version = 0.4} + + +function _M.http_init(args) + core.resolver.init_resolver(args) + core.id.init() + core.env.init() + + local process = require("ngx.process") + local ok, err = process.enable_privileged_agent() + if not ok then + core.log.error("failed to enable privileged_agent: ", err) + end + + if core.config.init then + local ok, err = core.config.init() + if not ok then + core.log.error("failed to load the configuration: ", err) + end + end + + xrpc.init() +end + + +function _M.http_init_worker() + local seed, err = core.utils.get_seed_from_urandom() + if not seed then + core.log.warn('failed to get seed from urandom: ', err) + seed = ngx_now() * 1000 + ngx.worker.pid() + end + math.randomseed(seed) + -- for testing only + core.log.info("random test in [1, 10000]: ", math.random(1, 10000)) + + require("apisix.events").init_worker() + + local discovery = require("apisix.discovery.init").discovery + if discovery and discovery.init_worker then + discovery.init_worker() + end + require("apisix.balancer").init_worker() + load_balancer = require("apisix.balancer") + require("apisix.admin.init").init_worker() + + require("apisix.timers").init_worker() + + require("apisix.debug").init_worker() + + if core.config.init_worker then + local ok, err = core.config.init_worker() + if not ok then + core.log.error("failed to init worker process of ", core.config.type, + " config center, err: ", err) + end + end + + plugin.init_worker() + router.http_init_worker() + require("apisix.http.service").init_worker() + plugin_config.init_worker() + require("apisix.consumer").init_worker() + consumer_group.init_worker() + apisix_secret.init_worker() + + apisix_global_rules.init_worker() + + apisix_upstream.init_worker() + 
require("apisix.plugins.ext-plugin.init").init_worker() + + control_api_router.init_worker() + local_conf = core.config.local_conf() + + if local_conf.apisix and local_conf.apisix.enable_server_tokens == false then + ver_header = "APISIX" + end +end + + +function _M.http_exit_worker() + -- TODO: we can support stream plugin later - currently there is not `destroy` method + -- in stream plugins + plugin.exit_worker() + require("apisix.plugins.ext-plugin.init").exit_worker() +end + + +function _M.ssl_phase() + local ok, err = router.router_ssl.set(ngx.ctx.matched_ssl) + if not ok then + if err then + core.log.error("failed to fetch ssl config: ", err) + end + ngx_exit(-1) + end +end + + +function _M.ssl_client_hello_phase() + local sni, err = apisix_ssl.server_name(true) + if not sni or type(sni) ~= "string" then + local advise = "please check if the client requests via IP or uses an outdated " .. + "protocol. If you need to report an issue, " .. + "provide a packet capture file of the TLS handshake." + core.log.error("failed to find SNI: " .. 
(err or advise)) + ngx_exit(-1) + end + local tls_ext_status_req = apisix_ssl.get_status_request_ext() + + local ngx_ctx = ngx.ctx + local api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + + local ok, err = router.router_ssl.match_and_set(api_ctx, true, sni) + + ngx_ctx.matched_ssl = api_ctx.matched_ssl + core.tablepool.release("api_ctx", api_ctx) + ngx_ctx.api_ctx = nil + ngx_ctx.tls_ext_status_req = tls_ext_status_req + + if not ok then + if err then + core.log.error("failed to fetch ssl config: ", err) + end + core.log.error("failed to match any SSL certificate by SNI: ", sni) + ngx_exit(-1) + end + + ok, err = apisix_ssl.set_protocols_by_clienthello(ngx_ctx.matched_ssl.value.ssl_protocols) + if not ok then + core.log.error("failed to set ssl protocols: ", err) + ngx_exit(-1) + end + + -- in stream subsystem, ngx.ssl.server_name() return hostname of ssl session in preread phase, + -- so that we can't get real SNI without recording it in ngx.ctx during client_hello phase + ngx.ctx.client_hello_sni = sni +end + + +local function stash_ngx_ctx() + local ref = ctxdump.stash_ngx_ctx() + core.log.info("stash ngx ctx: ", ref) + ngx_var.ctx_ref = ref +end + + +local function fetch_ctx() + local ref = ngx_var.ctx_ref + core.log.info("fetch ngx ctx: ", ref) + local ctx = ctxdump.apply_ngx_ctx(ref) + ngx_var.ctx_ref = '' + return ctx +end + + +local function parse_domain_in_route(route) + local nodes = route.value.upstream.nodes + local new_nodes, err = upstream_util.parse_domain_for_nodes(nodes) + if not new_nodes then + return nil, err + end + + local up_conf = route.dns_value and route.dns_value.upstream + local ok = upstream_util.compare_upstream_node(up_conf, new_nodes) + if ok then + return route + end + + -- don't modify the modifiedIndex to avoid plugin cache miss because of DNS resolve result + -- has changed + + route.dns_value = core.table.deepcopy(route.value, { shallows = { "self.upstream.parent"}}) + route.dns_value.upstream.nodes 
= new_nodes + core.log.info("parse route which contain domain: ", + core.json.delay_encode(route, true)) + return route +end + + +local function set_upstream_host(api_ctx, picked_server) + local up_conf = api_ctx.upstream_conf + if up_conf.pass_host then + api_ctx.pass_host = up_conf.pass_host + api_ctx.upstream_host = up_conf.upstream_host + end + + local pass_host = api_ctx.pass_host or "pass" + if pass_host == "pass" then + return + end + + if pass_host == "rewrite" then + api_ctx.var.upstream_host = api_ctx.upstream_host + return + end + + api_ctx.var.upstream_host = picked_server.upstream_host +end + + +local function set_upstream_headers(api_ctx, picked_server) + set_upstream_host(api_ctx, picked_server) + + local proto = api_ctx.var.http_x_forwarded_proto + if proto then + api_ctx.var.var_x_forwarded_proto = proto + end + + local x_forwarded_host = api_ctx.var.http_x_forwarded_host + if x_forwarded_host then + api_ctx.var.var_x_forwarded_host = x_forwarded_host + end + + local port = api_ctx.var.http_x_forwarded_port + if port then + api_ctx.var.var_x_forwarded_port = port + end +end + + +-- verify the TLS session resumption by checking if the SNI in the client hello +-- matches the hostname of the SSL session, this is to prevent the mTLS bypass security issue. 
+local function verify_tls_session_resumption() + local session_hostname, err = apisix_ssl.session_hostname() + if err then + core.log.error("failed to get session hostname: ", err) + return false + end + if session_hostname and session_hostname ~= ngx.ctx.client_hello_sni then + core.log.error("sni in client hello mismatch hostname of ssl session, ", + "sni: ", ngx.ctx.client_hello_sni, ", hostname: ", session_hostname) + return false + end + + return true +end + + +local function verify_tls_client(ctx) + local matched = router.router_ssl.match_and_set(ctx, true) + if not matched then + return true + end + + local matched_ssl = ctx.matched_ssl + if matched_ssl.value.client and apisix_ssl.support_client_verification() then + local res = ngx_var.ssl_client_verify + if res ~= "SUCCESS" then + if res == "NONE" then + core.log.error("client certificate was not present") + else + core.log.error("client certificate verification is not passed: ", res) + end + + return false + end + + if not verify_tls_session_resumption() then + return false + end + end + + return true +end + + +local function uri_matches_skip_mtls_route_patterns(ssl, uri) + for _, pat in ipairs(ssl.value.client.skip_mtls_uri_regex) do + if ngx_re_match(uri, pat, "jo") then + return true + end + end +end + + +local function verify_https_client(ctx) + local scheme = ctx.var.scheme + if scheme ~= "https" then + return true + end + + local matched_ssl = ngx.ctx.matched_ssl + if matched_ssl.value.client + and matched_ssl.value.client.skip_mtls_uri_regex + and apisix_ssl.support_client_verification() + and (not uri_matches_skip_mtls_route_patterns(matched_ssl, ngx.var.uri)) then + local res = ctx.var.ssl_client_verify + if res ~= "SUCCESS" then + if res == "NONE" then + core.log.error("client certificate was not present") + else + core.log.error("client certificate verification is not passed: ", res) + end + + return false + end + end + + local host = ctx.var.host + local matched = 
router.router_ssl.match_and_set(ctx, true, host) + if not matched then + return true + end + + local matched_ssl = ctx.matched_ssl + if matched_ssl.value.client and apisix_ssl.support_client_verification() then + local verified = apisix_base_flags.client_cert_verified_in_handshake + if not verified then + -- vanilla OpenResty requires to check the verification result + local res = ctx.var.ssl_client_verify + if res ~= "SUCCESS" then + if res == "NONE" then + core.log.error("client certificate was not present") + else + core.log.error("client certificate verification is not passed: ", res) + end + + return false + end + end + + local sni = apisix_ssl.server_name() + if sni ~= host then + -- There is a case that the user configures a SSL object with `*.domain`, + -- and the client accesses with SNI `a.domain` but uses Host `b.domain`. + -- This case is complex and we choose to restrict the access until there + -- is a stronge demand in real world. + core.log.error("client certificate verified with SNI ", sni, + ", but the host is ", host) + return false + end + + if not verify_tls_session_resumption() then + return false + end + end + + return true +end + + +local function normalize_uri_like_servlet(uri) + local found = core.string.find(uri, ';') + if not found then + return uri + end + + local segs, err = re_split(uri, "/", "jo") + if not segs then + return nil, err + end + + local len = #segs + for i = 1, len do + local seg = segs[i] + local pos = core.string.find(seg, ';') + if pos then + seg = seg:sub(1, pos - 1) + -- reject bad uri which bypasses with ';' + if seg == "." or seg == ".." 
then + return nil, "dot segment with parameter" + end + if seg == "" and i < len then + return nil, "empty segment with parameters" + end + + segs[i] = seg + + seg = seg:lower() + if seg == "%2e" or seg == "%2e%2e" then + return nil, "encoded dot segment" + end + end + end + + return core.table.concat(segs, '/') +end + + +local function common_phase(phase_name) + local api_ctx = ngx.ctx.api_ctx + if not api_ctx then + return + end + + plugin.run_global_rules(api_ctx, api_ctx.global_rules, phase_name) + + if api_ctx.script_obj then + script.run(phase_name, api_ctx) + return api_ctx, true + end + + return plugin.run_plugin(phase_name, nil, api_ctx) +end + + + +function _M.handle_upstream(api_ctx, route, enable_websocket) + -- some plugins(ai-proxy...) request upstream by http client directly + if api_ctx.bypass_nginx_upstream then + common_phase("before_proxy") + return + end + + local up_id = route.value.upstream_id + + -- used for the traffic-split plugin + if api_ctx.upstream_id then + up_id = api_ctx.upstream_id + end + + if up_id then + local upstream = apisix_upstream.get_by_id(up_id) + if not upstream then + if is_http then + return core.response.exit(502) + end + + return ngx_exit(1) + end + + api_ctx.matched_upstream = upstream + + else + if route.has_domain then + local err + route, err = parse_domain_in_route(route) + if err then + core.log.error("failed to get resolved route: ", err) + return core.response.exit(500) + end + + api_ctx.conf_version = route.modifiedIndex + api_ctx.matched_route = route + end + + local route_val = route.value + + api_ctx.matched_upstream = (route.dns_value and + route.dns_value.upstream) + or route_val.upstream + end + + if api_ctx.matched_upstream and api_ctx.matched_upstream.tls and + api_ctx.matched_upstream.tls.client_cert_id then + + local cert_id = api_ctx.matched_upstream.tls.client_cert_id + local upstream_ssl = router.router_ssl.get_by_id(cert_id) + if not upstream_ssl or upstream_ssl.type ~= "client" then + local 
err = upstream_ssl and + "ssl type should be 'client'" or + "ssl id [" .. cert_id .. "] not exits" + core.log.error("failed to get ssl cert: ", err) + + if is_http then + return core.response.exit(502) + end + + return ngx_exit(1) + end + + core.log.info("matched ssl: ", + core.json.delay_encode(upstream_ssl, true)) + api_ctx.upstream_ssl = upstream_ssl + end + + if enable_websocket then + api_ctx.var.upstream_upgrade = api_ctx.var.http_upgrade + api_ctx.var.upstream_connection = api_ctx.var.http_connection + core.log.info("enabled websocket for route: ", route.value.id) + end + + -- load balancer is not required by kafka upstream, so the upstream + -- node selection process is intercepted and left to kafka to + -- handle on its own + if api_ctx.matched_upstream and api_ctx.matched_upstream.scheme == "kafka" then + return pubsub_kafka.access(api_ctx) + end + + local code, err = set_upstream(route, api_ctx) + if code then + core.log.error("failed to set upstream: ", err) + core.response.exit(code) + end + + local server, err = load_balancer.pick_server(route, api_ctx) + if not server then + core.log.error("failed to pick server: ", err) + return core.response.exit(502) + end + + api_ctx.picked_server = server + + set_upstream_headers(api_ctx, server) + + -- run the before_proxy method in access phase first to avoid always reinit request + common_phase("before_proxy") + + local up_scheme = api_ctx.upstream_scheme + if up_scheme == "grpcs" or up_scheme == "grpc" then + stash_ngx_ctx() + return ngx.exec("@grpc_pass") + end + + if api_ctx.dubbo_proxy_enabled then + stash_ngx_ctx() + return ngx.exec("@dubbo_pass") + end +end + + +function _M.http_access_phase() + -- from HTTP/3 to HTTP/1.1 we need to convert :authority pesudo-header + -- to Host header, so we set upstream_host variable here. + if ngx.req.http_version() == 3 then + ngx.var.upstream_host = ngx.var.host .. ":" .. 
ngx.var.server_port + end + local ngx_ctx = ngx.ctx + + -- always fetch table from the table pool, we don't need a reused api_ctx + local api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + + core.ctx.set_vars_meta(api_ctx) + + if not verify_https_client(api_ctx) then + return core.response.exit(400) + end + + debug.dynamic_debug(api_ctx) + + local uri = api_ctx.var.uri + if local_conf.apisix then + if local_conf.apisix.delete_uri_tail_slash then + if str_byte(uri, #uri) == str_byte("/") then + api_ctx.var.uri = str_sub(api_ctx.var.uri, 1, #uri - 1) + core.log.info("remove the end of uri '/', current uri: ", api_ctx.var.uri) + end + end + + if local_conf.apisix.normalize_uri_like_servlet then + local new_uri, err = normalize_uri_like_servlet(uri) + if not new_uri then + core.log.error("failed to normalize: ", err) + return core.response.exit(400) + end + + api_ctx.var.uri = new_uri + -- forward the original uri so the servlet upstream + -- can consume the param after ';' + api_ctx.var.upstream_uri = uri + end + end + + -- To prevent being hacked by untrusted request_uri, here we + -- record the normalized but not rewritten uri as request_uri, + -- the original request_uri can be accessed via var.real_request_uri + api_ctx.var.real_request_uri = api_ctx.var.request_uri + api_ctx.var.request_uri = api_ctx.var.uri .. api_ctx.var.is_args .. 
(api_ctx.var.args or "") + + router.router_http.match(api_ctx) + + local route = api_ctx.matched_route + if not route then + -- run global rule when there is no matching route + local global_rules = apisix_global_rules.global_rules() + plugin.run_global_rules(api_ctx, global_rules, nil) + + core.log.info("not find any matched route") + return core.response.exit(404, + {error_msg = "404 Route Not Found"}) + end + + core.log.info("matched route: ", + core.json.delay_encode(api_ctx.matched_route, true)) + + local enable_websocket = route.value.enable_websocket + + if route.value.plugin_config_id then + local conf = plugin_config.get(route.value.plugin_config_id) + if not conf then + core.log.error("failed to fetch plugin config by ", + "id: ", route.value.plugin_config_id) + return core.response.exit(503) + end + + route = plugin_config.merge(route, conf) + end + + if route.value.service_id then + local service = service_fetch(route.value.service_id) + if not service then + core.log.error("failed to fetch service configuration by ", + "id: ", route.value.service_id) + return core.response.exit(404) + end + + route = plugin.merge_service_route(service, route) + api_ctx.matched_route = route + api_ctx.conf_type = "route&service" + api_ctx.conf_version = route.modifiedIndex .. "&" .. service.modifiedIndex + api_ctx.conf_id = route.value.id .. "&" .. 
service.value.id + api_ctx.service_id = service.value.id + api_ctx.service_name = service.value.name + + if enable_websocket == nil then + enable_websocket = service.value.enable_websocket + end + + else + api_ctx.conf_type = "route" + api_ctx.conf_version = route.modifiedIndex + api_ctx.conf_id = route.value.id + end + api_ctx.route_id = route.value.id + api_ctx.route_name = route.value.name + + -- run global rule + local global_rules = apisix_global_rules.global_rules() + plugin.run_global_rules(api_ctx, global_rules, nil) + + if route.value.script then + script.load(route, api_ctx) + script.run("access", api_ctx) + + else + local plugins = plugin.filter(api_ctx, route) + api_ctx.plugins = plugins + + plugin.run_plugin("rewrite", plugins, api_ctx) + if api_ctx.consumer then + local changed + local group_conf + + if api_ctx.consumer.group_id then + group_conf = consumer_group.get(api_ctx.consumer.group_id) + if not group_conf then + core.log.error("failed to fetch consumer group config by ", + "id: ", api_ctx.consumer.group_id) + return core.response.exit(503) + end + end + + route, changed = plugin.merge_consumer_route( + route, + api_ctx.consumer, + group_conf, + api_ctx + ) + + core.log.info("find consumer ", api_ctx.consumer.username, + ", config changed: ", changed) + + if changed then + api_ctx.matched_route = route + core.table.clear(api_ctx.plugins) + local phase = "rewrite_in_consumer" + api_ctx.plugins = plugin.filter(api_ctx, route, api_ctx.plugins, nil, phase) + -- rerun rewrite phase for newly added plugins in consumer + plugin.run_plugin(phase, api_ctx.plugins, api_ctx) + end + end + plugin.run_plugin("access", plugins, api_ctx) + end + + _M.handle_upstream(api_ctx, route, enable_websocket) +end + + +function _M.dubbo_access_phase() + ngx.ctx = fetch_ctx() +end + + +function _M.grpc_access_phase() + ngx.ctx = fetch_ctx() + + local api_ctx = ngx.ctx.api_ctx + if not api_ctx then + return + end + + local code, err = 
apisix_upstream.set_grpcs_upstream_param(api_ctx) + if code then + core.log.error("failed to set grpcs upstream param: ", err) + core.response.exit(code) + end + + if api_ctx.enable_mirror == true and has_mod then + apisix_ngx_client.enable_mirror() + end +end + + +local function set_resp_upstream_status(up_status) + local_conf = core.config.local_conf() + + if local_conf.apisix and local_conf.apisix.show_upstream_status_in_response_header then + core.response.set_header("X-APISIX-Upstream-Status", up_status) + elseif #up_status == 3 then + if tonumber(up_status) >= 500 and tonumber(up_status) <= 599 then + core.response.set_header("X-APISIX-Upstream-Status", up_status) + end + elseif #up_status > 3 then + -- the up_status can be "502, 502" or "502, 502 : " + local last_status + if str_byte(up_status, -1) == str_byte(" ") then + last_status = str_sub(up_status, -6, -3) + else + last_status = str_sub(up_status, -3) + end + + if tonumber(last_status) >= 500 and tonumber(last_status) <= 599 then + core.response.set_header("X-APISIX-Upstream-Status", up_status) + end + end +end + + +function _M.http_header_filter_phase() + core.response.set_header("Server", ver_header) + + local up_status = get_var("upstream_status") + if up_status then + set_resp_upstream_status(up_status) + end + + common_phase("header_filter") + + local api_ctx = ngx.ctx.api_ctx + if not api_ctx then + return + end + + local debug_headers = api_ctx.debug_headers + if debug_headers then + local deduplicate = core.table.new(core.table.nkeys(debug_headers), 0) + for k, v in pairs(debug_headers) do + core.table.insert(deduplicate, k) + end + core.response.set_header("Apisix-Plugins", core.table.concat(deduplicate, ", ")) + end +end + + +function _M.http_body_filter_phase() + common_phase("body_filter") + common_phase("delayed_body_filter") +end + + +local function healthcheck_passive(api_ctx) + local checker = api_ctx.up_checker + if not checker then + return + end + + local up_conf = 
api_ctx.upstream_conf + local passive = up_conf.checks.passive + if not passive then + return + end + + core.log.info("enabled healthcheck passive") + local host = up_conf.checks and up_conf.checks.active + and up_conf.checks.active.host + local port = up_conf.checks and up_conf.checks.active + and up_conf.checks.active.port or api_ctx.balancer_port + + local resp_status = ngx.status + + if not is_http then + -- 200 is the only success status code for TCP + if resp_status ~= 200 then + checker:report_tcp_failure(api_ctx.balancer_ip, port, host, nil, "passive") + end + return + end + + local http_statuses = passive and passive.healthy and + passive.healthy.http_statuses + core.log.info("passive.healthy.http_statuses: ", + core.json.delay_encode(http_statuses)) + if http_statuses then + for i, status in ipairs(http_statuses) do + if resp_status == status then + checker:report_http_status(api_ctx.balancer_ip, + port, + host, + resp_status) + end + end + end + + http_statuses = passive and passive.unhealthy and + passive.unhealthy.http_statuses + core.log.info("passive.unhealthy.http_statuses: ", + core.json.delay_encode(http_statuses)) + if not http_statuses then + return + end + + for i, status in ipairs(http_statuses) do + if resp_status == status then + checker:report_http_status(api_ctx.balancer_ip, + port, + host, + resp_status) + end + end +end + + +function _M.status() + core.response.exit(200, core.json.encode({ status = "ok" })) +end + +function _M.status_ready() + local local_conf = core.config.local_conf() + local role = core.table.try_read_attr(local_conf, "deployment", "role") + local provider = core.table.try_read_attr(local_conf, "deployment", "role_" .. 
+ role, "config_provider") + if provider == "yaml" or provider == "etcd" then + local status_shdict = ngx.shared["status-report"] + local ids = status_shdict:get_keys() + local error + local worker_count = ngx.worker.count() + if #ids ~= worker_count then + core.log.warn("worker count: ", worker_count, " but status report count: ", #ids) + error = "worker count: " .. ngx.worker.count() .. + " but status report count: " .. #ids + end + if error then + core.response.exit(503, core.json.encode({ + status = "error", + error = error + })) + return + end + for _, id in ipairs(ids) do + local ready = status_shdict:get(id) + if not ready then + core.log.warn("worker id: ", id, " has not received configuration") + error = "worker id: " .. id .. + " has not received configuration" + break + end + end + + if error then + core.response.exit(503, core.json.encode({ + status = "error", + error = error + })) + return + end + + core.response.exit(200, core.json.encode({ status = "ok" })) + return + end + + core.response.exit(503, core.json.encode({ + status = "error", + message = "unknown config provider: " .. 
tostring(provider) + }), { ["Content-Type"] = "application/json" }) +end + + +function _M.http_log_phase() + local api_ctx = common_phase("log") + if not api_ctx then + return + end + + healthcheck_passive(api_ctx) + + if api_ctx.server_picker and api_ctx.server_picker.after_balance then + api_ctx.server_picker.after_balance(api_ctx, false) + end + + core.ctx.release_vars(api_ctx) + if api_ctx.plugins then + core.tablepool.release("plugins", api_ctx.plugins) + end + + if api_ctx.curr_req_matched then + core.tablepool.release("matched_route_record", api_ctx.curr_req_matched) + end + + core.tablepool.release("api_ctx", api_ctx) +end + + +function _M.http_balancer_phase() + local api_ctx = ngx.ctx.api_ctx + if not api_ctx then + core.log.error("invalid api_ctx") + return core.response.exit(500) + end + + load_balancer.run(api_ctx.matched_route, api_ctx, common_phase) +end + + +local function cors_admin() + local_conf = core.config.local_conf() + if not core.table.try_read_attr(local_conf, "deployment", "admin", "enable_admin_cors") then + return + end + + local method = get_method() + if method == "OPTIONS" then + core.response.set_header("Access-Control-Allow-Origin", "*", + "Access-Control-Allow-Methods", + "POST, GET, PUT, OPTIONS, DELETE, PATCH", + "Access-Control-Max-Age", "3600", + "Access-Control-Allow-Headers", "*", + "Access-Control-Allow-Credentials", "true", + "Content-Length", "0", + "Content-Type", "text/plain") + ngx_exit(200) + end + + core.response.set_header("Access-Control-Allow-Origin", "*", + "Access-Control-Allow-Credentials", "true", + "Access-Control-Expose-Headers", "*", + "Access-Control-Max-Age", "3600") +end + +local function add_content_type() + core.response.set_header("Content-Type", "application/json") +end + +do + local router + +function _M.http_admin() + if not router then + router = admin_init.get() + end + + core.response.set_header("Server", ver_header) + -- add cors rsp header + cors_admin() + + -- add content type to rsp header + 
add_content_type() + + -- core.log.info("uri: ", get_var("uri"), " method: ", get_method()) + local ok = router:dispatch(get_var("uri"), {method = get_method()}) + if not ok then + ngx_exit(404) + end +end + +end -- do + + +function _M.http_control() + local ok = control_api_router.match(get_var("uri")) + if not ok then + ngx_exit(404) + end +end + + +function _M.stream_init(args) + core.log.info("enter stream_init") + + core.resolver.init_resolver(args) + + if core.config.init then + local ok, err = core.config.init() + if not ok then + core.log.error("failed to load the configuration: ", err) + end + end + + xrpc.init() +end + + +function _M.stream_init_worker() + core.log.info("enter stream_init_worker") + local seed, err = core.utils.get_seed_from_urandom() + if not seed then + core.log.warn('failed to get seed from urandom: ', err) + seed = ngx_now() * 1000 + ngx.worker.pid() + end + math.randomseed(seed) + -- for testing only + core.log.info("random stream test in [1, 10000]: ", math.random(1, 10000)) + + if core.config.init_worker then + local ok, err = core.config.init_worker() + if not ok then + core.log.error("failed to init worker process of ", core.config.type, + " config center, err: ", err) + end + end + + plugin.init_worker() + xrpc.init_worker() + router.stream_init_worker() + require("apisix.http.service").init_worker() + apisix_upstream.init_worker() + + require("apisix.events").init_worker() + + local discovery = require("apisix.discovery.init").discovery + if discovery and discovery.init_worker then + discovery.init_worker() + end + + load_balancer = require("apisix.balancer") + + local_conf = core.config.local_conf() +end + + +function _M.stream_preread_phase() + local ngx_ctx = ngx.ctx + local api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + + if not verify_tls_client(api_ctx) then + return ngx_exit(1) + end + + core.ctx.set_vars_meta(api_ctx) + + local ok, err = router.router_stream.match(api_ctx) + if not ok 
then + core.log.error(err) + return ngx_exit(1) + end + + core.log.info("matched route: ", + core.json.delay_encode(api_ctx.matched_route, true)) + + local matched_route = api_ctx.matched_route + if not matched_route then + return ngx_exit(1) + end + + + local up_id = matched_route.value.upstream_id + if up_id then + local upstream = apisix_upstream.get_by_id(up_id) + if not upstream then + if is_http then + return core.response.exit(502) + end + + return ngx_exit(1) + end + + api_ctx.matched_upstream = upstream + + elseif matched_route.value.service_id then + local service = service_fetch(matched_route.value.service_id) + if not service then + core.log.error("failed to fetch service configuration by ", + "id: ", matched_route.value.service_id) + return core.response.exit(404) + end + + matched_route = plugin.merge_service_stream_route(service, matched_route) + api_ctx.matched_route = matched_route + api_ctx.conf_type = "stream_route&service" + api_ctx.conf_version = matched_route.modifiedIndex .. "&" .. service.modifiedIndex + api_ctx.conf_id = matched_route.value.id .. "&" .. 
service.value.id + api_ctx.service_id = service.value.id + api_ctx.service_name = service.value.name + api_ctx.matched_upstream = matched_route.value.upstream + if matched_route.value.upstream_id and not matched_route.value.upstream then + local upstream = apisix_upstream.get_by_id(matched_route.value.upstream_id) + if not upstream then + if is_http then + return core.response.exit(502) + end + + return ngx_exit(1) + end + + api_ctx.matched_upstream = upstream + end + else + if matched_route.has_domain then + local err + matched_route, err = parse_domain_in_route(matched_route) + if err then + core.log.error("failed to get resolved route: ", err) + return ngx_exit(1) + end + + api_ctx.matched_route = matched_route + end + + local route_val = matched_route.value + api_ctx.matched_upstream = (matched_route.dns_value and + matched_route.dns_value.upstream) + or route_val.upstream + end + + local plugins = core.tablepool.fetch("plugins", 32, 0) + api_ctx.plugins = plugin.stream_filter(matched_route, plugins) + -- core.log.info("valid plugins: ", core.json.delay_encode(plugins, true)) + + api_ctx.conf_type = "stream/route" + api_ctx.conf_version = matched_route.modifiedIndex + api_ctx.conf_id = matched_route.value.id + + plugin.run_plugin("preread", plugins, api_ctx) + + if matched_route.value.protocol then + xrpc.run_protocol(matched_route.value.protocol, api_ctx) + return + end + + local code, err = set_upstream(matched_route, api_ctx) + if code then + core.log.error("failed to set upstream: ", err) + return ngx_exit(1) + end + + local server, err = load_balancer.pick_server(matched_route, api_ctx) + if not server then + core.log.error("failed to pick server: ", err) + return ngx_exit(1) + end + + api_ctx.picked_server = server + + -- run the before_proxy method in preread phase first to avoid always reinit request + common_phase("before_proxy") +end + + +function _M.stream_balancer_phase() + core.log.info("enter stream_balancer_phase") + local api_ctx = 
ngx.ctx.api_ctx + if not api_ctx then + core.log.error("invalid api_ctx") + return ngx_exit(1) + end + + load_balancer.run(api_ctx.matched_route, api_ctx, common_phase) +end + + +function _M.stream_log_phase() + core.log.info("enter stream_log_phase") + + local api_ctx = plugin.run_plugin("log") + if not api_ctx then + return + end + + healthcheck_passive(api_ctx) + + core.ctx.release_vars(api_ctx) + if api_ctx.plugins then + core.tablepool.release("plugins", api_ctx.plugins) + end + + core.tablepool.release("api_ctx", api_ctx) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/inspect/dbg.lua b/CloudronPackages/APISIX/apisix-source/apisix/inspect/dbg.lua new file mode 100644 index 0000000..2fd7878 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/inspect/dbg.lua @@ -0,0 +1,163 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local string_format = string.format +local debug = debug +local ipairs = ipairs +local pcall = pcall +local table_insert = table.insert +local jit = jit + +local _M = {} + +local hooks = {} + +function _M.getname(n) + if n.what == "C" then + return n.name + end + local lc = string_format("%s:%d", n.short_src, n.currentline) + if n.what ~= "main" and n.namewhat ~= "" then + return string_format("%s (%s)", lc, n.name) + else + return lc + end +end + +local function hook(_, arg) + local level = 2 + local finfo = debug.getinfo(level, "nSlf") + local key = finfo.source .. "#" .. arg + + local hooks2 = {} + local removed_hooks = {} + for _, hook in ipairs(hooks) do + if key:sub(-#hook.key) == hook.key then + local filter_func = hook.filter_func + local info = {finfo = finfo, uv = {}, vals = {}} + + -- upvalues + local i = 1 + while true do + local name, value = debug.getupvalue(finfo.func, i) + if name == nil then break end + if name:sub(1, 1) ~= "(" then + info.uv[name] = value + end + i = i + 1 + end + + -- local values + local i = 1 + while true do + local name, value = debug.getlocal(level, i) + if not name then break end + if name:sub(1, 1) ~= "(" then + info.vals[name] = value + end + i = i + 1 + end + + local r1, r2_or_err = pcall(filter_func, info) + if not r1 then + core.log.error("inspect: pcall filter_func:", r2_or_err) + table_insert(removed_hooks, hook) + elseif r2_or_err == false then + -- if filter_func returns false, keep the hook + table_insert(hooks2, hook) + else + table_insert(removed_hooks, hook) + end + else + -- key not match, keep the hook + table_insert(hooks2, hook) + end + end + + for _, hook in ipairs(removed_hooks) do + core.log.warn("inspect: remove hook: ", hook.key) + end + + -- disable debug mode if all hooks done + if #hooks2 ~= #hooks then + hooks = hooks2 + if #hooks == 0 then + core.log.warn("inspect: all hooks removed") + debug.sethook() + if jit then + jit.on() + end + end + end +end + 
+function _M.set_hook(file, line, func, filter_func) + if file == nil then + file = "=stdin" + end + + local key = file .. "#" .. line + table_insert(hooks, {key = key, filter_func = filter_func}) + + if jit then + jit.flush(func) + jit.off() + end + + debug.sethook(hook, "l") +end + +function _M.unset_hook(file, line) + if file == nil then + file = "=stdin" + end + + local hooks2 = {} + + local key = file .. "#" .. line + for i, hook in ipairs(hooks) do + if hook.key ~= key then + table_insert(hooks2, hook) + end + end + + if #hooks2 ~= #hooks then + hooks = hooks2 + if #hooks == 0 then + debug.sethook() + if jit then + jit.on() + end + end + end +end + +function _M.unset_all() + if #hooks > 0 then + hooks = {} + debug.sethook() + if jit then + jit.on() + end + end +end + +function _M.hooks() + return hooks +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/inspect/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/inspect/init.lua new file mode 100644 index 0000000..7014b61 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/inspect/init.lua @@ -0,0 +1,128 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local dbg = require("apisix.inspect.dbg") +local lfs = require("lfs") +local pl_path = require("pl.path") +local io = io +local table_insert = table.insert +local pcall = pcall +local ipairs = ipairs +local os = os +local ngx = ngx +local loadstring = loadstring +local format = string.format + +local _M = {} + +local last_modified = 0 + +local stop = false + +local running = false + +local last_report_time = 0 + +local REPORT_INTERVAL = 30 -- secs + +local function run_lua_file(file) + local f, err = io.open(file, "rb") + if not f then + return false, err + end + local code, err = f:read("*all") + f:close() + if code == nil then + return false, format("cannot read hooks file: %s", err) + end + local func, err = loadstring(code) + if not func then + return false, err + end + func() + return true +end + +local function setup_hooks(file) + if pl_path.exists(file) then + dbg.unset_all() + local _, err = pcall(run_lua_file, file) + local hooks = {} + for _, hook in ipairs(dbg.hooks()) do + table_insert(hooks, hook.key) + end + core.log.warn("set hooks: err: ", err, ", hooks: ", core.json.delay_encode(hooks)) + end +end + +local function reload_hooks(premature, delay, file) + if premature or stop then + stop = false + running = false + return + end + + local time, err = lfs.attributes(file, 'modification') + if err then + if last_modified ~= 0 then + core.log.info(err, ", disable all hooks") + dbg.unset_all() + last_modified = 0 + end + elseif time ~= last_modified then + setup_hooks(file) + last_modified = time + else + local ts = os.time() + if ts - last_report_time >= REPORT_INTERVAL then + local hooks = {} + for _, hook in ipairs(dbg.hooks()) do + table_insert(hooks, hook.key) + end + core.log.info("alive hooks: ", core.json.encode(hooks)) + last_report_time = ts + end + end + + local ok, err = ngx.timer.at(delay, reload_hooks, delay, file) + if not ok then + core.log.error("failed to create the timer: ", err) + running = 
false + end +end + +function _M.init(delay, file) + if not running then + file = file or "/usr/local/apisix/plugin_inspect_hooks.lua" + delay = delay or 3 + + setup_hooks(file) + + local ok, err = ngx.timer.at(delay, reload_hooks, delay, file) + if not ok then + core.log.error("failed to create the timer: ", err) + return + end + running = true + end +end + +function _M.destroy() + stop = true +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/patch.lua b/CloudronPackages/APISIX/apisix-source/apisix/patch.lua new file mode 100644 index 0000000..2b191b2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/patch.lua @@ -0,0 +1,384 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local require = require +require("resty.dns.resolver") -- preload dns resolver to prevent recursive patch +local ipmatcher = require("resty.ipmatcher") +local socket = require("socket") +local unix_socket = require("socket.unix") +local ssl = require("ssl") +local ngx = ngx +local get_phase = ngx.get_phase +local ngx_socket = ngx.socket +local original_tcp = ngx.socket.tcp +local original_udp = ngx.socket.udp +local concat_tab = table.concat +local debug = debug +local new_tab = require("table.new") +local log = ngx.log +local WARN = ngx.WARN +local ipairs = ipairs +local select = select +local setmetatable = setmetatable +local string = string +local table = table +local type = type +local tonumber = tonumber + + +local config_local +local _M = {} + + +local function get_local_conf() + if not config_local then + config_local = require("apisix.core.config_local") + end + + return config_local.local_conf() +end + + +local patch_tcp_socket +do + local old_tcp_sock_connect + + local function new_tcp_sock_connect(sock, host, port, opts) + local core_str = require("apisix.core.string") + local resolver = require("apisix.core.resolver") + + if host then + if core_str.has_prefix(host, "unix:") then + if not opts then + -- workaround for https://github.com/openresty/lua-nginx-module/issues/860 + return old_tcp_sock_connect(sock, host) + end + + elseif not ipmatcher.parse_ipv4(host) and not ipmatcher.parse_ipv6(host) then + local err + host, err = resolver.parse_domain(host) + if not host then + return nil, "failed to parse domain: " .. 
err + end + end + end + + return old_tcp_sock_connect(sock, host, port, opts) + end + + + function patch_tcp_socket(sock) + if not old_tcp_sock_connect then + old_tcp_sock_connect = sock.connect + end + + sock.connect = new_tcp_sock_connect + return sock + end +end + + +do -- `math.randomseed` patch + -- `math.random` generates PRND(pseudo-random numbers) from the seed set by `math.randomseed` + -- Many module libraries use `ngx.time` and `ngx.worker.pid` to generate seeds which may + -- loss randomness in container env (where pids are identical, e.g. root pid is 1) + -- Kubernetes may launch multi instance with deployment RS at the same time, `ngx.time` may + -- get same return in the pods. + -- Therefore, this global patch enforce entire framework to use + -- the best-practice PRND generates. + + local resty_random = require("resty.random") + local math_randomseed = math.randomseed + local seeded = {} + + -- make linter happy + -- luacheck: ignore + math.randomseed = function() + local worker_pid = ngx.worker.pid() + + -- check seed mark + if seeded[worker_pid] then + log(ngx.DEBUG, debug.traceback("Random seed has been inited", 2)) + return + end + + -- generate randomseed + -- chose 6 from APISIX's SIX, 256 ^ 6 should do the trick + -- it shouldn't be large than 16 to prevent overflow. 
+ local random_bytes = resty_random.bytes(6) + local t = {} + + for i = 1, #random_bytes do + t[i] = string.byte(random_bytes, i) + end + + local s = table.concat(t) + + math_randomseed(tonumber(s)) + seeded[worker_pid] = true + end +end -- do + + +local patch_udp_socket +do + local old_udp_sock_setpeername + + local function new_udp_sock_setpeername(sock, host, port) + local core_str = require("apisix.core.string") + local resolver = require("apisix.core.resolver") + + if host then + if core_str.has_prefix(host, "unix:") then + return old_udp_sock_setpeername(sock, host) + end + + if not ipmatcher.parse_ipv4(host) and not ipmatcher.parse_ipv6(host) then + local err + host, err = resolver.parse_domain(host) + if not host then + return nil, "failed to parse domain: " .. err + end + end + end + + return old_udp_sock_setpeername(sock, host, port) + end + + + function patch_udp_socket(sock) + if not old_udp_sock_setpeername then + old_udp_sock_setpeername = sock.setpeername + end + + sock.setpeername = new_udp_sock_setpeername + return sock + end +end + + +local function flatten(args) + local buf = new_tab(#args, 0) + for i, v in ipairs(args) do + local ty = type(v) + if ty == "table" then + buf[i] = flatten(v) + elseif ty == "boolean" then + buf[i] = v and "true" or "false" + elseif ty == "nil" then + buf[i] = "nil" + else + buf[i] = v + end + end + return concat_tab(buf) +end + + +local luasocket_wrapper = { + connect = function (self, host, port) + if not port then + -- unix socket + self.sock = unix_socket() + if self.timeout then + self.sock:settimeout(self.timeout) + end + + local path = host:sub(#("unix:") + 1) + return self.sock:connect(path) + end + + if host:byte(1) == string.byte('[') then + -- ipv6, form as '[::1]', remove '[' and ']' + host = host:sub(2, -2) + self.sock = self.tcp6 + else + self.sock = self.tcp4 + end + + return self.sock:connect(host, port) + end, + + send = function(self, ...) + if select('#', ...) 
== 1 and type(select(1, ...)) == "string" then + -- fast path + return self.sock:send(...) + end + + -- luasocket's send only accepts a single string + return self.sock:send(flatten({...})) + end, + + getreusedtimes = function () + return 0 + end, + setkeepalive = function (self) + self.sock:close() + return 1 + end, + + settimeout = function (self, time) + if time then + time = time / 1000 + end + + self.timeout = time + + return self.sock:settimeout(time) + end, + settimeouts = function (self, connect_time, read_time, write_time) + connect_time = connect_time or 0 + read_time = read_time or 0 + write_time = write_time or 0 + + -- set the max one as the timeout + local time = connect_time + if time < read_time then + time = read_time + end + if time < write_time then + time = write_time + end + + if time > 0 then + time = time / 1000 + else + time = nil + end + + self.timeout = time + + return self.sock:settimeout(time) + end, + + tlshandshake = function (self, options) + local reused_session = options.reused_session + local server_name = options.server_name + local verify = options.verify + local send_status_req = options.ocsp_status_req + + if reused_session then + log(WARN, "reused_session is not supported yet") + end + + if send_status_req then + log(WARN, "send_status_req is not supported yet") + end + + local params = { + mode = "client", + protocol = "any", + verify = verify and "peer" or "none", + certificate = options.client_cert_path, + key = options.client_priv_key_path, + options = { + "all", + "no_sslv2", + "no_sslv3", + "no_tlsv1" + } + } + + local local_conf, err = get_local_conf() + if not local_conf then + return nil, err + end + + local apisix_ssl = local_conf.apisix.ssl + if apisix_ssl and apisix_ssl.ssl_trusted_certificate then + params.cafile = apisix_ssl.ssl_trusted_certificate + end + + local sec_sock, err = ssl.wrap(self.sock, params) + if not sec_sock then + return false, err + end + + if server_name then + sec_sock:sni(server_name) + end 
+ + local success + success, err = sec_sock:dohandshake() + if not success then + return false, err + end + + self.sock = sec_sock + return true + end, + + sslhandshake = function (self, reused_session, server_name, verify, send_status_req) + return self:tlshandshake({ + reused_session = reused_session, + server_name = server_name, + verify = verify, + ocsp_status_req = send_status_req, + }) + end +} + + +local mt = { + __index = function(self, key) + local sock = self.sock + local fn = luasocket_wrapper[key] + if fn then + self[key] = fn + return fn + end + + local origin = sock[key] + if type(origin) ~= "function" then + return origin + end + + fn = function(_, ...) + return origin(sock, ...) + end + + self[key] = fn + return fn + end +} + +local function luasocket_tcp() + local sock = socket.tcp() + local tcp4 = socket.tcp4() + local tcp6 = socket.tcp6() + return setmetatable({sock = sock, tcp4 = tcp4, tcp6 = tcp6}, mt) +end + + +function _M.patch() + -- make linter happy + -- luacheck: ignore + ngx_socket.tcp = function () + local phase = get_phase() + if phase ~= "init" and phase ~= "init_worker" then + return patch_tcp_socket(original_tcp()) + end + + return luasocket_tcp() + end + + ngx_socket.udp = function () + return patch_udp_socket(original_udp()) + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugin.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugin.lua new file mode 100644 index 0000000..6cb876b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugin.lua @@ -0,0 +1,1285 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local require = require +local core = require("apisix.core") +local config_util = require("apisix.core.config_util") +local enable_debug = require("apisix.debug").enable_debug +local wasm = require("apisix.wasm") +local expr = require("resty.expr.v1") +local apisix_ssl = require("apisix.ssl") +local re_split = require("ngx.re").split +local ngx = ngx +local crc32 = ngx.crc32_short +local ngx_exit = ngx.exit +local pkg_loaded = package.loaded +local sort_tab = table.sort +local pcall = pcall +local ipairs = ipairs +local pairs = pairs +local type = type +local local_plugins = core.table.new(32, 0) +local tostring = tostring +local error = error +-- make linter happy to avoid error: getting the Lua global "load" +-- luacheck: globals load, ignore lua_load +local lua_load = load +local is_http = ngx.config.subsystem == "http" +local local_plugins_hash = core.table.new(0, 32) +local stream_local_plugins = core.table.new(32, 0) +local stream_local_plugins_hash = core.table.new(0, 32) + + +local merged_route = core.lrucache.new({ + ttl = 300, count = 512 +}) +local merged_stream_route = core.lrucache.new({ + ttl = 300, count = 512 +}) +local expr_lrucache = core.lrucache.new({ + ttl = 300, count = 512 +}) +local meta_pre_func_load_lrucache = core.lrucache.new({ + ttl = 300, count = 512 +}) +local local_conf +local check_plugin_metadata + +local _M = { + version = 0.3, + + load_times = 0, + plugins = local_plugins, + plugins_hash = local_plugins_hash, + + stream_load_times= 0, + stream_plugins = stream_local_plugins, + stream_plugins_hash = 
stream_local_plugins_hash, +} + + +local function plugin_attr(name) + -- TODO: get attr from synchronized data + local local_conf = core.config.local_conf() + return core.table.try_read_attr(local_conf, "plugin_attr", name) +end +_M.plugin_attr = plugin_attr + + +local function sort_plugin(l, r) + return l.priority > r.priority +end + +local function custom_sort_plugin(l, r) + return l._meta.priority > r._meta.priority +end + +local function check_disable(plugin_conf) + if not plugin_conf then + return nil + end + + if not plugin_conf._meta then + return nil + end + + if type(plugin_conf._meta) ~= "table" then + return nil + end + + return plugin_conf._meta.disable +end + +local PLUGIN_TYPE_HTTP = 1 +local PLUGIN_TYPE_STREAM = 2 +local PLUGIN_TYPE_HTTP_WASM = 3 +local function unload_plugin(name, plugin_type) + if plugin_type == PLUGIN_TYPE_HTTP_WASM then + return + end + + local pkg_name = "apisix.plugins." .. name + if plugin_type == PLUGIN_TYPE_STREAM then + pkg_name = "apisix.stream.plugins." .. name + end + + local old_plugin = pkg_loaded[pkg_name] + if old_plugin and type(old_plugin.destroy) == "function" then + old_plugin.destroy() + end + + pkg_loaded[pkg_name] = nil +end + + +local function load_plugin(name, plugins_list, plugin_type) + local ok, plugin + if plugin_type == PLUGIN_TYPE_HTTP_WASM then + -- for wasm plugin, we pass the whole attrs instead of name + ok, plugin = wasm.require(name) + name = name.name + else + local pkg_name = "apisix.plugins." .. name + if plugin_type == PLUGIN_TYPE_STREAM then + pkg_name = "apisix.stream.plugins." .. 
name + end + + ok, plugin = pcall(require, pkg_name) + end + + if not ok then + core.log.error("failed to load plugin [", name, "] err: ", plugin) + return + end + + if not plugin.priority then + core.log.error("invalid plugin [", name, + "], missing field: priority") + return + end + + if not plugin.version then + core.log.error("invalid plugin [", name, "] missing field: version") + return + end + + if type(plugin.schema) ~= "table" then + core.log.error("invalid plugin [", name, "] schema field") + return + end + + if not plugin.schema.properties then + plugin.schema.properties = {} + end + + local properties = plugin.schema.properties + local plugin_injected_schema = core.schema.plugin_injected_schema + + if plugin.schema['$comment'] ~= plugin_injected_schema['$comment'] then + if properties._meta then + core.log.error("invalid plugin [", name, + "]: found forbidden '_meta' field in the schema") + return + end + + properties._meta = plugin_injected_schema._meta + -- new injected fields should be added under `_meta` + -- 1. so we won't break user's code when adding any new injected fields + -- 2. 
the semantics is clear, especially in the doc and in the caller side + + plugin.schema['$comment'] = plugin_injected_schema['$comment'] + end + + plugin.name = name + plugin.attr = plugin_attr(name) + core.table.insert(plugins_list, plugin) + + if plugin.init then + plugin.init() + end + + if plugin.workflow_handler then + plugin.workflow_handler() + end + + return +end + + +local function load(plugin_names, wasm_plugin_names) + local processed = {} + for _, name in ipairs(plugin_names) do + if processed[name] == nil then + processed[name] = true + end + end + for _, attrs in ipairs(wasm_plugin_names) do + if processed[attrs.name] == nil then + processed[attrs.name] = attrs + end + end + + core.log.warn("new plugins: ", core.json.delay_encode(processed)) + + for name, plugin in pairs(local_plugins_hash) do + local ty = PLUGIN_TYPE_HTTP + if plugin.type == "wasm" then + ty = PLUGIN_TYPE_HTTP_WASM + end + unload_plugin(name, ty) + end + + core.table.clear(local_plugins) + core.table.clear(local_plugins_hash) + + for name, value in pairs(processed) do + local ty = PLUGIN_TYPE_HTTP + if type(value) == "table" then + ty = PLUGIN_TYPE_HTTP_WASM + name = value + end + load_plugin(name, local_plugins, ty) + end + + -- sort by plugin's priority + if #local_plugins > 1 then + sort_tab(local_plugins, sort_plugin) + end + + for i, plugin in ipairs(local_plugins) do + local_plugins_hash[plugin.name] = plugin + if enable_debug() then + core.log.warn("loaded plugin and sort by priority:", + " ", plugin.priority, + " name: ", plugin.name) + end + end + + _M.load_times = _M.load_times + 1 + core.log.info("load plugin times: ", _M.load_times) + return true +end + + +local function load_stream(plugin_names) + local processed = {} + for _, name in ipairs(plugin_names) do + if processed[name] == nil then + processed[name] = true + end + end + + core.log.warn("new plugins: ", core.json.delay_encode(processed)) + + for name in pairs(stream_local_plugins_hash) do + unload_plugin(name, 
PLUGIN_TYPE_STREAM) + end + + core.table.clear(stream_local_plugins) + core.table.clear(stream_local_plugins_hash) + + for name in pairs(processed) do + load_plugin(name, stream_local_plugins, PLUGIN_TYPE_STREAM) + end + + -- sort by plugin's priority + if #stream_local_plugins > 1 then + sort_tab(stream_local_plugins, sort_plugin) + end + + for i, plugin in ipairs(stream_local_plugins) do + stream_local_plugins_hash[plugin.name] = plugin + if enable_debug() then + core.log.warn("loaded stream plugin and sort by priority:", + " ", plugin.priority, + " name: ", plugin.name) + end + end + + _M.stream_load_times = _M.stream_load_times + 1 + core.log.info("stream plugins: ", + core.json.delay_encode(stream_local_plugins, true)) + core.log.info("load stream plugin times: ", _M.stream_load_times) + return true +end + + +local function get_plugin_names(config) + local http_plugin_names + local stream_plugin_names + + if not config then + -- called during starting or hot reload in admin + local err + local_conf, err = core.config.local_conf(true) + if not local_conf then + -- the error is unrecoverable, so we need to raise it + error("failed to load the configuration file: " .. 
err) + end + + http_plugin_names = local_conf.plugins + stream_plugin_names = local_conf.stream_plugins + else + -- called during synchronizing plugin data + http_plugin_names = {} + stream_plugin_names = {} + local plugins_conf = config.value + -- plugins_conf can be nil when another instance writes into etcd key "/apisix/plugins/" + if not plugins_conf then + return true + end + + for _, conf in ipairs(plugins_conf) do + if conf.stream then + core.table.insert(stream_plugin_names, conf.name) + else + core.table.insert(http_plugin_names, conf.name) + end + end + end + + return false, http_plugin_names, stream_plugin_names +end + + +function _M.load(config) + local ignored, http_plugin_names, stream_plugin_names = get_plugin_names(config) + if ignored then + return local_plugins + end + + local exporter = require("apisix.plugins.prometheus.exporter") + + if ngx.config.subsystem == "http" then + if not http_plugin_names then + core.log.error("failed to read plugin list from local file") + else + local wasm_plugin_names = {} + if local_conf.wasm then + wasm_plugin_names = local_conf.wasm.plugins + end + + local ok, err = load(http_plugin_names, wasm_plugin_names) + if not ok then + core.log.error("failed to load plugins: ", err) + end + + local enabled = core.table.array_find(http_plugin_names, "prometheus") ~= nil + local active = exporter.get_prometheus() ~= nil + if not enabled then + exporter.destroy() + end + if enabled and not active then + exporter.http_init() + end + end + end + + if not stream_plugin_names then + core.log.warn("failed to read stream plugin list from local file") + else + local ok, err = load_stream(stream_plugin_names) + if not ok then + core.log.error("failed to load stream plugins: ", err) + end + end + + -- for test + return local_plugins +end + + +function _M.exit_worker() + for name, plugin in pairs(local_plugins_hash) do + local ty = PLUGIN_TYPE_HTTP + if plugin.type == "wasm" then + ty = PLUGIN_TYPE_HTTP_WASM + end + 
unload_plugin(name, ty) + end + + -- we need to load stream plugin so that we can check their schemas in + -- Admin API. Maybe we can avoid calling `load` in this case? So that + -- we don't need to call `destroy` too + for name in pairs(stream_local_plugins_hash) do + unload_plugin(name, PLUGIN_TYPE_STREAM) + end +end + + +local function trace_plugins_info_for_debug(ctx, plugins) + if not enable_debug() then + return + end + + if not plugins then + if is_http and not ngx.headers_sent then + core.response.add_header("Apisix-Plugins", "no plugin") + else + core.log.warn("Apisix-Plugins: no plugin") + end + + return + end + + local t = {} + for i = 1, #plugins, 2 do + core.table.insert(t, plugins[i].name) + end + if is_http and not ngx.headers_sent then + if ctx then + local debug_headers = ctx.debug_headers + if not debug_headers then + debug_headers = core.table.new(0, 5) + end + for i, v in ipairs(t) do + debug_headers[v] = true + end + ctx.debug_headers = debug_headers + end + else + core.log.warn("Apisix-Plugins: ", core.table.concat(t, ", ")) + end +end + + +local function meta_filter(ctx, plugin_name, plugin_conf) + local filter = plugin_conf._meta and plugin_conf._meta.filter + if not filter then + return true + end + + local match_cache_key = + ctx.conf_type .. "#" .. ctx.conf_id .. "#" + .. ctx.conf_version .. "#" .. plugin_name .. "#meta_filter_matched" + if ctx[match_cache_key] ~= nil then + return ctx[match_cache_key] + end + + local ex, ok, err + if ctx then + ex, err = expr_lrucache(plugin_name .. ctx.conf_type .. 
ctx.conf_id, + ctx.conf_version, expr.new, filter) + else + ex, err = expr.new(filter) + end + if not ex then + core.log.warn("failed to get the 'vars' expression: ", err , + " plugin_name: ", plugin_name) + return true + end + ok, err = ex:eval(ctx.var) + if err then + core.log.warn("failed to run the 'vars' expression: ", err, + " plugin_name: ", plugin_name) + return true + end + + ctx[match_cache_key] = ok + return ok +end + + +function _M.filter(ctx, conf, plugins, route_conf, phase) + local user_plugin_conf = conf.value.plugins + if user_plugin_conf == nil or + core.table.nkeys(user_plugin_conf) == 0 then + trace_plugins_info_for_debug(nil, nil) + -- when 'plugins' is given, always return 'plugins' itself instead + -- of another one + return plugins or core.tablepool.fetch("plugins", 0, 0) + end + + local custom_sort = false + local route_plugin_conf = route_conf and route_conf.value.plugins + plugins = plugins or core.tablepool.fetch("plugins", 32, 0) + for _, plugin_obj in ipairs(local_plugins) do + local name = plugin_obj.name + local plugin_conf = user_plugin_conf[name] + + if type(plugin_conf) ~= "table" then + goto continue + end + + if check_disable(plugin_conf) then + goto continue + end + + if plugin_obj.run_policy == "prefer_route" and route_plugin_conf ~= nil then + local plugin_conf_in_route = route_plugin_conf[name] + local disable_in_route = check_disable(plugin_conf_in_route) + if plugin_conf_in_route and not disable_in_route then + goto continue + end + end + + -- in the rewrite phase, the plugin executes in the following order: + -- 1. execute the rewrite phase of the plugins on route(including the auth plugins) + -- 2. merge plugins from consumer and route + -- 3. 
execute the rewrite phase of the plugins on consumer(phase: rewrite_in_consumer) + -- in this case, we need to skip the plugins that was already executed(step 1) + if phase == "rewrite_in_consumer" + and (not plugin_conf._from_consumer or plugin_obj.type == "auth") then + plugin_conf._skip_rewrite_in_consumer = true + end + + if plugin_conf._meta and plugin_conf._meta.priority then + custom_sort = true + end + + core.table.insert(plugins, plugin_obj) + core.table.insert(plugins, plugin_conf) + + ::continue:: + end + + trace_plugins_info_for_debug(ctx, plugins) + + if custom_sort then + local tmp_plugin_objs = core.tablepool.fetch("tmp_plugin_objs", 0, #plugins / 2) + local tmp_plugin_confs = core.tablepool.fetch("tmp_plugin_confs", #plugins / 2, 0) + + for i = 1, #plugins, 2 do + local plugin_obj = plugins[i] + local plugin_conf = plugins[i + 1] + + tmp_plugin_objs[plugin_conf] = plugin_obj + core.table.insert(tmp_plugin_confs, plugin_conf) + + if not plugin_conf._meta then + plugin_conf._meta = core.table.new(0, 1) + plugin_conf._meta.priority = plugin_obj.priority + else + if not plugin_conf._meta.priority then + plugin_conf._meta.priority = plugin_obj.priority + end + end + end + + sort_tab(tmp_plugin_confs, custom_sort_plugin) + + local index + for i = 1, #tmp_plugin_confs do + index = i * 2 - 1 + local plugin_conf = tmp_plugin_confs[i] + local plugin_obj = tmp_plugin_objs[plugin_conf] + plugins[index] = plugin_obj + plugins[index + 1] = plugin_conf + end + + core.tablepool.release("tmp_plugin_objs", tmp_plugin_objs) + core.tablepool.release("tmp_plugin_confs", tmp_plugin_confs) + end + + return plugins +end + + +function _M.stream_filter(user_route, plugins) + plugins = plugins or core.table.new(#stream_local_plugins * 2, 0) + local user_plugin_conf = user_route.value.plugins + if user_plugin_conf == nil then + trace_plugins_info_for_debug(nil, nil) + return plugins + end + + for _, plugin_obj in ipairs(stream_local_plugins) do + local name = plugin_obj.name + 
local plugin_conf = user_plugin_conf[name] + + local disable = check_disable(plugin_conf) + if type(plugin_conf) == "table" and not disable then + core.table.insert(plugins, plugin_obj) + core.table.insert(plugins, plugin_conf) + end + end + + trace_plugins_info_for_debug(nil, plugins) + + return plugins +end + + +local function merge_service_route(service_conf, route_conf) + local new_conf = core.table.deepcopy(service_conf, { shallows = {"self.value.upstream.parent"}}) + new_conf.value.service_id = new_conf.value.id + new_conf.value.id = route_conf.value.id + new_conf.modifiedIndex = route_conf.modifiedIndex + + if route_conf.value.plugins then + for name, conf in pairs(route_conf.value.plugins) do + if not new_conf.value.plugins then + new_conf.value.plugins = {} + end + + new_conf.value.plugins[name] = conf + end + end + + local route_upstream = route_conf.value.upstream + if route_upstream then + new_conf.value.upstream = route_upstream + -- when route's upstream override service's upstream, + -- the upstream.parent still point to the route + new_conf.value.upstream_id = nil + new_conf.has_domain = route_conf.has_domain + end + + if route_conf.value.upstream_id then + new_conf.value.upstream_id = route_conf.value.upstream_id + new_conf.has_domain = route_conf.has_domain + end + + if route_conf.value.script then + new_conf.value.script = route_conf.value.script + end + + if route_conf.value.timeout then + new_conf.value.timeout = route_conf.value.timeout + end + + if route_conf.value.name then + new_conf.value.name = route_conf.value.name + else + new_conf.value.name = nil + end + + if route_conf.value.hosts then + new_conf.value.hosts = route_conf.value.hosts + end + if not new_conf.value.hosts and route_conf.value.host then + new_conf.value.host = route_conf.value.host + end + + if route_conf.value.labels then + new_conf.value.labels = route_conf.value.labels + end + + -- core.log.info("merged conf : ", core.json.delay_encode(new_conf)) + return new_conf +end 
+ + +function _M.merge_service_route(service_conf, route_conf) + core.log.info("service conf: ", core.json.delay_encode(service_conf, true)) + core.log.info(" route conf: ", core.json.delay_encode(route_conf, true)) + + local route_service_key = route_conf.value.id .. "#" + .. route_conf.modifiedIndex .. "#" .. service_conf.modifiedIndex + return merged_route(route_service_key, service_conf, + merge_service_route, + service_conf, route_conf) +end + + +local function merge_service_stream_route(service_conf, route_conf) + -- because many fields in Service are not supported by stream route, + -- so we copy the stream route as base object + local new_conf = core.table.deepcopy(route_conf, { shallows = {"self.value.upstream.parent"}}) + if service_conf.value.plugins then + for name, conf in pairs(service_conf.value.plugins) do + if not new_conf.value.plugins then + new_conf.value.plugins = {} + end + + if not new_conf.value.plugins[name] then + new_conf.value.plugins[name] = conf + end + end + end + + new_conf.value.service_id = nil + + if not new_conf.value.upstream and service_conf.value.upstream then + new_conf.value.upstream = service_conf.value.upstream + end + + if not new_conf.value.upstream_id and service_conf.value.upstream_id then + new_conf.value.upstream_id = service_conf.value.upstream_id + end + + return new_conf +end + + +function _M.merge_service_stream_route(service_conf, route_conf) + core.log.info("service conf: ", core.json.delay_encode(service_conf, true)) + core.log.info(" stream route conf: ", core.json.delay_encode(route_conf, true)) + + local version = route_conf.modifiedIndex .. "#" .. service_conf.modifiedIndex + local route_service_key = route_conf.value.id .. "#" + .. 
version + return merged_stream_route(route_service_key, version, + merge_service_stream_route, + service_conf, route_conf) +end + + +local function merge_consumer_route(route_conf, consumer_conf, consumer_group_conf) + if not consumer_conf.plugins or + core.table.nkeys(consumer_conf.plugins) == 0 + then + core.log.info("consumer no plugins") + return route_conf + end + + local new_route_conf = core.table.deepcopy(route_conf, + { shallows = {"self.value.upstream.parent"}}) + + if consumer_group_conf then + for name, conf in pairs(consumer_group_conf.value.plugins) do + if not new_route_conf.value.plugins then + new_route_conf.value.plugins = {} + end + + if new_route_conf.value.plugins[name] == nil then + conf._from_consumer = true + end + new_route_conf.value.plugins[name] = conf + end + end + + for name, conf in pairs(consumer_conf.plugins) do + if not new_route_conf.value.plugins then + new_route_conf.value.plugins = {} + end + + if new_route_conf.value.plugins[name] == nil then + conf._from_consumer = true + end + new_route_conf.value.plugins[name] = conf + end + + core.log.info("merged conf : ", core.json.delay_encode(new_route_conf)) + return new_route_conf +end + + +function _M.merge_consumer_route(route_conf, consumer_conf, consumer_group_conf, api_ctx) + core.log.info("route conf: ", core.json.delay_encode(route_conf)) + core.log.info("consumer conf: ", core.json.delay_encode(consumer_conf)) + core.log.info("consumer group conf: ", core.json.delay_encode(consumer_group_conf)) + + local flag = route_conf.value.id .. "#" .. route_conf.modifiedIndex + .. "#" .. consumer_conf.id .. "#" .. consumer_conf.modifiedIndex + + if consumer_group_conf then + flag = flag .. "#" .. consumer_group_conf.value.id + .. "#" .. 
consumer_group_conf.modifiedIndex + end + + local new_conf = merged_route(flag, api_ctx.conf_version, + merge_consumer_route, route_conf, consumer_conf, consumer_group_conf) + + -- some plugins like limit-count don't care if consumer changes + -- all consumers should share the same counter + api_ctx.conf_type_without_consumer = api_ctx.conf_type + api_ctx.conf_version_without_consumer = api_ctx.conf_version + api_ctx.conf_id_without_consumer = api_ctx.conf_id + + api_ctx.conf_type = api_ctx.conf_type .. "&consumer" + api_ctx.conf_version = api_ctx.conf_version .. "&" .. + api_ctx.consumer_ver + api_ctx.conf_id = api_ctx.conf_id .. "&" .. api_ctx.consumer_name + + if consumer_group_conf then + api_ctx.conf_type = api_ctx.conf_type .. "&consumer_group" + api_ctx.conf_version = api_ctx.conf_version .. "&" .. consumer_group_conf.modifiedIndex + api_ctx.conf_id = api_ctx.conf_id .. "&" .. consumer_group_conf.value.id + end + + return new_conf, new_conf ~= route_conf +end + + +local init_plugins_syncer +do + local plugins_conf + + function init_plugins_syncer() + local err + plugins_conf, err = core.config.new("/plugins", { + automatic = true, + item_schema = core.schema.plugins, + single_item = true, + filter = function(item) + -- we need to pass 'item' instead of plugins_conf because + -- the latter one is nil at the first run + _M.load(item) + end, + }) + if not plugins_conf then + error("failed to create etcd instance for fetching /plugins : " .. 
err) + end + end +end + + +function _M.init_worker() + local _, http_plugin_names, stream_plugin_names = get_plugin_names() + + -- some plugins need to be initialized in init* phases + if is_http and core.table.array_find(http_plugin_names, "prometheus") then + local prometheus_enabled_in_stream = + core.table.array_find(stream_plugin_names, "prometheus") + require("apisix.plugins.prometheus.exporter").http_init(prometheus_enabled_in_stream) + elseif not is_http and core.table.array_find(stream_plugin_names, "prometheus") then + require("apisix.plugins.prometheus.exporter").stream_init() + end + + -- someone's plugin needs to be initialized after prometheus + -- see https://github.com/apache/apisix/issues/3286 + _M.load() + + if local_conf and not local_conf.apisix.enable_admin then + init_plugins_syncer() + end + + local plugin_metadatas, err = core.config.new("/plugin_metadata", + { + automatic = true, + checker = check_plugin_metadata + } + ) + if not plugin_metadatas then + error("failed to create etcd instance for fetching /plugin_metadatas : " + .. 
err) + end + + _M.plugin_metadatas = plugin_metadatas +end + + +function _M.plugin_metadata(name) + return _M.plugin_metadatas:get(name) +end + + +function _M.get(name) + return local_plugins_hash and local_plugins_hash[name] +end + + +function _M.get_stream(name) + return stream_local_plugins_hash and stream_local_plugins_hash[name] +end + + +function _M.get_all(attrs) + local http_plugins = {} + local stream_plugins = {} + + if local_plugins_hash then + for name, plugin_obj in pairs(local_plugins_hash) do + http_plugins[name] = core.table.pick(plugin_obj, attrs) + end + end + + if stream_local_plugins_hash then + for name, plugin_obj in pairs(stream_local_plugins_hash) do + stream_plugins[name] = core.table.pick(plugin_obj, attrs) + end + end + + return http_plugins, stream_plugins +end + + +-- conf_version returns a version which only depends on the value of conf, +-- instead of where this plugin conf belongs to +function _M.conf_version(conf) + if not conf._version then + local data = core.json.stably_encode(conf) + conf._version = tostring(crc32(data)) + core.log.info("init plugin-level conf version: ", conf._version, ", from ", data) + end + + return conf._version +end + + +local function check_single_plugin_schema(name, plugin_conf, schema_type, skip_disabled_plugin) + core.log.info("check plugin schema, name: ", name, ", configurations: ", + core.json.delay_encode(plugin_conf, true)) + if type(plugin_conf) ~= "table" then + return false, "invalid plugin conf " .. + core.json.encode(plugin_conf, true) .. + " for plugin [" .. name .. "]" + end + + local plugin_obj = local_plugins_hash[name] + if not plugin_obj then + if skip_disabled_plugin then + return true + else + return false, "unknown plugin [" .. name .. "]" + end + end + + if plugin_obj.check_schema then + local ok, err = plugin_obj.check_schema(plugin_conf, schema_type) + if not ok then + return false, "failed to check the configuration of plugin " + .. name .. " err: " .. 
err + end + + if plugin_conf._meta then + if plugin_conf._meta.filter then + ok, err = expr.new(plugin_conf._meta.filter) + if not ok then + return nil, "failed to validate the 'vars' expression: " .. err + end + end + + if plugin_conf._meta.pre_function then + local pre_function, err = meta_pre_func_load_lrucache(plugin_conf._meta.pre_function + , "", + lua_load, + plugin_conf._meta.pre_function, "meta pre_function") + if not pre_function then + return nil, "failed to load _meta.pre_function in plugin " .. name .. ": " + .. err + end + end + end + end + + return true +end + + +local enable_data_encryption +local function enable_gde() + if enable_data_encryption == nil then + enable_data_encryption = + core.table.try_read_attr(local_conf, "apisix", "data_encryption", + "enable_encrypt_fields") and (core.config.type == "etcd") + _M.enable_data_encryption = enable_data_encryption + end + + return enable_data_encryption +end + + +local function get_plugin_schema_for_gde(name, schema_type) + local plugin_schema = local_plugins_hash and local_plugins_hash[name] + if not plugin_schema then + return nil + end + + local schema + if schema_type == core.schema.TYPE_CONSUMER then + -- when we use a non-auth plugin in the consumer, + -- where the consumer_schema field does not exist, + -- we need to fallback to it's schema for encryption and decryption. 
+ schema = plugin_schema.consumer_schema or plugin_schema.schema + elseif schema_type == core.schema.TYPE_METADATA then + schema = plugin_schema.metadata_schema + else + schema = plugin_schema.schema + end + + return schema +end + + +local function decrypt_conf(name, conf, schema_type) + if not enable_gde() then + return + end + local schema = get_plugin_schema_for_gde(name, schema_type) + if not schema then + core.log.warn("failed to get schema for plugin: ", name) + return + end + + if schema.encrypt_fields and not core.table.isempty(schema.encrypt_fields) then + for _, key in ipairs(schema.encrypt_fields) do + if conf[key] then + local decrypted, err = apisix_ssl.aes_decrypt_pkey(conf[key], "data_encrypt") + if not decrypted then + core.log.warn("failed to decrypt the conf of plugin [", name, + "] key [", key, "], err: ", err) + else + conf[key] = decrypted + end + elseif core.string.find(key, ".") then + -- decrypt fields has indents + local res, err = re_split(key, "\\.", "jo") + if not res then + core.log.warn("failed to split key [", key, "], err: ", err) + return + end + + -- we only support two levels + if conf[res[1]] and conf[res[1]][res[2]] then + local decrypted, err = apisix_ssl.aes_decrypt_pkey( + conf[res[1]][res[2]], "data_encrypt") + if not decrypted then + core.log.warn("failed to decrypt the conf of plugin [", name, + "] key [", key, "], err: ", err) + else + conf[res[1]][res[2]] = decrypted + end + end + end + end + end +end +_M.decrypt_conf = decrypt_conf + + +local function encrypt_conf(name, conf, schema_type) + if not enable_gde() then + return + end + local schema = get_plugin_schema_for_gde(name, schema_type) + if not schema then + core.log.warn("failed to get schema for plugin: ", name) + return + end + + if schema.encrypt_fields and not core.table.isempty(schema.encrypt_fields) then + for _, key in ipairs(schema.encrypt_fields) do + if conf[key] then + local encrypted, err = apisix_ssl.aes_encrypt_pkey(conf[key], "data_encrypt") + if 
not encrypted then + core.log.warn("failed to encrypt the conf of plugin [", name, + "] key [", key, "], err: ", err) + else + conf[key] = encrypted + end + elseif core.string.find(key, ".") then + -- encrypt fields has indents + local res, err = re_split(key, "\\.", "jo") + if not res then + core.log.warn("failed to split key [", key, "], err: ", err) + return + end + + -- we only support two levels + if conf[res[1]] and conf[res[1]][res[2]] then + local encrypted, err = apisix_ssl.aes_encrypt_pkey( + conf[res[1]][res[2]], "data_encrypt") + if not encrypted then + core.log.warn("failed to encrypt the conf of plugin [", name, + "] key [", key, "], err: ", err) + else + conf[res[1]][res[2]] = encrypted + end + end + end + end + end +end +_M.encrypt_conf = encrypt_conf + + +check_plugin_metadata = function(item) + local ok, err = check_single_plugin_schema(item.id, item, + core.schema.TYPE_METADATA, true) + if ok and enable_gde() then + decrypt_conf(item.id, item, core.schema.TYPE_METADATA) + end + + return ok, err +end + + +local function check_schema(plugins_conf, schema_type, skip_disabled_plugin) + for name, plugin_conf in pairs(plugins_conf) do + local ok, err = check_single_plugin_schema(name, plugin_conf, + schema_type, skip_disabled_plugin) + if not ok then + return false, err + end + end + + return true +end +_M.check_schema = check_schema + + +local function stream_check_schema(plugins_conf, schema_type, skip_disabled_plugin) + for name, plugin_conf in pairs(plugins_conf) do + core.log.info("check stream plugin schema, name: ", name, + ": ", core.json.delay_encode(plugin_conf, true)) + if type(plugin_conf) ~= "table" then + return false, "invalid plugin conf " .. + core.json.encode(plugin_conf, true) .. + " for plugin [" .. name .. "]" + end + + local plugin_obj = stream_local_plugins_hash[name] + if not plugin_obj then + if skip_disabled_plugin then + goto CONTINUE + else + return false, "unknown plugin [" .. name .. 
"]" + end + end + + if plugin_obj.check_schema then + local ok, err = plugin_obj.check_schema(plugin_conf, schema_type) + if not ok then + return false, "failed to check the configuration of " + .. "stream plugin [" .. name .. "]: " .. err + end + end + + ::CONTINUE:: + end + + return true +end +_M.stream_check_schema = stream_check_schema + + +function _M.plugin_checker(item, schema_type) + if item.plugins then + local ok, err = check_schema(item.plugins, schema_type, true) + + if ok and enable_gde() then + -- decrypt conf + for name, conf in pairs(item.plugins) do + decrypt_conf(name, conf, schema_type) + end + end + return ok, err + end + + return true +end + + +function _M.stream_plugin_checker(item, in_cp) + if item.plugins then + return stream_check_schema(item.plugins, nil, not in_cp) + end + + return true +end + +local function run_meta_pre_function(conf, api_ctx, name) + if conf._meta and conf._meta.pre_function then + local _, pre_function = pcall(meta_pre_func_load_lrucache(conf._meta.pre_function, "", + lua_load, + conf._meta.pre_function, "meta pre_function")) + local ok, err = pcall(pre_function, conf, api_ctx) + if not ok then + core.log.error("pre_function execution for plugin ", name, " failed: ", err) + end + end +end + +function _M.run_plugin(phase, plugins, api_ctx) + local plugin_run = false + api_ctx = api_ctx or ngx.ctx.api_ctx + if not api_ctx then + return + end + + plugins = plugins or api_ctx.plugins + if not plugins or #plugins == 0 then + return api_ctx + end + + if phase ~= "log" + and phase ~= "header_filter" + and phase ~= "body_filter" + and phase ~= "delayed_body_filter" + then + for i = 1, #plugins, 2 do + + if phase == "rewrite_in_consumer" and plugins[i + 1]._skip_rewrite_in_consumer then + goto CONTINUE + end + + local phase_func = phase == "rewrite_in_consumer" and plugins[i]["rewrite"] + or plugins[i][phase] + if phase_func then + local conf = plugins[i + 1] + if not meta_filter(api_ctx, plugins[i]["name"], conf)then + goto 
CONTINUE + end + + run_meta_pre_function(conf, api_ctx, plugins[i]["name"]) + plugin_run = true + api_ctx._plugin_name = plugins[i]["name"] + local code, body = phase_func(conf, api_ctx) + api_ctx._plugin_name = nil + if code or body then + if is_http then + if code >= 400 then + core.log.warn(plugins[i].name, " exits with http status code ", code) + + if conf._meta and conf._meta.error_response then + -- Whether or not the original error message is output, + -- always return the configured message + -- so the caller can't guess the real error + body = conf._meta.error_response + end + end + + core.response.exit(code, body) + else + if code >= 400 then + core.log.warn(plugins[i].name, " exits with status code ", code) + end + + ngx_exit(1) + end + end + end + + ::CONTINUE:: + end + return api_ctx, plugin_run + end + + for i = 1, #plugins, 2 do + local phase_func = plugins[i][phase] + local conf = plugins[i + 1] + if phase_func and meta_filter(api_ctx, plugins[i]["name"], conf) then + plugin_run = true + run_meta_pre_function(conf, api_ctx, plugins[i]["name"]) + api_ctx._plugin_name = plugins[i]["name"] + phase_func(conf, api_ctx) + api_ctx._plugin_name = nil + end + end + + return api_ctx, plugin_run +end + + +function _M.run_global_rules(api_ctx, global_rules, phase_name) + if global_rules and #global_rules > 0 then + local orig_conf_type = api_ctx.conf_type + local orig_conf_version = api_ctx.conf_version + local orig_conf_id = api_ctx.conf_id + + if phase_name == nil then + api_ctx.global_rules = global_rules + end + + local plugins = core.tablepool.fetch("plugins", 32, 0) + local values = global_rules + local route = api_ctx.matched_route + for _, global_rule in config_util.iterate_values(values) do + api_ctx.conf_type = "global_rule" + api_ctx.conf_version = global_rule.modifiedIndex + api_ctx.conf_id = global_rule.value.id + + core.table.clear(plugins) + plugins = _M.filter(api_ctx, global_rule, plugins, route) + if phase_name == nil then + 
_M.run_plugin("rewrite", plugins, api_ctx) + _M.run_plugin("access", plugins, api_ctx) + else + _M.run_plugin(phase_name, plugins, api_ctx) + end + end + core.tablepool.release("plugins", plugins) + + api_ctx.conf_type = orig_conf_type + api_ctx.conf_version = orig_conf_version + api_ctx.conf_id = orig_conf_id + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugin_config.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugin_config.lua new file mode 100644 index 0000000..88b17d4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugin_config.lua @@ -0,0 +1,88 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local plugin_checker = require("apisix.plugin").plugin_checker +local pairs = pairs +local error = error + + +local plugin_configs + + +local _M = { +} + + +function _M.init_worker() + local err + plugin_configs, err = core.config.new("/plugin_configs", { + automatic = true, + item_schema = core.schema.plugin_config, + checker = plugin_checker, + }) + if not plugin_configs then + error("failed to sync /plugin_configs: " .. 
err)
    end
end


-- Expose the synced plugin-config values together with their conf version.
-- Returns (nil, nil) until init_worker has populated the store.
function _M.plugin_configs()
    if not plugin_configs then
        return nil, nil
    end

    return plugin_configs.values, plugin_configs.conf_version
end


-- Look up a single plugin config object by its id.
function _M.get(id)
    return plugin_configs:get(id)
end


-- Merge the plugins of a plugin_config object into a route's own plugins.
-- Route-level plugins win on conflict. The pristine route plugin set is
-- remembered in orig_plugins so a later version of the plugin_config can be
-- re-merged from a clean base.
function _M.merge(route_conf, plugin_config)
    -- fast path: this plugin_config version was already merged
    if route_conf.prev_plugin_config_ver == plugin_config.modifiedIndex then
        return route_conf
    end

    if not route_conf.value.plugins then
        route_conf.value.plugins = {}
    end

    if route_conf.orig_plugins then
        -- restore the pristine route plugins before re-merging
        route_conf.value.plugins = route_conf.orig_plugins
    else
        -- first merge: remember the pristine route plugins
        route_conf.orig_plugins = route_conf.value.plugins
    end

    -- work on a copy so the backup stays untouched
    local merged = core.table.clone(route_conf.value.plugins)
    for name, value in pairs(plugin_config.value.plugins) do
        if not merged[name] then
            merged[name] = value
        end
    end
    route_conf.value.plugins = merged

    -- composite version so downstream caches see the change
    route_conf.modifiedIndex = route_conf.orig_modifiedIndex
                               .. "#" .. plugin_config.modifiedIndex
    route_conf.prev_plugin_config_ver = plugin_config.modifiedIndex

    return route_conf
end


return _M
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +require("resty.aws.config") -- to read env vars before initing aws module + +local core = require("apisix.core") +local aws = require("resty.aws") +local aws_instance + +local http = require("resty.http") +local fetch_secrets = require("apisix.secret").fetch_secrets + +local pairs = pairs +local unpack = unpack +local type = type +local ipairs = ipairs +local HTTP_INTERNAL_SERVER_ERROR = ngx.HTTP_INTERNAL_SERVER_ERROR +local HTTP_BAD_REQUEST = ngx.HTTP_BAD_REQUEST + +local moderation_categories_pattern = "^(PROFANITY|HATE_SPEECH|INSULT|".. + "HARASSMENT_OR_ABUSE|SEXUAL|VIOLENCE_OR_THREAT)$" +local schema = { + type = "object", + properties = { + comprehend = { + type = "object", + properties = { + access_key_id = { type = "string" }, + secret_access_key = { type = "string" }, + region = { type = "string" }, + endpoint = { + type = "string", + pattern = [[^https?://]] + }, + ssl_verify = { + type = "boolean", + default = true + } + }, + required = { "access_key_id", "secret_access_key", "region", } + }, + moderation_categories = { + type = "object", + patternProperties = { + [moderation_categories_pattern] = { + type = "number", + minimum = 0, + maximum = 1 + } + }, + additionalProperties = false + }, + moderation_threshold = { + type = "number", + minimum = 0, + maximum = 1, + default = 0.5 + } + }, + required = { "comprehend" }, +} + + +local _M = { + version = 0.1, + priority = 1050, + name = "ai-aws-content-moderation", + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +function 
_M.rewrite(conf, ctx) + conf = fetch_secrets(conf, true, conf, "") + if not conf then + return HTTP_INTERNAL_SERVER_ERROR, "failed to retrieve secrets from conf" + end + + local body, err = core.request.get_body() + if not body then + return HTTP_BAD_REQUEST, err + end + + local comprehend = conf.comprehend + + if not aws_instance then + aws_instance = aws() + end + local credentials = aws_instance:Credentials({ + accessKeyId = comprehend.access_key_id, + secretAccessKey = comprehend.secret_access_key, + sessionToken = comprehend.session_token, + }) + + local default_endpoint = "https://comprehend." .. comprehend.region .. ".amazonaws.com" + local scheme, host, port = unpack(http:parse_uri(comprehend.endpoint or default_endpoint)) + local endpoint = scheme .. "://" .. host + aws_instance.config.endpoint = endpoint + aws_instance.config.ssl_verify = comprehend.ssl_verify + + local comprehend = aws_instance:Comprehend({ + credentials = credentials, + endpoint = endpoint, + region = comprehend.region, + port = port, + }) + + local res, err = comprehend:detectToxicContent({ + LanguageCode = "en", + TextSegments = {{ + Text = body + }}, + }) + + if not res then + core.log.error("failed to send request to ", endpoint, ": ", err) + return HTTP_INTERNAL_SERVER_ERROR, err + end + + local results = res.body and res.body.ResultList + if type(results) ~= "table" or core.table.isempty(results) then + return HTTP_INTERNAL_SERVER_ERROR, "failed to get moderation results from response" + end + + for _, result in ipairs(results) do + if conf.moderation_categories then + for _, item in pairs(result.Labels) do + if not conf.moderation_categories[item.Name] then + goto continue + end + if item.Score > conf.moderation_categories[item.Name] then + return HTTP_BAD_REQUEST, "request body exceeds " .. item.Name .. 
" threshold" + end + ::continue:: + end + end + + if result.Toxicity > conf.moderation_threshold then + return HTTP_BAD_REQUEST, "request body exceeds toxicity threshold" + end + end +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/aimlapi.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/aimlapi.lua new file mode 100644 index 0000000..dad1014 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/aimlapi.lua @@ -0,0 +1,24 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +return require("apisix.plugins.ai-drivers.openai-base").new( + { + host = "api.aimlapi.com", + path = "/chat/completions", + port = 443 + } +) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/deepseek.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/deepseek.lua new file mode 100644 index 0000000..19c2e90 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/deepseek.lua @@ -0,0 +1,24 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. 
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +return require("apisix.plugins.ai-drivers.openai-base").new( + { + host = "api.deepseek.com", + path = "/chat/completions", + port = 443 + } +) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/openai-base.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/openai-base.lua new file mode 100644 index 0000000..0913426 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/openai-base.lua @@ -0,0 +1,255 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local _M = {} + +local mt = { + __index = _M +} + +local CONTENT_TYPE_JSON = "application/json" + +local core = require("apisix.core") +local http = require("resty.http") +local url = require("socket.url") +local ngx_re = require("ngx.re") + +local ngx_print = ngx.print +local ngx_flush = ngx.flush + +local pairs = pairs +local type = type +local ipairs = ipairs +local setmetatable = setmetatable + +local HTTP_INTERNAL_SERVER_ERROR = ngx.HTTP_INTERNAL_SERVER_ERROR +local HTTP_GATEWAY_TIMEOUT = ngx.HTTP_GATEWAY_TIMEOUT + + +function _M.new(opts) + + local self = { + host = opts.host, + port = opts.port, + path = opts.path, + } + return setmetatable(self, mt) +end + + +function _M.validate_request(ctx) + local ct = core.request.header(ctx, "Content-Type") or CONTENT_TYPE_JSON + if not core.string.has_prefix(ct, CONTENT_TYPE_JSON) then + return nil, "unsupported content-type: " .. ct .. ", only application/json is supported" + end + + local request_table, err = core.request.get_json_request_body_table() + if not request_table then + return nil, err + end + + return request_table, nil +end + + +local function handle_error(err) + if core.string.find(err, "timeout") then + return HTTP_GATEWAY_TIMEOUT + end + return HTTP_INTERNAL_SERVER_ERROR +end + + +local function read_response(ctx, res) + local body_reader = res.body_reader + if not body_reader then + core.log.warn("AI service sent no response body") + return HTTP_INTERNAL_SERVER_ERROR + end + + local content_type = res.headers["Content-Type"] + core.response.set_header("Content-Type", content_type) + + if content_type and core.string.find(content_type, "text/event-stream") then + while true do + local chunk, err = body_reader() -- will read chunk by chunk + if err then + core.log.warn("failed to read response chunk: ", err) + return handle_error(err) + end + if not chunk then + return + end + + ngx_print(chunk) + ngx_flush(true) + + local events, err = ngx_re.split(chunk, "\n") + if err then + 
core.log.warn("failed to split response chunk [", chunk, "] to events: ", err) + goto CONTINUE + end + + for _, event in ipairs(events) do + if not core.string.find(event, "data:") or core.string.find(event, "[DONE]") then + goto CONTINUE + end + + local parts, err = ngx_re.split(event, ":", nil, nil, 2) + if err then + core.log.warn("failed to split data event [", event, "] to parts: ", err) + goto CONTINUE + end + + if #parts ~= 2 then + core.log.warn("malformed data event: ", event) + goto CONTINUE + end + + local data, err = core.json.decode(parts[2]) + if err then + core.log.warn("failed to decode data event [", parts[2], "] to json: ", err) + goto CONTINUE + end + + -- usage field is null for non-last events, null is parsed as userdata type + if data and data.usage and type(data.usage) ~= "userdata" then + core.log.info("got token usage from ai service: ", + core.json.delay_encode(data.usage)) + ctx.ai_token_usage = { + prompt_tokens = data.usage.prompt_tokens or 0, + completion_tokens = data.usage.completion_tokens or 0, + total_tokens = data.usage.total_tokens or 0, + } + end + end + + ::CONTINUE:: + end + end + + local raw_res_body, err = res:read_body() + if not raw_res_body then + core.log.warn("failed to read response body: ", err) + return handle_error(err) + end + local res_body, err = core.json.decode(raw_res_body) + if err then + core.log.warn("invalid response body from ai service: ", raw_res_body, " err: ", err, + ", it will cause token usage not available") + else + core.log.info("got token usage from ai service: ", core.json.delay_encode(res_body.usage)) + ctx.ai_token_usage = { + prompt_tokens = res_body.usage and res_body.usage.prompt_tokens or 0, + completion_tokens = res_body.usage and res_body.usage.completion_tokens or 0, + total_tokens = res_body.usage and res_body.usage.total_tokens or 0, + } + end + return res.status, raw_res_body +end + + +function _M.request(self, ctx, conf, request_table, extra_opts) + local httpc, err = http.new() + 
if not httpc then + core.log.error("failed to create http client to send request to LLM server: ", err) + return HTTP_INTERNAL_SERVER_ERROR + end + httpc:set_timeout(conf.timeout) + + local endpoint = extra_opts.endpoint + local parsed_url + if endpoint then + parsed_url = url.parse(endpoint) + end + + local scheme = parsed_url and parsed_url.scheme or "https" + local host = parsed_url and parsed_url.host or self.host + local port = parsed_url and parsed_url.port + if not port then + if scheme == "https" then + port = 443 + else + port = 80 + end + end + local ok, err = httpc:connect({ + scheme = scheme, + host = host, + port = port, + ssl_verify = conf.ssl_verify, + ssl_server_name = parsed_url and parsed_url.host or self.host, + }) + + if not ok then + core.log.warn("failed to connect to LLM server: ", err) + return handle_error(err) + end + + local query_params = extra_opts.query_params + + if type(parsed_url) == "table" and parsed_url.query and #parsed_url.query > 0 then + local args_tab = core.string.decode_args(parsed_url.query) + if type(args_tab) == "table" then + core.table.merge(query_params, args_tab) + end + end + + local path = (parsed_url and parsed_url.path or self.path) + + local headers = extra_opts.headers + headers["Content-Type"] = "application/json" + local params = { + method = "POST", + headers = headers, + ssl_verify = conf.ssl_verify, + path = path, + query = query_params + } + + if extra_opts.model_options then + for opt, val in pairs(extra_opts.model_options) do + request_table[opt] = val + end + end + + local req_json, err = core.json.encode(request_table) + if not req_json then + return nil, err + end + + params.body = req_json + + local res, err = httpc:request(params) + if not res then + core.log.warn("failed to send request to LLM server: ", err) + return handle_error(err) + end + + local code, body = read_response(ctx, res) + + if conf.keepalive then + local ok, err = httpc:set_keepalive(conf.keepalive_timeout, conf.keepalive_pool) 
+ if not ok then + core.log.warn("failed to keepalive connection: ", err) + end + end + + return code, body +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/openai-compatible.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/openai-compatible.lua new file mode 100644 index 0000000..b6c21cf --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/openai-compatible.lua @@ -0,0 +1,18 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +return require("apisix.plugins.ai-drivers.openai-base").new({}) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/openai.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/openai.lua new file mode 100644 index 0000000..e922c8b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/openai.lua @@ -0,0 +1,24 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +return require("apisix.plugins.ai-drivers.openai-base").new( + { + host = "api.openai.com", + path = "/v1/chat/completions", + port = 443 + } +) diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/schema.lua new file mode 100644 index 0000000..7a469bd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-drivers/schema.lua @@ -0,0 +1,44 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
local _M = {}

-- JSON schema for OpenAI-style chat completion request bodies; used by the
-- ai-driver plugins to validate incoming requests before proxying them.
_M.chat_request_schema = {
    type = "object",
    properties = {
        messages = {
            type = "array",
            minItems = 1,
            items = {
                properties = {
                    role = {
                        type = "string",
                        enum = {"system", "user", "assistant"}
                    },
                    content = {
                        type = "string",
                        -- JSON Schema requires minLength to be a
                        -- non-negative integer; the original used the
                        -- string "1", which schema validators reject or
                        -- silently ignore
                        minLength = 1,
                    },
                },
                additionalProperties = false,
                required = {"role", "content"},
            },
        }
    },
    required = {"messages"}
}

return _M
local core = require("apisix.core")
local ngx = ngx
local pairs = pairs
local ipairs = ipairs
local EMPTY = {}

-- Schema for a single chat message entry.
local prompt_schema = {
    properties = {
        role = {
            type = "string",
            enum = { "system", "user", "assistant" }
        },
        content = {
            type = "string",
            minLength = 1,
        }
    },
    required = { "role", "content" }
}

local prompts = {
    type = "array",
    items = prompt_schema
}

local schema = {
    type = "object",
    properties = {
        prepend = prompts,
        append = prompts,
    },
    anyOf = {
        { required = { "prepend" } },
        { required = { "append" } },
        { required = { "append", "prepend" } },
    },
}


local _M = {
    version = 0.1,
    priority = 1070,
    name = "ai-prompt-decorator",
    schema = schema,
}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- Read and JSON-decode the request body.
-- Returns the decoded table, or nil plus an error table for the client.
local function get_request_body_table()
    local body, err = core.request.get_body()
    if not body then
        return nil, { message = "could not get body: " .. err }
    end

    local body_tab, err = core.json.decode(body)
    if not body_tab then
        return nil, { message = "could not get parse JSON request body: " .. err }
    end

    return body_tab
end


-- Rebuild body_tab.messages as prepend .. original messages .. append.
-- Bug fix: the original inserted directly into conf.prepend (or, when
-- prepend was absent, into the shared module-level EMPTY table), so
-- decorations accumulated across requests and corrupted the plugin conf.
-- We now always build a fresh table and leave conf untouched.
local function decorate(conf, body_tab)
    local new_messages = {}

    for _, message in ipairs(conf.prepend or EMPTY) do
        core.table.insert_tail(new_messages, message)
    end

    -- ipairs (not pairs) guarantees array-order iteration
    for _, message in ipairs(body_tab.messages) do
        core.table.insert_tail(new_messages, message)
    end

    for _, message in ipairs(conf.append or EMPTY) do
        core.table.insert_tail(new_messages, message)
    end

    body_tab.messages = new_messages
end
err } + end + + ngx.req.set_body_data(new_jbody) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-prompt-guard.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-prompt-guard.lua new file mode 100644 index 0000000..fd6a931 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-prompt-guard.lua @@ -0,0 +1,153 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local ngx = ngx +local ipairs = ipairs +local table = table +local re_compile = require("resty.core.regex").re_match_compile +local re_find = ngx.re.find + +local plugin_name = "ai-prompt-guard" + +local schema = { + type = "object", + properties = { + match_all_roles = { + type = "boolean", + default = false, + }, + match_all_conversation_history = { + type = "boolean", + default = false, + }, + allow_patterns = { + type = "array", + items = {type = "string"}, + default = {}, + }, + deny_patterns = { + type = "array", + items = {type = "string"}, + default = {}, + }, + }, +} + +local _M = { + version = 0.1, + priority = 1072, + name = plugin_name, + schema = schema, +} + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + -- Validate allow_patterns + for _, pattern in ipairs(conf.allow_patterns) do + local compiled = re_compile(pattern, "jou") + if not compiled then + return false, "invalid allow_pattern: " .. pattern + end + end + + -- Validate deny_patterns + for _, pattern in ipairs(conf.deny_patterns) do + local compiled = re_compile(pattern, "jou") + if not compiled then + return false, "invalid deny_pattern: " .. 
pattern
        end
    end

    return true
end

-- Select which messages are subject to pattern checks: the whole
-- conversation when match_all_conversation_history is set, otherwise only
-- the most recent message.
local function get_content_to_check(conf, messages)
    if conf.match_all_conversation_history then
        return messages
    end

    local selected = {}
    local last_msg = messages[#messages]
    if last_msg then
        core.table.insert(selected, last_msg)
    end
    return selected
end

-- Reject requests whose message content fails the allow/deny pattern checks.
function _M.access(conf, ctx)
    local body = core.request.get_body()
    if not body then
        core.log.error("Empty request body")
        return 400, {message = "Empty request body"}
    end

    local json_body, err = core.json.decode(body)
    if err then
        return 400, {message = err}
    end

    local messages = get_content_to_check(conf, json_body.messages or {})

    if not conf.match_all_roles then
        -- keep only user-authored messages
        local user_msgs = {}
        for _, msg in ipairs(messages) do
            if msg.role == "user" then
                core.table.insert(user_msgs, msg)
            end
        end
        messages = user_msgs
    end

    if #messages == 0 then
        -- nothing to check
        return 200
    end

    -- join all message contents into one string for the regex checks
    local parts = {}
    for _, msg in ipairs(messages) do
        if msg.content then
            core.table.insert(parts, msg.content)
        end
    end
    local content_to_check = table.concat(parts, " ")

    if #conf.allow_patterns > 0 then
        local any_allowed = false
        for _, pattern in ipairs(conf.allow_patterns) do
            if re_find(content_to_check, pattern, "jou") then
                any_allowed = true
                break
            end
        end

        if not any_allowed then
            return 400, {message = "Request doesn't match allow patterns"}
        end
    end

    for _, pattern in ipairs(conf.deny_patterns) do
        if re_find(content_to_check, pattern, "jou") then
            return 400, {message = "Request contains prohibited content"}
        end
    end
end

return _M
-- diff: new file index 0000000..d2c3669
-- +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-prompt-template.lua
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local body_transformer = require("apisix.plugins.body-transformer")
local ipairs = ipairs

-- Schema for one chat message inside a configured template.
local prompt_schema = {
    properties = {
        role = {
            type = "string",
            enum = { "system", "user", "assistant" }
        },
        content = {
            type = "string",
            minLength = 1,
        }
    },
    required = { "role", "content" }
}

local prompts = {
    type = "array",
    minItems = 1,
    items = prompt_schema
}

-- Plugin configuration: a non-empty list of named templates, each carrying
-- a model name and a message list.
local schema = {
    type = "object",
    properties = {
        templates = {
            type = "array",
            minItems = 1,
            items = {
                type = "object",
                properties = {
                    name = {
                        type = "string",
                        minLength = 1,
                    },
                    template = {
                        type = "object",
                        properties = {
                            model = {
                                type = "string",
                                minLength = 1,
                            },
                            messages = prompts
                        }
                    }
                },
                required = {"name", "template"}
            }
        },
    },
    required = {"templates"},
}


local _M = {
    version = 0.1,
    priority = 1071,
    name = "ai-prompt-template",
    schema = schema,
}

-- Caches: template lookup by name, and the JSON-encoded form of a template
-- (keyed by the template table itself).
local templates_lrucache = core.lrucache.new({
    ttl = 300, count = 256
})

local templates_json_lrucache = core.lrucache.new({
    ttl = 300, count = 256
})

function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- Read and JSON-decode the request body.
-- Returns the decoded table, or nil plus a { message = ... } error table.
local function get_request_body_table()
    local body, err = core.request.get_body()
    if not body then
        -- BUG FIX: err may be nil when the request simply carries no body;
        -- concatenating nil would itself raise a Lua error.
        return nil, { message = "could not get body: " .. (err or "empty body") }
    end

    local body_tab, err = core.json.decode(body)
    if not body_tab then
        -- BUG FIX: the decode error used to be appended as a stray array
        -- element ({ message = "...", err }) and was lost on serialization;
        -- the message also had a typo ("could not get parse").
        return nil, { message = "could not parse JSON request body: " .. (err or "") }
    end

    return body_tab
end


-- Linear scan of the configured templates for a matching name.
local function find_template(conf, template_name)
    for _, template in ipairs(conf.templates) do
        if template.name == template_name then
            return template.template
        end
    end
    return nil
end

-- Replace the request body with the named template, delegating the actual
-- substitution to the body-transformer plugin.
function _M.rewrite(conf, ctx)
    local body_tab, err = get_request_body_table()
    if not body_tab then
        return 400, err
    end
    local template_name = body_tab.template_name
    if not template_name then
        return 400, { message = "template name is missing in request." }
    end

    local template = templates_lrucache(template_name, conf, find_template, conf, template_name)
    if not template then
        return 400, { message = "template: " .. template_name .. " not configured." }
    end

    local template_json = templates_json_lrucache(template, template, core.json.encode, template)
    core.log.info("sending template to body_transformer: ", template_json)
    return body_transformer.rewrite(
        {
            request = {
                template = template_json,
                input_format = "json"
            }
        },
        ctx
    )
end


return _M
-- ----------------------------------------------------------------------
-- diff --git a/.../apisix/plugins/ai-proxy-multi.lua (new file, index 0000000..7ac8bb2)
-- (Apache-2.0 license header of that file begins here in the original patch)
-- ----------------------------------------------------------------------
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local schema = require("apisix.plugins.ai-proxy.schema") +local base = require("apisix.plugins.ai-proxy.base") +local plugin = require("apisix.plugin") + +local require = require +local pcall = pcall +local ipairs = ipairs +local type = type + +local priority_balancer = require("apisix.balancer.priority") + +local pickers = {} +local lrucache_server_picker = core.lrucache.new({ + ttl = 300, count = 256 +}) + +local plugin_name = "ai-proxy-multi" +local _M = { + version = 0.5, + priority = 1041, + name = plugin_name, + schema = schema.ai_proxy_multi_schema, +} + + +local function get_chash_key_schema(hash_on) + if hash_on == "vars" then + return core.schema.upstream_hash_vars_schema + end + + if hash_on == "header" or hash_on == "cookie" then + return core.schema.upstream_hash_header_schema + end + + if hash_on == "consumer" then + return nil, nil + end + + if hash_on == "vars_combinations" then + return core.schema.upstream_hash_vars_combinations_schema + end + + return nil, "invalid hash_on type " .. 
hash_on +end + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema.ai_proxy_multi_schema, conf) + if not ok then + return false, err + end + + for _, instance in ipairs(conf.instances) do + local ai_driver, err = pcall(require, "apisix.plugins.ai-drivers." .. instance.provider) + if not ai_driver then + core.log.warn("fail to require ai provider: ", instance.provider, ", err", err) + return false, "ai provider: " .. instance.provider .. " is not supported." + end + end + local algo = core.table.try_read_attr(conf, "balancer", "algorithm") + local hash_on = core.table.try_read_attr(conf, "balancer", "hash_on") + local hash_key = core.table.try_read_attr(conf, "balancer", "key") + + if type(algo) == "string" and algo == "chash" then + if not hash_on then + return false, "must configure `hash_on` when balancer algorithm is chash" + end + + if hash_on ~= "consumer" and not hash_key then + return false, "must configure `hash_key` when balancer `hash_on` is not set to cookie" + end + + local key_schema, err = get_chash_key_schema(hash_on) + if err then + return false, "type is chash, err: " .. err + end + + if key_schema then + local ok, err = core.schema.check(key_schema, hash_key) + if not ok then + return false, "invalid configuration: " .. err + end + end + end + + return ok +end + + +local function transform_instances(new_instances, instance) + if not new_instances._priority_index then + new_instances._priority_index = {} + end + + if not new_instances[instance.priority] then + new_instances[instance.priority] = {} + core.table.insert(new_instances._priority_index, instance.priority) + end + + new_instances[instance.priority][instance.name] = instance.weight +end + + +local function create_server_picker(conf, ups_tab) + local picker = pickers[conf.balancer.algorithm] -- nil check + if not picker then + pickers[conf.balancer.algorithm] = require("apisix.balancer." .. 
conf.balancer.algorithm) + picker = pickers[conf.balancer.algorithm] + end + local new_instances = {} + for _, ins in ipairs(conf.instances) do + transform_instances(new_instances, ins) + end + + if #new_instances._priority_index > 1 then + core.log.info("new instances: ", core.json.delay_encode(new_instances)) + return priority_balancer.new(new_instances, ups_tab, picker) + end + core.log.info("upstream nodes: ", + core.json.delay_encode(new_instances[new_instances._priority_index[1]])) + return picker.new(new_instances[new_instances._priority_index[1]], ups_tab) +end + + +local function get_instance_conf(instances, name) + for _, ins in ipairs(instances) do + if ins.name == name then + return ins + end + end +end + + +local function pick_target(ctx, conf, ups_tab) + local server_picker = ctx.server_picker + if not server_picker then + server_picker = lrucache_server_picker(ctx.matched_route.key, plugin.conf_version(conf), + create_server_picker, conf, ups_tab) + end + if not server_picker then + return nil, nil, "failed to fetch server picker" + end + ctx.server_picker = server_picker + + local instance_name, err = server_picker.get(ctx) + if err then + return nil, nil, err + end + ctx.balancer_server = instance_name + if conf.fallback_strategy == "instance_health_and_rate_limiting" then + local ai_rate_limiting = require("apisix.plugins.ai-rate-limiting") + for _ = 1, #conf.instances do + if ai_rate_limiting.check_instance_status(nil, ctx, instance_name) then + break + end + core.log.info("ai instance: ", instance_name, + " is not available, try to pick another one") + server_picker.after_balance(ctx, true) + instance_name, err = server_picker.get(ctx) + if err then + return nil, nil, err + end + ctx.balancer_server = instance_name + end + end + + local instance_conf = get_instance_conf(conf.instances, instance_name) + return instance_name, instance_conf +end + + +local function pick_ai_instance(ctx, conf, ups_tab) + local instance_name, instance_conf, err + if 
#conf.instances == 1 then + instance_name = conf.instances[1].name + instance_conf = conf.instances[1] + else + instance_name, instance_conf, err = pick_target(ctx, conf, ups_tab) + end + + core.log.info("picked instance: ", instance_name) + return instance_name, instance_conf, err +end + + +function _M.access(conf, ctx) + local ups_tab = {} + local algo = core.table.try_read_attr(conf, "balancer", "algorithm") + if algo == "chash" then + local hash_on = core.table.try_read_attr(conf, "balancer", "hash_on") + local hash_key = core.table.try_read_attr(conf, "balancer", "key") + ups_tab["key"] = hash_key + ups_tab["hash_on"] = hash_on + end + + local name, ai_instance, err = pick_ai_instance(ctx, conf, ups_tab) + if err then + return 503, err + end + ctx.picked_ai_instance_name = name + ctx.picked_ai_instance = ai_instance + ctx.bypass_nginx_upstream = true +end + + +_M.before_proxy = base.before_proxy + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-proxy.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-proxy.lua new file mode 100644 index 0000000..fa7f5f2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-proxy.lua @@ -0,0 +1,57 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local schema = require("apisix.plugins.ai-proxy.schema")
local base = require("apisix.plugins.ai-proxy.base")

local require = require
local pcall = pcall

local plugin_name = "ai-proxy"

-- Proxies chat-completion requests to one statically configured LLM provider.
local _M = {
    version = 0.5,
    priority = 1040,
    name = plugin_name,
    schema = schema.ai_proxy_schema,
}


-- Validate the plugin configuration, then make sure a driver module exists
-- for the configured provider under apisix.plugins.ai-drivers.
function _M.check_schema(conf)
    local ok, err = core.schema.check(schema.ai_proxy_schema, conf)
    if not ok then
        return false, err
    end

    -- pcall(require, ...): `loaded` is the success flag, `load_err` the
    -- error value when the driver module cannot be required.
    local loaded, load_err = pcall(require, "apisix.plugins.ai-drivers." .. conf.provider)
    if loaded then
        return ok
    end

    core.log.warn("fail to require ai provider: ", conf.provider, ", err", load_err)
    return false, "ai provider: " .. conf.provider .. " is not supported."
end


-- With a single provider there is nothing to balance: the plugin conf itself
-- is the picked instance, and the nginx upstream is bypassed.
function _M.access(conf, ctx)
    ctx.picked_ai_instance_name = "ai-proxy"
    ctx.picked_ai_instance = conf
    ctx.bypass_nginx_upstream = true
end


_M.before_proxy = base.before_proxy


return _M
-- ----------------------------------------------------------------------
-- diff --git a/.../apisix/plugins/ai-proxy/base.lua (new file, index 0000000..9076260)
-- (Apache-2.0 license header of that file begins here in the original patch)
-- ----------------------------------------------------------------------
-- You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local core = require("apisix.core")
local require = require
local bad_request = ngx.HTTP_BAD_REQUEST

local _M = {}

-- Shared before_proxy handler for ai-proxy / ai-proxy-multi: validates the
-- incoming request against the driver of the AI instance picked during the
-- access phase, then hands the request to that driver.
function _M.before_proxy(conf, ctx)
    local instance = ctx.picked_ai_instance
    local driver = require("apisix.plugins.ai-drivers." .. instance.provider)

    local req_body, err = driver.validate_request(ctx)
    if not req_body then
        return bad_request, err
    end

    local auth = instance.auth
    local extra_opts = {
        endpoint = core.table.try_read_attr(instance, "override", "endpoint"),
        query_params = auth.query or {},
        headers = (auth.header or {}),
        model_options = instance.options,
    }

    -- On streaming requests, ask the upstream to append a usage record so
    -- token accounting (e.g. ai-rate-limiting's log phase) keeps working.
    if req_body.stream then
        req_body.stream_options = {
            include_usage = true
        }
    end

    return driver:request(ctx, conf, req_body, extra_opts)
end


return _M
-- ----------------------------------------------------------------------
-- diff --git a/.../apisix/plugins/ai-proxy/schema.lua (new file, index 0000000..0a3c028)
-- (Apache-2.0 license header of that file begins here in the original patch)
-- ----------------------------------------------------------------------
-- You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- Shared JSON-schema definitions for the ai-proxy and ai-proxy-multi plugins.
local _M = {}

-- Arbitrary header/query names mapped to string values.
local auth_item_schema = {
    type = "object",
    patternProperties = {
        ["^[a-zA-Z0-9._-]+$"] = {
            type = "string"
        }
    }
}

-- NOTE(review): `patternProperties` treats "header" / "query" as unanchored
-- regexes rather than literal keys; the sibling ai-request-rewrite plugin
-- declares the same shape with plain `properties`.  Kept as-is to avoid a
-- validation-behavior change -- confirm against upstream before tightening.
local auth_schema = {
    type = "object",
    patternProperties = {
        header = auth_item_schema,
        query = auth_item_schema,
    },
    additionalProperties = false,
}

local model_options_schema = {
    description = "Key/value settings for the model",
    type = "object",
    properties = {
        model = {
            type = "string",
            description = "Model to execute.",
        },
    },
    additionalProperties = true,
}

-- One entry per upstream AI service instance (used by ai-proxy-multi).
local ai_instance_schema = {
    type = "array",
    minItems = 1,
    items = {
        type = "object",
        properties = {
            name = {
                type = "string",
                minLength = 1,
                maxLength = 100,
                description = "Name of the AI service instance.",
            },
            provider = {
                type = "string",
                description = "Type of the AI service instance.",
                enum = {
                    "openai",
                    "deepseek",
                    "aimlapi",
                    "openai-compatible",
                }, -- add more providers later
            },
            priority = {
                type = "integer",
                description = "Priority of the provider for load balancing",
                default = 0,
            },
            weight = {
                type = "integer",
                minimum = 0,
            },
            auth = auth_schema,
            options = model_options_schema,
            override = {
                type = "object",
                properties = {
                    endpoint = {
                        type = "string",
                        description = "To be specified to override the endpoint of the AI Instance",
                    },
                },
            },
        },
        required = {"name", "provider", "auth", "weight"}
    },
}


_M.ai_proxy_schema = {
    type = "object",
    properties = {
        provider = {
            type = "string",
            description = "Type of the AI service instance.",
            enum = {
                "openai",
                "deepseek",
                "aimlapi",
                "openai-compatible",
            }, -- add more providers later

        },
        auth = auth_schema,
        options = model_options_schema,
        timeout = {
            type = "integer",
            minimum = 1,
            default = 30000,
            description = "timeout in milliseconds",
        },
        keepalive = {type = "boolean", default = true},
        keepalive_timeout = {
            type = "integer",
            minimum = 1000,
            default = 60000,
            description = "keepalive timeout in milliseconds",
        },
        keepalive_pool = {type = "integer", minimum = 1, default = 30},
        ssl_verify = {type = "boolean", default = true },
        override = {
            type = "object",
            properties = {
                endpoint = {
                    type = "string",
                    description = "To be specified to override the endpoint of the AI Instance",
                },
            },
        },
    },
    required = {"provider", "auth"}
}

_M.ai_proxy_multi_schema = {
    type = "object",
    properties = {
        balancer = {
            type = "object",
            properties = {
                algorithm = {
                    type = "string",
                    enum = { "chash", "roundrobin" },
                },
                hash_on = {
                    type = "string",
                    default = "vars",
                    enum = {
                        "vars",
                        "header",
                        "cookie",
                        "consumer",
                        "vars_combinations",
                    },
                },
                key = {
                    description = "the key of chash for dynamic load balancing",
                    type = "string",
                },
            },
            default = { algorithm = "roundrobin" }
        },
        instances = ai_instance_schema,
        fallback_strategy = {
            type = "string",
            enum = { "instance_health_and_rate_limiting" },
            default = "instance_health_and_rate_limiting",
        },
        timeout = {
            type = "integer",
            minimum = 1,
            default = 30000,
            description = "timeout in milliseconds",
        },
        keepalive = {type = "boolean", default = true},
        keepalive_timeout = {
            type = "integer",
            minimum = 1000,
            default = 60000,
            description = "keepalive timeout in milliseconds",
        },
        keepalive_pool = {type = "integer", minimum = 1, default = 30},
        ssl_verify = {type = "boolean", default = true },
    },
    required = {"instances"}
}

-- Schema for the chat request body proxied to the LLM.
_M.chat_request_schema = {
    type = "object",
    properties = {
        messages = {
            type = "array",
            minItems = 1,
            items = {
                properties = {
                    role = {
                        type = "string",
                        enum = {"system", "user", "assistant"}
                    },
                    content = {
                        type = "string",
                        -- BUG FIX: was the string "1"; JSON Schema requires
                        -- `minLength` to be a non-negative integer.
                        minLength = 1,
                    },
                },
                additionalProperties = false,
                required = {"role", "content"},
            },
        }
    },
    required = {"messages"}
}

return _M
-- ----------------------------------------------------------------------
-- diff --git a/.../apisix/plugins/ai-rag.lua (new file, index 0000000..0acd5f1)
-- (Apache-2.0 license header of that file follows in the original patch)
-- ----------------------------------------------------------------------
--
local next = next
local require = require
local ngx_req = ngx.req

local http = require("resty.http")
local core = require("apisix.core")

local azure_openai_embeddings = require("apisix.plugins.ai-rag.embeddings.azure_openai").schema
local azure_ai_search_schema = require("apisix.plugins.ai-rag.vector-search.azure_ai_search").schema

local HTTP_INTERNAL_SERVER_ERROR = ngx.HTTP_INTERNAL_SERVER_ERROR
local HTTP_BAD_REQUEST = ngx.HTTP_BAD_REQUEST

-- Plugin configuration: exactly one embeddings provider and exactly one
-- vector-search provider.
-- BUG FIX: removed a stray `type = "object"` entry that sat *inside*
-- `properties`, accidentally declaring a bogus property named "type" whose
-- subschema was a bare string (not a valid JSON-schema subschema).
local schema = {
    type = "object",
    properties = {
        embeddings_provider = {
            type = "object",
            properties = {
                azure_openai = azure_openai_embeddings
            },
            -- ensure only one provider can be configured while implementing support for
            -- other providers
            required = { "azure_openai" },
            maxProperties = 1,
        },
        vector_search_provider = {
            type = "object",
            properties = {
                azure_ai_search = azure_ai_search_schema
            },
            -- ensure only one provider can be configured while implementing support for
            -- other providers
            required = { "azure_ai_search" },
            maxProperties = 1
        },
    },
    required = { "embeddings_provider", "vector_search_provider" }
}

-- Request-body schema; the provider-specific subschemas are patched in at
-- request time in _M.access.  There is no yield between the patch and the
-- check, so mutating this module-level table is safe within a worker.
local request_schema = {
    type = "object",
    properties = {
        ai_rag = {
            type = "object",
            properties = {
                vector_search = {},
                embeddings = {},
            },
            required = { "vector_search", "embeddings" }
        }
    }
}

local _M = {
    version = 0.1,
    priority = 1060,
    name = "ai-rag",
    schema = schema,
}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- Compute embeddings for the request, run a vector search with them, then
-- inject the search result as an extra user message before the request is
-- proxied to the LLM.
function _M.access(conf, ctx)
    local httpc = http.new()
    local body_tab, err = core.request.get_json_request_body_table()
    if not body_tab then
        return HTTP_BAD_REQUEST, err
    end
    if not body_tab["ai_rag"] then
        -- BUG FIX: the message used to say "ai-rag" while the field actually
        -- checked (and documented by request_schema) is "ai_rag".
        core.log.error("request body must have \"ai_rag\" field")
        return HTTP_BAD_REQUEST
    end

    local embeddings_provider = next(conf.embeddings_provider)
    local embeddings_provider_conf = conf.embeddings_provider[embeddings_provider]
    local embeddings_driver = require("apisix.plugins.ai-rag.embeddings." .. embeddings_provider)

    local vector_search_provider = next(conf.vector_search_provider)
    local vector_search_provider_conf = conf.vector_search_provider[vector_search_provider]
    local vector_search_driver = require("apisix.plugins.ai-rag.vector-search." ..
                                         vector_search_provider)

    local vs_req_schema = vector_search_driver.request_schema
    local emb_req_schema = embeddings_driver.request_schema

    request_schema.properties.ai_rag.properties.vector_search = vs_req_schema
    request_schema.properties.ai_rag.properties.embeddings = emb_req_schema

    local ok, err = core.schema.check(request_schema, body_tab)
    if not ok then
        core.log.error("request body fails schema check: ", err)
        return HTTP_BAD_REQUEST
    end

    local embeddings, status, err = embeddings_driver.get_embeddings(embeddings_provider_conf,
                                                body_tab["ai_rag"].embeddings, httpc)
    if not embeddings then
        core.log.error("could not get embeddings: ", err)
        return status, err
    end

    local search_body = body_tab["ai_rag"].vector_search
    search_body.embeddings = embeddings
    local res, status, err = vector_search_driver.search(vector_search_provider_conf,
                                                         search_body, httpc)
    if not res then
        core.log.error("could not get vector_search result: ", err)
        return status, err
    end

    -- remove ai_rag from request body because their purpose is served
    -- also, these values will cause failure when proxying requests to LLM.
    body_tab["ai_rag"] = nil

    if not body_tab.messages then
        body_tab.messages = {}
    end

    local augment = {
        role = "user",
        content = res
    }
    core.table.insert_tail(body_tab.messages, augment)

    local req_body_json, err = core.json.encode(body_tab)
    if not req_body_json then
        return HTTP_INTERNAL_SERVER_ERROR, err
    end

    ngx_req.set_body_data(req_body_json)
end


return _M
-- ----------------------------------------------------------------------
-- diff --git a/.../apisix/plugins/ai-rag/embeddings/azure_openai.lua
-- (new file, index 0000000..b6bacbf; Apache-2.0 license header follows)
-- ----------------------------------------------------------------------
--
local core = require("apisix.core")
local HTTP_INTERNAL_SERVER_ERROR = ngx.HTTP_INTERNAL_SERVER_ERROR
local HTTP_OK = ngx.HTTP_OK
local type = type

local _M = {}

-- Provider configuration: the Azure OpenAI embeddings endpoint and its key.
_M.schema = {
    type = "object",
    properties = {
        endpoint = {
            type = "string",
        },
        api_key = {
            type = "string",
        },
    },
    required = { "endpoint", "api_key" }
}

-- POST the embeddings request to Azure OpenAI and return the first
-- embedding vector from the response.
-- Returns: embedding table, or nil + HTTP status + error detail.
function _M.get_embeddings(conf, body, httpc)
    local payload, encode_err = core.json.encode(body)
    if not payload then
        return nil, HTTP_INTERNAL_SERVER_ERROR, encode_err
    end

    local resp, req_err = httpc:request_uri(conf.endpoint, {
        method = "POST",
        headers = {
            ["Content-Type"] = "application/json",
            ["api-key"] = conf.api_key,
        },
        body = payload
    })

    if not resp or not resp.body then
        return nil, HTTP_INTERNAL_SERVER_ERROR, req_err
    end

    if resp.status ~= HTTP_OK then
        return nil, resp.status, resp.body
    end

    local parsed, decode_err = core.json.decode(resp.body)
    if not parsed then
        return nil, HTTP_INTERNAL_SERVER_ERROR, decode_err
    end

    if type(parsed.data) ~= "table" or core.table.isempty(parsed.data) then
        return nil, HTTP_INTERNAL_SERVER_ERROR, resp.body
    end

    -- Re-encoding is only a serializability check on the embedding; the raw
    -- (decoded) vector is what gets returned.
    local encoded, check_err = core.json.encode(parsed.data[1].embedding)
    if not encoded then
        return nil, HTTP_INTERNAL_SERVER_ERROR, check_err
    end

    return parsed.data[1].embedding
end


-- Per-request input accepted under body.ai_rag.embeddings.
_M.request_schema = {
    type = "object",
    properties = {
        input = {
            type = "string"
        }
    },
    required = { "input" }
}

return _M
-- ----------------------------------------------------------------------
-- diff --git a/.../apisix/plugins/ai-rag/vector-search/azure_ai_search.lua
-- (new file, index 0000000..7a01064; Apache-2.0 license header begins here)
-- ----------------------------------------------------------------------
-- See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local HTTP_INTERNAL_SERVER_ERROR = ngx.HTTP_INTERNAL_SERVER_ERROR
local HTTP_OK = ngx.HTTP_OK

local _M = {}

-- Provider configuration: the Azure AI Search endpoint and its key.
_M.schema = {
    type = "object",
    properties = {
        endpoint = {
            type = "string",
        },
        api_key = {
            type = "string",
        },
    },
    required = {"endpoint", "api_key"}
}


-- Run a vector query against Azure AI Search.
-- Returns the raw response body, or nil + HTTP status + error detail.
function _M.search(conf, search_body, httpc)
    local query = {
        vectorQueries = {
            {
                kind = "vector",
                vector = search_body.embeddings,
                fields = search_body.fields
            }
        }
    }

    local payload, encode_err = core.json.encode(query)
    if not payload then
        return nil, HTTP_INTERNAL_SERVER_ERROR, encode_err
    end

    local resp, req_err = httpc:request_uri(conf.endpoint, {
        method = "POST",
        headers = {
            ["Content-Type"] = "application/json",
            ["api-key"] = conf.api_key,
        },
        body = payload
    })

    if not resp or not resp.body then
        return nil, HTTP_INTERNAL_SERVER_ERROR, req_err
    end

    if resp.status ~= HTTP_OK then
        return nil, resp.status, resp.body
    end

    return resp.body
end


-- Per-request input accepted under body.ai_rag.vector_search.
_M.request_schema = {
    type = "object",
    properties = {
        fields = {
            type = "string"
        }
    },
    required = { "fields" }
}

return _M
-- ----------------------------------------------------------------------
-- diff --git a/.../apisix/plugins/ai-rate-limiting.lua
-- ----------------------------------------------------------------------
b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-rate-limiting.lua new file mode 100644 index 0000000..d8bf970 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-rate-limiting.lua @@ -0,0 +1,234 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local require = require +local setmetatable = setmetatable +local ipairs = ipairs +local type = type +local core = require("apisix.core") +local limit_count = require("apisix.plugins.limit-count.init") + +local plugin_name = "ai-rate-limiting" + +local instance_limit_schema = { + type = "object", + properties = { + name = {type = "string"}, + limit = {type = "integer", minimum = 1}, + time_window = {type = "integer", minimum = 1} + }, + required = {"name", "limit", "time_window"} +} + +local schema = { + type = "object", + properties = { + limit = {type = "integer", exclusiveMinimum = 0}, + time_window = {type = "integer", exclusiveMinimum = 0}, + show_limit_quota_header = {type = "boolean", default = true}, + limit_strategy = { + type = "string", + enum = {"total_tokens", "prompt_tokens", "completion_tokens"}, + default = "total_tokens", + description = "The strategy to limit the tokens" + }, + instances = { + type = "array", + items = instance_limit_schema, + minItems = 1, + }, + rejected_code = { + type = "integer", minimum = 200, maximum = 599, default = 503 + }, + rejected_msg = { + type = "string", minLength = 1 + }, + }, + dependencies = { + limit = {"time_window"}, + time_window = {"limit"} + }, + anyOf = { + { + required = {"limit", "time_window"} + }, + { + required = {"instances"} + } + } +} + +local _M = { + version = 0.1, + priority = 1030, + name = plugin_name, + schema = schema +} + +local limit_conf_cache = core.lrucache.new({ + ttl = 300, count = 512 +}) + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +local function transform_limit_conf(plugin_conf, instance_conf, instance_name) + local key = plugin_name .. 
"#global" + local limit = plugin_conf.limit + local time_window = plugin_conf.time_window + local name = instance_name or "" + if instance_conf then + name = instance_conf.name + key = instance_conf.name + limit = instance_conf.limit + time_window = instance_conf.time_window + end + return { + _vid = key, + + key = key, + count = limit, + time_window = time_window, + rejected_code = plugin_conf.rejected_code, + rejected_msg = plugin_conf.rejected_msg, + show_limit_quota_header = plugin_conf.show_limit_quota_header, + -- limit-count need these fields + policy = "local", + key_type = "constant", + allow_degradation = false, + sync_interval = -1, + + limit_header = "X-AI-RateLimit-Limit-" .. name, + remaining_header = "X-AI-RateLimit-Remaining-" .. name, + reset_header = "X-AI-RateLimit-Reset-" .. name, + } +end + + +local function fetch_limit_conf_kvs(conf) + local mt = { + __index = function(t, k) + if not conf.limit then + return nil + end + + local limit_conf = transform_limit_conf(conf, nil, k) + t[k] = limit_conf + return limit_conf + end + } + local limit_conf_kvs = setmetatable({}, mt) + local conf_instances = conf.instances or {} + for _, limit_conf in ipairs(conf_instances) do + limit_conf_kvs[limit_conf.name] = transform_limit_conf(conf, limit_conf) + end + return limit_conf_kvs +end + + +function _M.access(conf, ctx) + local ai_instance_name = ctx.picked_ai_instance_name + if not ai_instance_name then + return + end + + local limit_conf_kvs = limit_conf_cache(conf, nil, fetch_limit_conf_kvs, conf) + local limit_conf = limit_conf_kvs[ai_instance_name] + if not limit_conf then + return + end + local code, msg = limit_count.rate_limit(limit_conf, ctx, plugin_name, 1, true) + ctx.ai_rate_limiting = code and true or false + return code, msg +end + + +function _M.check_instance_status(conf, ctx, instance_name) + if conf == nil then + local plugins = ctx.plugins + for i = 1, #plugins, 2 do + if plugins[i]["name"] == plugin_name then + conf = plugins[i + 1] + end 
+ end + end + if not conf then + return true + end + + instance_name = instance_name or ctx.picked_ai_instance_name + if not instance_name then + return nil, "missing instance_name" + end + + if type(instance_name) ~= "string" then + return nil, "invalid instance_name" + end + + local limit_conf_kvs = limit_conf_cache(conf, nil, fetch_limit_conf_kvs, conf) + local limit_conf = limit_conf_kvs[instance_name] + if not limit_conf then + return true + end + + local code, _ = limit_count.rate_limit(limit_conf, ctx, plugin_name, 1, true) + if code then + core.log.info("rate limit for instance: ", instance_name, " code: ", code) + return false + end + return true +end + + +local function get_token_usage(conf, ctx) + local usage = ctx.ai_token_usage + if not usage then + return + end + return usage[conf.limit_strategy] +end + + +function _M.log(conf, ctx) + local instance_name = ctx.picked_ai_instance_name + if not instance_name then + return + end + + if ctx.ai_rate_limiting then + return + end + + local used_tokens = get_token_usage(conf, ctx) + if not used_tokens then + core.log.error("failed to get token usage for llm service") + return + end + + core.log.info("instance name: ", instance_name, " used tokens: ", used_tokens) + + local limit_conf_kvs = limit_conf_cache(conf, nil, fetch_limit_conf_kvs, conf) + local limit_conf = limit_conf_kvs[instance_name] + if limit_conf then + limit_count.rate_limit(limit_conf, ctx, plugin_name, used_tokens) + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-request-rewrite.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-request-rewrite.lua new file mode 100644 index 0000000..1b850eb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ai-request-rewrite.lua @@ -0,0 +1,231 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. 
-- ai-request-rewrite: sends the client's request body to an LLM together
-- with a configured system prompt, and replaces the request body with the
-- LLM's answer before proxying upstream.
local core = require("apisix.core")
local require = require
local pcall = pcall
local ngx = ngx
local req_set_body_data = ngx.req.set_body_data
local HTTP_BAD_REQUEST = ngx.HTTP_BAD_REQUEST
local HTTP_INTERNAL_SERVER_ERROR = ngx.HTTP_INTERNAL_SERVER_ERROR

local plugin_name = "ai-request-rewrite"

-- free-form map of auth names to values (letters, digits, '.', '_', '-')
local auth_item_schema = {
    type = "object",
    patternProperties = {
        ["^[a-zA-Z0-9._-]+$"] = {
            type = "string"
        }
    }
}

-- auth can be supplied as request headers and/or query parameters
local auth_schema = {
    type = "object",
    properties = {
        header = auth_item_schema,
        query = auth_item_schema
    },
    additionalProperties = false
}

local model_options_schema = {
    description = "Key/value settings for the model",
    type = "object",
    properties = {
        model = {
            type = "string",
            description = "Model to execute. Examples: \"gpt-3.5-turbo\" for openai, " ..
                "\"deepseek-chat\" for deekseek, or \"qwen-turbo\" for openai-compatible services"
        }
    },
    additionalProperties = true
}

local schema = {
    type = "object",
    properties = {
        prompt = {
            type = "string",
            description = "The prompt to rewrite client request."
        },
        provider = {
            type = "string",
            description = "Name of the AI service provider.",
            enum = {
                "openai",
                "openai-compatible",
                "deepseek",
                "aimlapi"
            } -- add more providers later
        },
        auth = auth_schema,
        options = model_options_schema,
        timeout = {
            type = "integer",
            minimum = 1,
            maximum = 60000,
            default = 30000,
            description = "Total timeout in milliseconds for requests to LLM service, " ..
                "including connect, send, and read timeouts."
        },
        keepalive = {
            type = "boolean",
            default = true
        },
        keepalive_pool = {
            type = "integer",
            minimum = 1,
            default = 30
        },
        ssl_verify = {
            type = "boolean",
            default = true
        },
        override = {
            type = "object",
            properties = {
                endpoint = {
                    type = "string",
                    description = "To be specified to override " ..
                        "the endpoint of the AI service provider."
                }
            }
        }
    },
    required = {"prompt", "provider", "auth"}
}

local _M = {
    version = 0.1,
    priority = 1073,
    name = plugin_name,
    schema = schema
}

-- Dispatch the chat-completion request to the configured provider driver
-- (apisix.plugins.ai-drivers.<provider>). Returns (res, body) on success or
-- (nil, nil, err) on failure. The driver owns the HTTP client; we read the
-- full body here and close the connection.
local function request_to_llm(conf, request_table, ctx)
    local ok, ai_driver = pcall(require, "apisix.plugins.ai-drivers." .. conf.provider)
    if not ok then
        return nil, nil, "failed to load ai-driver: " .. conf.provider
    end

    local extra_opts = {
        endpoint = core.table.try_read_attr(conf, "override", "endpoint"),
        query_params = conf.auth.query or {},
        headers = (conf.auth.header or {}),
        model_options = conf.options
    }

    local res, err, httpc = ai_driver:request(conf, request_table, extra_opts)
    if err then
        return nil, nil, err
    end

    local resp_body, err = res:read_body()
    httpc:close()
    if err then
        return nil, nil, err
    end

    return res, resp_body
end


-- Extract choices[1].message.content from an OpenAI-style JSON response.
-- Returns the content string, or (nil, err) when the body is not JSON or
-- lacks the expected structure.
local function parse_llm_response(res_body)
    local response_table, err = core.json.decode(res_body)

    if err then
        return nil, "failed to decode llm response " .. ", err: " .. err
    end

    if not response_table.choices or not response_table.choices[1] then
        return nil, "'choices' not in llm response"
    end

    local message = response_table.choices[1].message
    if not message then
        return nil, "'message' not in llm response choices"
    end

    return message.content
end


function _M.check_schema(conf)
    -- openai-compatible should be used with override.endpoint
    if conf.provider == "openai-compatible" then
        local override = conf.override

        if not override or not override.endpoint then
            return false, "override.endpoint is required for openai-compatible provider"
        end
    end

    return core.schema.check(schema, conf)
end


-- access phase: body -> LLM -> rewritten body. A missing body is a no-op
-- (request passes through unmodified); an unreadable body is a client error.
function _M.access(conf, ctx)
    local client_request_body, err = core.request.get_body()
    if err then
        core.log.warn("failed to get request body: ", err)
        return HTTP_BAD_REQUEST
    end

    if not client_request_body then
        core.log.warn("missing request body")
        return
    end

    -- Prepare request for LLM service: configured prompt as the system
    -- message, the raw client body as the user message.
    local ai_request_table = {
        messages = {
            {
                role = "system",
                content = conf.prompt
            },
            {
                role = "user",
                content = client_request_body
            }
        },
        stream = false
    }

    -- Send request to LLM service
    local res, resp_body, err = request_to_llm(conf, ai_request_table, ctx)
    if err then
        core.log.error("failed to request to LLM service: ", err)
        return HTTP_INTERNAL_SERVER_ERROR
    end

    -- Handle LLM response: any non-2xx status from the LLM maps to a 500
    -- for the client, since the failure is on the gateway->LLM leg.
    if res.status > 299 then
        core.log.error("LLM service returned error status: ", res.status)
        return HTTP_INTERNAL_SERVER_ERROR
    end

    -- Parse LLM response
    local llm_response, err = parse_llm_response(resp_body)
    if err then
        core.log.error("failed to parse LLM response: ", err)
        return HTTP_INTERNAL_SERVER_ERROR
    end

    req_set_body_data(llm_response)
end

return _M
-- ai: a global "fast path" plugin. When the route set is simple enough it
-- replaces APISIX's generic route matching with an LRU-cached matcher, and
-- replaces the upstream/balancer phases with minimal single-node versions.
-- On every router rebuild (BUILD_ROUTER event) it re-analyzes the routes and
-- swaps the real implementations back in if any feature that the fast path
-- cannot handle is present.
local require = require
local apisix = require("apisix")
local core = require("apisix.core")
local router = require("apisix.router")
local get_global_rules = require("apisix.global_rules").global_rules
local event = require("apisix.core.event")
local balancer = require("ngx.balancer")
local ngx = ngx
local is_http = ngx.config.subsystem == "http"
local enable_keepalive = balancer.enable_keepalive and is_http
local is_apisix_or, response = pcall(require, "resty.apisix.response")
local ipairs = ipairs
local pcall = pcall
local loadstring = loadstring
local type = type
local pairs = pairs

local get_cache_key_func
local get_cache_key_func_def_render

-- resty.template source for the cache-key function: always keyed on uri,
-- optionally extended with method/host when any route discriminates on them.
local get_cache_key_func_def = [[
return function(ctx)
    local var = ctx.var
    return var.uri
    {% if route_flags["methods"] then %}
    .. "#" .. var.method
    {% end %}
    {% if route_flags["host"] then %}
    .. "#" .. var.host
    {% end %}
end
]]

local route_lrucache

local schema = {}

local plugin_name = "ai"

local _M = {
    version = 0.1,
    priority = 22900,
    name = plugin_name,
    schema = schema,
    scope = "global",
}

-- originals saved before monkey-patching, restored in destroy()
local orig_router_http_matching
local orig_handle_upstream
local orig_http_balancer_phase

local default_keepalive_pool = {}

-- Run the real matcher once and deep-copy the resulting api_ctx so it can be
-- cached. upstream.parent is kept as a shallow reference to avoid copying the
-- whole config tree.
local function create_router_matching_cache(api_ctx)
    orig_router_http_matching(api_ctx)
    return core.table.deepcopy(api_ctx, {
        shallows = { "self.matched_route.value.upstream.parent" }
    })
end


-- Fast matcher: look up the cached match by the generated cache key and graft
-- the cached matched_route/curr_req_matched onto the live api_ctx.
local function ai_router_http_matching(api_ctx)
    core.log.info("route match mode: ai_match")

    local key = get_cache_key_func(api_ctx)
    core.log.info("route cache key: ", key)
    local api_ctx_cache = route_lrucache(key, nil,
                                         create_router_matching_cache, api_ctx)
    -- if the version has not changed, use the cached route
    if api_ctx then
        api_ctx.matched_route = api_ctx_cache.matched_route
        if api_ctx_cache.curr_req_matched then
            api_ctx.curr_req_matched = core.table.clone(api_ctx_cache.curr_req_matched)
        end
    end
end


-- Compile get_cache_key_func from the template above for the current set of
-- route flags. Returns true on success or (false, err) when the generated
-- chunk fails to load or run.
local function gen_get_cache_key_func(route_flags)
    if get_cache_key_func_def_render == nil then
        local template = require("resty.template")
        get_cache_key_func_def_render = template.compile(get_cache_key_func_def)
    end

    local str = get_cache_key_func_def_render({route_flags = route_flags})
    local func, err = loadstring(str)
    if func == nil then
        return false, err
    else
        local ok, err_or_function = pcall(func)
        if not ok then
            return false, err_or_function
        end
        get_cache_key_func = err_or_function
    end

    return true
end


-- Replacement for apisix.handle_upstream: intentionally does nothing — the
-- fast path defers everything to the balancer phase below.
local function ai_upstream()
    core.log.info("enable sample upstream")
end


local pool_opt
-- Minimal balancer phase for the single-node, plain-HTTP upstream case:
-- pins the peer directly and enables keepalive when supported.
local function ai_http_balancer_phase()
    local api_ctx = ngx.ctx.api_ctx
    if not api_ctx then
        core.log.error("invalid api_ctx")
        return core.response.exit(500)
    end

    if is_apisix_or then
        -- on apisix-runtime, skip the Lua body filter for this request
        local ok, err = response.skip_body_filter_by_lua()
        if not ok then
            core.log.error("failed to skip body filter by lua: ", err)
        end
    end

    local route = api_ctx.matched_route
    local server = route.value.upstream.nodes[1]
    if enable_keepalive then
        local ok, err = balancer.set_current_peer(server.host, server.port or 80, pool_opt)
        if not ok then
            core.log.error("failed to set server peer [", server.host, ":",
                           server.port, "] err: ", err)
            return ok, err
        end
        balancer.enable_keepalive(default_keepalive_pool.idle_timeout,
                                  default_keepalive_pool.requests)
    else
        balancer.set_current_peer(server.host, server.port or 80)
    end
end


-- Called on every router rebuild. Scans all routes, collects which features
-- they use (route_flags) and which upstream features they use
-- (route_up_flags), then decides independently:
--   1) whether route matching can use the LRU-cached fast matcher, and
--   2) whether the upstream/balancer phases can use the minimal versions.
local function routes_analyze(routes)
    if orig_router_http_matching == nil then
        orig_router_http_matching = router.router_http.matching
    end

    if orig_handle_upstream == nil then
        orig_handle_upstream = apisix.handle_upstream
    end

    if orig_http_balancer_phase == nil then
        orig_http_balancer_phase = apisix.http_balancer_phase
    end

    local route_flags = core.table.new(0, 16)
    local route_up_flags = core.table.new(0, 12)

    for _, route in ipairs(routes) do
        if type(route) == "table" then
            for key, value in pairs(route.value) do
                -- collect route flags
                if key == "methods" then
                    route_flags["methods"] = true
                elseif key == "host" or key == "hosts" then
                    route_flags["host"] = true
                elseif key == "vars" then
                    route_flags["vars"] = true
                elseif key == "filter_func"then
                    route_flags["filter_func"] = true
                elseif key == "remote_addr" or key == "remote_addrs" then
                    route_flags["remote_addr"] = true
                elseif key == "service" then
                    route_flags["service"] = true
                elseif key == "enable_websocket" then
                    route_flags["enable_websocket"] = true
                elseif key == "plugins" then
                    route_flags["plugins"] = true
                elseif key == "upstream_id" then
                    route_flags["upstream_id"] = true
                elseif key == "service_id" then
                    route_flags["service_id"] = true
                elseif key == "plugin_config_id" then
                    route_flags["plugin_config_id"] = true
                elseif key == "script" then
                    route_flags["script"] = true
                end

                -- collect upstream flags; the fast balancer only supports a
                -- single literal-IP node with default pass_host/scheme and
                -- no checks/retries/timeout/tls/keepalive_pool/service_name
                if key == "upstream" then
                    if value.nodes and #value.nodes == 1 then
                        for k, v in pairs(value) do
                            if k == "nodes" then
                                if (not core.utils.parse_ipv4(v[1].host)
                                    and not core.utils.parse_ipv6(v[1].host)) then
                                    route_up_flags["has_domain"] = true
                                end
                            elseif k == "pass_host" and v ~= "pass" then
                                route_up_flags["pass_host"] = true
                            elseif k == "scheme" and v ~= "http" then
                                route_up_flags["scheme"] = true
                            elseif k == "checks" then
                                route_up_flags["checks"] = true
                            elseif k == "retries" then
                                route_up_flags["retries"] = true
                            elseif k == "timeout" then
                                route_up_flags["timeout"] = true
                            elseif k == "tls" then
                                route_up_flags["tls"] = true
                            elseif k == "keepalive_pool" then
                                route_up_flags["keepalive_pool"] = true
                            elseif k == "service_name" then
                                route_up_flags["service_name"] = true
                            end
                        end
                    else
                        route_up_flags["more_nodes"] = true
                    end
                end
            end
        end
    end

    local global_rules, _ = get_global_rules()
    local global_rules_flag = global_rules and #global_rules ~= 0

    -- features the cached matcher cannot key on disable the fast matcher
    if route_flags["vars"] or route_flags["filter_func"]
        or route_flags["remote_addr"]
        or route_flags["service_id"]
        or route_flags["plugin_config_id"]
        or global_rules_flag then
        router.router_http.matching = orig_router_http_matching
    else
        core.log.info("use ai plane to match route")
        router.router_http.matching = ai_router_http_matching

        -- cache a bit more than the route count to absorb key variants
        local count = #routes + 3000
        core.log.info("renew route cache: count=", count)
        route_lrucache = core.lrucache.new({
            count = count
        })

        local ok, err = gen_get_cache_key_func(route_flags)
        if not ok then
            -- fall back to the real matcher if codegen failed
            core.log.error("generate get_cache_key_func failed:", err)
            router.router_http.matching = orig_router_http_matching
        end
    end

    if route_flags["service"]
        or route_flags["script"]
        or route_flags["service_id"]
        or route_flags["upstream_id"]
        or route_flags["enable_websocket"]
        or route_flags["plugins"]
        or route_flags["plugin_config_id"]
        or route_up_flags["has_domain"]
        or route_up_flags["pass_host"]
        or route_up_flags["scheme"]
        or route_up_flags["checks"]
        or route_up_flags["retries"]
        or route_up_flags["timeout"]
        or route_up_flags["tls"]
        or route_up_flags["keepalive_pool"]
        or route_up_flags["service_name"]
        or route_up_flags["more_nodes"]
        or global_rules_flag then
        apisix.handle_upstream = orig_handle_upstream
        apisix.http_balancer_phase = orig_http_balancer_phase
    else
        -- replace the upstream and balancer module
        apisix.handle_upstream = ai_upstream
        apisix.http_balancer_phase = ai_http_balancer_phase
    end
end


function _M.init()
    event.register(event.CONST.BUILD_ROUTER, routes_analyze)
    -- seed the keepalive pool settings from nginx_config.http.upstream
    local local_conf = core.config.local_conf()
    local up_keepalive_conf =
        core.table.try_read_attr(local_conf, "nginx_config",
                                 "http", "upstream")
    default_keepalive_pool.idle_timeout =
        core.config_util.parse_time_unit(up_keepalive_conf.keepalive_timeout)
    default_keepalive_pool.size = up_keepalive_conf.keepalive
    default_keepalive_pool.requests = up_keepalive_conf.keepalive_requests

    pool_opt = { pool_size = default_keepalive_pool.size }
end


-- Restore every patched function and unregister the event handler.
function _M.destroy()
    if orig_router_http_matching then
        router.router_http.matching = orig_router_http_matching
        orig_router_http_matching = nil
    end

    if orig_handle_upstream then
        apisix.handle_upstream = orig_handle_upstream
        orig_handle_upstream = nil
    end

    if orig_http_balancer_phase then
        apisix.http_balancer_phase = orig_http_balancer_phase
        orig_http_balancer_phase = nil
    end

    event.unregister(event.CONST.BUILD_ROUTER)
end


return _M
-- api-breaker: per-(host, uri) circuit breaker backed by an nginx shared
-- dict. The log phase counts unhealthy/healthy upstream statuses; the access
-- phase short-circuits requests with break_response_code while the breaker
-- is open. Break duration grows exponentially (2^n seconds) with repeated
-- failure rounds, capped at max_breaker_sec.
local core = require("apisix.core")
local plugin_name = "api-breaker"
local ngx = ngx
local math = math
local error = error
local ipairs = ipairs


-- counters live in the "plugin-api-breaker" shared dict declared in the
-- nginx config; fail hard at load time if it is missing
local shared_buffer = ngx.shared["plugin-".. plugin_name]
if not shared_buffer then
    error("failed to get ngx.shared dict when load plugin " .. plugin_name)
end


local schema = {
    type = "object",
    properties = {
        break_response_code = {
            type = "integer",
            minimum = 200,
            maximum = 599,
        },
        break_response_body = {
            type = "string"
        },
        break_response_headers = {
            type = "array",
            items = {
                type = "object",
                properties = {
                    key = {
                        type = "string",
                        minLength = 1
                    },
                    value = {
                        type = "string",
                        minLength = 1
                    }
                },
                required = {"key", "value"},
            }
        },
        max_breaker_sec = {
            type = "integer",
            minimum = 3,
            default = 300,
        },
        unhealthy = {
            type = "object",
            properties = {
                http_statuses = {
                    type = "array",
                    minItems = 1,
                    items = {
                        type = "integer",
                        minimum = 500,
                        maximum = 599,
                    },
                    uniqueItems = true,
                    default = {500}
                },
                failures = {
                    type = "integer",
                    minimum = 1,
                    default = 3,
                }
            },
            default = {http_statuses = {500}, failures = 3}
        },
        healthy = {
            type = "object",
            properties = {
                http_statuses = {
                    type = "array",
                    minItems = 1,
                    items = {
                        type = "integer",
                        minimum = 200,
                        maximum = 499,
                    },
                    uniqueItems = true,
                    default = {200}
                },
                successes = {
                    type = "integer",
                    minimum = 1,
                    default = 3,
                }
            },
            default = {http_statuses = {200}, successes = 3}
        }
    },
    required = {"break_response_code"},
}


-- shared-dict keys are scoped per host+uri so each endpoint breaks
-- independently
local function gen_healthy_key(ctx)
    return "healthy-" .. core.request.get_host(ctx) .. ctx.var.uri
end


local function gen_unhealthy_key(ctx)
    return "unhealthy-" .. core.request.get_host(ctx) .. ctx.var.uri
end


local function gen_lasttime_key(ctx)
    return "unhealthy-lasttime" .. core.request.get_host(ctx) .. ctx.var.uri
end


local _M = {
    version = 0.1,
    name = plugin_name,
    priority = 1005,
    schema = schema,
}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- access phase: if the breaker is open for this host+uri, reply immediately
-- with the configured break response; otherwise let the request through.
-- Shared-dict read errors are logged and treated as "breaker closed".
function _M.access(conf, ctx)
    local unhealthy_key = gen_unhealthy_key(ctx)
    -- unhealthy counts
    local unhealthy_count, err = shared_buffer:get(unhealthy_key)
    if err then
        core.log.warn("failed to get unhealthy_key: ",
                      unhealthy_key, " err: ", err)
        return
    end

    if not unhealthy_count then
        return
    end

    -- timestamp of the last time a unhealthy state was triggered
    local lasttime_key = gen_lasttime_key(ctx)
    local lasttime, err = shared_buffer:get(lasttime_key)
    if err then
        core.log.warn("failed to get lasttime_key: ",
                      lasttime_key, " err: ", err)
        return
    end

    if not lasttime then
        return
    end

    -- each full round of conf.unhealthy.failures doubles the break window
    local failure_times = math.ceil(unhealthy_count / conf.unhealthy.failures)
    if failure_times < 1 then
        failure_times = 1
    end

    -- cannot exceed the maximum value of the user configuration
    local breaker_time = 2 ^ failure_times
    if breaker_time > conf.max_breaker_sec then
        breaker_time = conf.max_breaker_sec
    end
    core.log.info("breaker_time: ", breaker_time)

    -- breaker
    if lasttime + breaker_time >= ngx.time() then
        if conf.break_response_body then
            if conf.break_response_headers then
                for _, value in ipairs(conf.break_response_headers) do
                    -- header values may reference nginx vars (e.g. $host)
                    local val = core.utils.resolve_var(value.value, ctx.var)
                    core.response.add_header(value.key, val)
                end
            end
            return conf.break_response_code, conf.break_response_body
        end
        return conf.break_response_code
    end

    return
end


-- log phase: update the health counters from the upstream status.
-- An unhealthy status bumps the unhealthy counter and resets the healthy
-- one; enough consecutive healthy statuses (only counted while an
-- unhealthy count exists) clear all breaker state.
function _M.log(conf, ctx)
    local unhealthy_key = gen_unhealthy_key(ctx)
    local healthy_key = gen_healthy_key(ctx)
    local upstream_status = core.response.get_upstream_status(ctx)

    if not upstream_status then
        return
    end

    -- unhealthy process
    if core.table.array_find(conf.unhealthy.http_statuses,
                             upstream_status)
    then
        local unhealthy_count, err = shared_buffer:incr(unhealthy_key, 1, 0)
        if err then
            core.log.warn("failed to incr unhealthy_key: ", unhealthy_key,
                          " err: ", err)
        end
        core.log.info("unhealthy_key: ", unhealthy_key, " count: ",
                      unhealthy_count)

        shared_buffer:delete(healthy_key)

        -- whether the user-configured number of failures has been reached,
        -- and if so, the timestamp for entering the unhealthy state.
        if unhealthy_count % conf.unhealthy.failures == 0 then
            -- the lasttime entry expires with max_breaker_sec so a fully
            -- elapsed break window clears itself
            shared_buffer:set(gen_lasttime_key(ctx), ngx.time(),
                              conf.max_breaker_sec)
            core.log.info("update unhealthy_key: ", unhealthy_key, " to ",
                          unhealthy_count)
        end

        return
    end

    -- health process
    if not core.table.array_find(conf.healthy.http_statuses, upstream_status) then
        return
    end

    local unhealthy_count, err = shared_buffer:get(unhealthy_key)
    if err then
        core.log.warn("failed to `get` unhealthy_key: ", unhealthy_key,
                      " err: ", err)
    end

    if not unhealthy_count then
        return
    end

    local healthy_count, err = shared_buffer:incr(healthy_key, 1, 0)
    if err then
        core.log.warn("failed to `incr` healthy_key: ", healthy_key,
                      " err: ", err)
    end

    -- clear related status
    if healthy_count >= conf.healthy.successes then
        -- stat change to normal
        core.log.info("change to normal, ", healthy_key, " ", healthy_count)
        shared_buffer:delete(gen_lasttime_key(ctx))
        shared_buffer:delete(unhealthy_key)
        shared_buffer:delete(healthy_key)
    end

    return
end

return _M
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local pairs = pairs +local plugin_name = "attach-consumer-label" + +local schema = { + type = "object", + properties = { + headers = { + type = "object", + additionalProperties = { + type = "string", + pattern = "^\\$.*" + }, + minProperties = 1 + }, + }, + required = {"headers"}, +} + +local _M = { + version = 0.1, + priority = 2399, + name = plugin_name, + schema = schema, +} + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + +function _M.before_proxy(conf, ctx) + -- check if the consumer is exists in the context + if not ctx.consumer then + return + end + + local labels = ctx.consumer.labels + core.log.info("consumer username: ", ctx.consumer.username, " labels: ", + core.json.delay_encode(labels)) + if not labels then + return + end + + for header, label_key in pairs(conf.headers) do + -- remove leading $ character + local label_value = labels[label_key:sub(2)] + core.request.set_header(ctx, header, label_value) + end +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/authz-casbin.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/authz-casbin.lua new file mode 100644 index 0000000..834c747 --- /dev/null +++ 
-- authz-casbin: RBAC authorization via lua-casbin. The model/policy can come
-- from file paths, inline text in the route conf, or plugin metadata; the
-- request is enforced as (username-header-value, uri, method).
local casbin = require("casbin")
local core = require("apisix.core")
local plugin = require("apisix.plugin")

local plugin_name = "authz-casbin"

local schema = {
    type = "object",
    properties = {
        model_path = { type = "string" },
        policy_path = { type = "string" },
        model = { type = "string" },
        policy = { type = "string" },
        username = { type = "string"}
    },
    -- either file-based or inline model+policy, always with username
    oneOf = {
        {required = {"model_path", "policy_path", "username"}},
        {required = {"model", "policy", "username"}}
    },
}

local metadata_schema = {
    type = "object",
    properties = {
        model = {type = "string"},
        policy = {type = "string"},
    },
    required = {"model", "policy"},
}

local _M = {
    version = 0.1,
    priority = 2560,
    name = plugin_name,
    schema = schema,
    metadata_schema = metadata_schema
}

-- A conf that fails the oneOf schema is still accepted when plugin metadata
-- provides the model/policy and the conf at least names the username header.
function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_METADATA then
        return core.schema.check(metadata_schema, conf)
    end
    local ok, err = core.schema.check(schema, conf)
    if ok then
        return true
    else
        local metadata = plugin.plugin_metadata(plugin_name)
        if metadata and metadata.value and conf.username then
            return true
        end
    end
    return false, err
end

-- metadata-based enforcer, shared across routes; rebuilt only when the
-- metadata's modifiedIndex changes
local casbin_enforcer

-- Lazily create the enforcer for this conf (cached on conf.casbin_enforcer)
-- or refresh the shared metadata-based enforcer. Returns true on success or
-- (nil, err) when no source of model/policy is available.
local function new_enforcer_if_need(conf)
    if conf.model_path and conf.policy_path then
        local model_path = conf.model_path
        local policy_path = conf.policy_path
        if not conf.casbin_enforcer then
            conf.casbin_enforcer = casbin:new(model_path, policy_path)
        end
        return true
    end

    if conf.model and conf.policy then
        local model = conf.model
        local policy = conf.policy
        if not conf.casbin_enforcer then
            conf.casbin_enforcer = casbin:newEnforcerFromText(model, policy)
        end
        return true
    end

    local metadata = plugin.plugin_metadata(plugin_name)
    if not (metadata and metadata.value.model and metadata.value.policy) then
        return nil, "not enough configuration to create enforcer"
    end

    local modifiedIndex = metadata.modifiedIndex
    if not casbin_enforcer or casbin_enforcer.modifiedIndex ~= modifiedIndex then
        local model = metadata.value.model
        local policy = metadata.value.policy
        casbin_enforcer = casbin:newEnforcerFromText(model, policy)
        casbin_enforcer.modifiedIndex = modifiedIndex
    end
    return true
end


-- rewrite phase: enforce (username, path, method); 403 on denial, 503 when
-- the enforcer cannot be built. Requests without the username header are
-- enforced as "anonymous".
function _M.rewrite(conf, ctx)
    -- creates an enforcer when request sent for the first time
    local ok, err = new_enforcer_if_need(conf)
    if not ok then
        core.log.error(err)
        return 503
    end

    local path = ctx.var.uri
    local method = ctx.var.method
    local headers = core.request.headers(ctx)
    local username = headers[conf.username] or "anonymous"

    -- conf-level enforcer takes precedence over the metadata-based one
    if conf.casbin_enforcer then
        if not conf.casbin_enforcer:enforce(username, path, method) then
            return 403, {message = "Access Denied"}
        end
    else
        if not casbin_enforcer:enforce(username, path, method) then
            return 403, {message = "Access Denied"}
        end
    end
end



return _M
-- authz-casdoor: OAuth2 authorization-code login against a Casdoor server.
-- Unauthenticated requests are redirected to Casdoor; the callback exchanges
-- the code for an access token, which is kept in a resty.session cookie.
local core = require("apisix.core")
local http = require("resty.http")
local session = require("resty.session")
local ngx = ngx
local rand = math.random
local tostring = tostring


local plugin_name = "authz-casdoor"
local schema = {
    type = "object",
    properties = {
        -- Note: endpoint_addr and callback_url should not end with '/'
        endpoint_addr = {type = "string", pattern = "^[^%?]+[^/]$"},
        client_id = {type = "string"},
        client_secret = {type = "string"},
        callback_url = {type = "string", pattern = "^[^%?]+[^/]$"}
    },
    encrypt_fields = {"client_secret"},
    required = {
        "callback_url", "endpoint_addr", "client_id", "client_secret"
    }
}

local _M = {
    version = 0.1,
    priority = 2559,
    name = plugin_name,
    schema = schema
}

-- Exchange an authorization code for an access token at Casdoor's token
-- endpoint. Returns (access_token, expires_in) on success, or
-- (nil, nil, err) on transport/parse/validation failure.
local function fetch_access_token(code, conf)
    local client = http.new()
    local url = conf.endpoint_addr .. "/api/login/oauth/access_token"
    local res, err = client:request_uri(url, {
        method = "POST",
        body = ngx.encode_args({
            code = code,
            grant_type = "authorization_code",
            client_id = conf.client_id,
            client_secret = conf.client_secret
        }),
        headers = {
            ["Content-Type"] = "application/x-www-form-urlencoded"
        }
    })

    if not res then
        return nil, nil, err
    end
    local data, err = core.json.decode(res.body)

    if err or not data then
        err = "failed to parse casdoor response data: " .. err .. ", body: " .. res.body
        return nil, nil, err
    end

    if not data.access_token then
        return nil, nil,
               "failed when accessing token: no access_token contained"
    end
    -- In the reply of casdoor, setting expires_in to 0 indicates that the access_token is invalid.
    if not data.expires_in or data.expires_in == 0 then
        return nil, nil, "failed when accessing token: invalid access_token"
    end

    return data.access_token, data.expires_in, nil
end


function _M.check_schema(conf)
    -- warn when endpoint/callback are plain http (helper logs, not rejects)
    local check = {"endpoint_addr", "callback_url"}
    core.utils.check_https(check, conf, plugin_name)
    return core.schema.check(schema, conf)
end


-- access phase, two branches:
--   1) the request hits the callback path: validate state against the
--      session, exchange code for token, store it in a fresh session and
--      redirect back to the originally requested URL;
--   2) any other path: if no token in session, stash original URL + random
--      state and redirect to Casdoor's authorize endpoint.
function _M.access(conf, ctx)
    local current_uri = ctx.var.uri
    local session_obj_read, session_present = session.open()
    -- step 1: check whether hits the callback
    -- extract the path component of callback_url (strip scheme://host)
    local m, err = ngx.re.match(conf.callback_url, ".+//[^/]+(/.*)", "jo")
    if err or not m then
        core.log.error(err)
        return 503
    end
    local real_callback_url = m[1]
    if current_uri == real_callback_url then
        if not session_present then
            err = "no session found"
            core.log.error(err)
            return 503
        end
        local state_in_session = session_obj_read.data.state
        if not state_in_session then
            err = "no state found in session"
            core.log.error(err)
            return 503
        end
        local args = core.request.get_uri_args(ctx)
        if not args or not args.code or not args.state then
            err = "failed when accessing token. Invalid code or state"
            core.log.error(err)
            return 400, err
        end
        -- CSRF protection: state must match what we stored pre-redirect
        if args.state ~= tostring(state_in_session) then
            err = "invalid state"
            core.log.error(err)
            return 400, err
        end
        -- NOTE(review): args.code was already validated above; this branch
        -- is unreachable as written
        if not args.code then
            err = "invalid code"
            core.log.error(err)
            return 400, err
        end
        local access_token, lifetime, err =
            fetch_access_token(args.code, conf)
        if not access_token then
            core.log.error(err)
            return 503
        end
        local original_url = session_obj_read.data.original_uri
        if not original_url then
            err = "no original_url found in session"
            core.log.error(err)
            return 503
        end
        -- new session whose cookie lifetime matches the token's expiry
        local session_obj_write = session.new {
            cookie = {lifetime = lifetime}
        }
        session_obj_write:start()
        session_obj_write.data.access_token = access_token
        session_obj_write:save()
        core.response.set_header("Location", original_url)
        return 302
    end

    -- step 2: check whether session exists
    if not (session_present and session_obj_read.data.access_token) then
        -- session not exists, redirect to login page
        local state = rand(0x7fffffff)
        local session_obj_write = session.start()
        session_obj_write.data.original_uri = current_uri
        session_obj_write.data.state = state
        session_obj_write:save()

        local redirect_url = conf.endpoint_addr .. "/login/oauth/authorize?" .. ngx.encode_args({
            response_type = "code",
            scope = "read",
            state = state,
            client_id = conf.client_id,
            redirect_uri = conf.callback_url
        })
        core.response.set_header("Location", redirect_url)
        return 302
    end

end

return _M
local core = require("apisix.core")
local http = require "resty.http"
local sub_str = string.sub
local type = type
local ngx = ngx
local plugin_name = "authz-keycloak"
local fetch_secrets = require("apisix.secret").fetch_secrets

local log = core.log
local pairs = pairs

-- Plugin configuration schema. Either `discovery` (OIDC discovery document
-- URL) or an explicit `token_endpoint` must be present; with lazy_load_paths
-- enabled, a `resource_registration_endpoint` (or discovery) is also required.
local schema = {
    type = "object",
    properties = {
        discovery = {type = "string", minLength = 1, maxLength = 4096},
        token_endpoint = {type = "string", minLength = 1, maxLength = 4096},
        resource_registration_endpoint = {type = "string", minLength = 1, maxLength = 4096},
        client_id = {type = "string", minLength = 1, maxLength = 100},
        client_secret = {type = "string", minLength = 1, maxLength = 100},
        grant_type = {
            type = "string",
            default="urn:ietf:params:oauth:grant-type:uma-ticket",
            enum = {"urn:ietf:params:oauth:grant-type:uma-ticket"},
            minLength = 1, maxLength = 100
        },
        policy_enforcement_mode = {
            type = "string",
            enum = {"ENFORCING", "PERMISSIVE"},
            default = "ENFORCING"
        },
        permissions = {
            type = "array",
            items = {
                type = "string",
                minLength = 1, maxLength = 100
            },
            uniqueItems = true,
            default = {}
        },
        lazy_load_paths = {type = "boolean", default = false},
        http_method_as_scope = {type = "boolean", default = false},
        timeout = {type = "integer", minimum = 1000, default = 3000},
        ssl_verify = {type = "boolean", default = true},
        cache_ttl_seconds = {type = "integer", minimum = 1, default = 24 * 60 * 60},
        keepalive = {type = "boolean", default = true},
        keepalive_timeout = {type = "integer", minimum = 1000, default = 60000},
        keepalive_pool = {type = "integer", minimum = 1, default = 5},
        access_denied_redirect_uri = {type = "string", minLength = 1, maxLength = 2048},
        access_token_expires_in = {type = "integer", minimum = 1, default = 300},
        access_token_expires_leeway = {type = "integer", minimum = 0, default = 0},
        refresh_token_expires_in = {type = "integer", minimum = 1, default = 3600},
        refresh_token_expires_leeway = {type = "integer", minimum = 0, default = 0},
        password_grant_token_generation_incoming_uri = {
            type = "string",
            minLength = 1,
            maxLength = 4096
        },
    },
    encrypt_fields = {"client_secret"},
    required = {"client_id"},
    allOf = {
        -- Require discovery or token endpoint.
        {
            anyOf = {
                {required = {"discovery"}},
                {required = {"token_endpoint"}}
            }
        },
        -- If lazy_load_paths is true, require discovery or resource registration endpoint.
        {
            anyOf = {
                {
                    properties = {
                        lazy_load_paths = {enum = {false}},
                    }
                },
                {
                    properties = {
                        lazy_load_paths = {enum = {true}},
                    },
                    anyOf = {
                        {required = {"discovery"}},
                        {required = {"resource_registration_endpoint"}}
                    }
                }
            }
        }
    }
}


local _M = {
    version = 0.1,
    priority = 2000,
    name = plugin_name,
    schema = schema,
}


-- Validate the plugin configuration and warn on plaintext-HTTP endpoints.
function _M.check_schema(conf)
    local check = {"discovery", "token_endpoint", "resource_registration_endpoint",
                   "access_denied_redirect_uri"}
    core.utils.check_https(check, conf, plugin_name)
    core.utils.check_tls_bool({"ssl_verify"}, conf, plugin_name)

    return core.schema.check(schema, conf)
end


-- Some auxiliary functions below heavily inspired by the excellent
-- lua-resty-openidc module; see https://github.com/zmartzone/lua-resty-openidc


-- Retrieve value from server-wide cache, if available.
-- `type` names the ngx.shared dict (e.g. "discovery", "access-tokens");
-- silently returns nil when the dict is not configured.
local function authz_keycloak_cache_get(type, key)
    local dict = ngx.shared[type]
    local value
    if dict then
        value = dict:get(key)
        if value then log.debug("cache hit: type=", type, " key=", key) end
    end
    return value
end


-- Set value in server-wide cache, if available.
-- A non-positive `exp` disables caching for this entry.
local function authz_keycloak_cache_set(type, key, value, exp)
    local dict = ngx.shared[type]
    if dict and (exp > 0) then
        local success, err, forcible = dict:set(key, value, exp)
        if err then
            log.error("cache set: success=", success, " err=", err, " forcible=", forcible)
        else
            log.debug("cache set: success=", success, " err=", err, " forcible=", forcible)
        end
    end
end


-- Configure request parameters (keepalive, TLS verification) shared by all
-- outgoing HTTP calls; mutates and returns `params`.
local function authz_keycloak_configure_params(params, conf)
    -- Keepalive options.
    if conf.keepalive then
        params.keepalive_timeout = conf.keepalive_timeout
        params.keepalive_pool = conf.keepalive_pool
    else
        params.keepalive = conf.keepalive
    end

    -- TLS verification.
    params.ssl_verify = conf.ssl_verify

    -- Decorate parameters, maybe, and return.
    -- NOTE(review): http_request_decorator is not part of the schema above;
    -- presumably injected by tests or internal callers — confirm before relying on it.
    return conf.http_request_decorator and conf.http_request_decorator(params) or params
end


-- Configure timeouts. `timeout` is either a single value in ms or a table
-- with connect/send/read keys.
local function authz_keycloak_configure_timeouts(httpc, timeout)
    if timeout then
        if type(timeout) == "table" then
            httpc:set_timeouts(timeout.connect or 0, timeout.send or 0, timeout.read or 0)
        else
            httpc:set_timeout(timeout)
        end
    end
end


-- Set outgoing proxy options.
local function authz_keycloak_configure_proxy(httpc, proxy_opts)
    if httpc and proxy_opts and type(proxy_opts) == "table" then
        log.debug("authz_keycloak_configure_proxy : use http proxy")
        httpc:set_proxy_options(proxy_opts)
    else
        log.debug("authz_keycloak_configure_proxy : don't use http proxy")
    end
end


-- Get and configure HTTP client.
local function authz_keycloak_get_http_client(conf)
    local httpc = http.new()
    authz_keycloak_configure_timeouts(httpc, conf.timeout)
    authz_keycloak_configure_proxy(httpc, conf.proxy_opts)
    return httpc
end


-- Parse the JSON result from a call to the OP.
-- Returns decoded table, nil on success; nil, err otherwise (non-200 counts as failure).
local function authz_keycloak_parse_json_response(response)
    local err
    local res

    -- Check the response from the OP.
    if response.status ~= 200 then
        err = "response indicates failure, status=" .. response.status .. ", body=" .. response.body
    else
        -- Decode the response and extract the JSON object.
        res, err = core.json.decode(response.body)

        if not res then
            err = "JSON decoding failed: " .. err
        end
    end

    return res, err
end


-- Get the Discovery metadata from the specified URL, consulting the
-- "discovery" shared-dict cache first and caching successful fetches.
local function authz_keycloak_discover(conf)
    log.debug("authz_keycloak_discover: URL is: " .. conf.discovery)

    local json, err
    local v = authz_keycloak_cache_get("discovery", conf.discovery)

    if not v then
        log.debug("Discovery data not in cache, making call to discovery endpoint.")

        -- Make the call to the discovery endpoint.
        local httpc = authz_keycloak_get_http_client(conf)

        local params = authz_keycloak_configure_params({}, conf)

        local res, error = httpc:request_uri(conf.discovery, params)

        if not res then
            err = "Accessing discovery URL (" .. conf.discovery .. ") failed: " .. error
            log.error(err)
        else
            log.debug("Response data: " .. res.body)
            json, err = authz_keycloak_parse_json_response(res)
            if json then
                authz_keycloak_cache_set("discovery", conf.discovery, core.json.encode(json),
                                         conf.cache_ttl_seconds)
            else
                err = "could not decode JSON from Discovery data" .. (err and (": " .. err) or '')
                log.error(err)
            end
        end
    else
        json = core.json.decode(v)
    end

    return json, err
end


-- Turn a discovery url set in the conf dictionary into the discovered information.
-- On success, conf.discovery is replaced in place by the decoded table.
local function authz_keycloak_ensure_discovered_data(conf)
    local err
    if type(conf.discovery) == "string" then
        local discovery
        discovery, err = authz_keycloak_discover(conf)
        if not err then
            conf.discovery = discovery
        end
    end
    return err
end


-- Get an endpoint from the configuration: explicit conf entry wins,
-- otherwise fall back to the discovered metadata.
local function authz_keycloak_get_endpoint(conf, endpoint)
    if conf and conf[endpoint] then
        -- Use explicit entry.
        return conf[endpoint]
    elseif conf and conf.discovery and type(conf.discovery) == "table" then
        -- Use discovery data.
        return conf.discovery[endpoint]
    end

    -- Unable to obtain endpoint.
    return nil
end


-- Return the token endpoint from the configuration.
local function authz_keycloak_get_token_endpoint(conf)
    return authz_keycloak_get_endpoint(conf, "token_endpoint")
end


-- Return the resource registration endpoint from the configuration.
local function authz_keycloak_get_resource_registration_endpoint(conf)
    return authz_keycloak_get_endpoint(conf, "resource_registration_endpoint")
end
-- Return access_token expires_in value (in seconds); subtracts one second
-- plus the configured leeway so the token is refreshed before it expires.
local function authz_keycloak_access_token_expires_in(conf, expires_in)
    return (expires_in or conf.access_token_expires_in)
           - 1 - conf.access_token_expires_leeway
end


-- Return refresh_token expires_in value (in seconds), same leeway handling.
local function authz_keycloak_refresh_token_expires_in(conf, expires_in)
    return (expires_in or conf.refresh_token_expires_in)
           - 1 - conf.refresh_token_expires_leeway
end


-- Ensure a valid service account access token is available for the configured client.
-- Tokens are cached per token_endpoint:client_id in the "access-tokens" shared
-- dict; an expired access token is refreshed via the refresh token when
-- possible, otherwise a fresh client_credentials grant is performed.
-- Returns the access token string on success, or nil/err-style pairs on failure
-- (note: the "no token endpoint" path returns 503, message instead — callers
-- treat any non-token first value as failure).
local function authz_keycloak_ensure_sa_access_token(conf)
    local client_id = conf.client_id
    local ttl = conf.cache_ttl_seconds
    local token_endpoint = authz_keycloak_get_token_endpoint(conf)

    if not token_endpoint then
        log.error("Unable to determine token endpoint.")
        return 503, "Unable to determine token endpoint."
    end

    local session = authz_keycloak_cache_get("access-tokens", token_endpoint .. ":"
                                             .. client_id)

    if session then
        -- Decode session string.
        local err
        session, err = core.json.decode(session)

        if not session then
            -- Should never happen.
            return 500, err
        end

        local current_time = ngx.time()

        if current_time < session.access_token_expiration then
            -- Access token is still valid.
            log.debug("Access token is still valid.")
            return session.access_token
        else
            -- Access token has expired.
            log.debug("Access token has expired.")
            if session.refresh_token
               and (not session.refresh_token_expiration
                    or current_time < session.refresh_token_expiration) then
                -- Try to get a new access token, using the refresh token.
                log.debug("Trying to get new access token using refresh token.")

                local httpc = authz_keycloak_get_http_client(conf)

                local params = {
                    method = "POST",
                    body = ngx.encode_args({
                        grant_type = "refresh_token",
                        client_id = client_id,
                        client_secret = conf.client_secret,
                        refresh_token = session.refresh_token,
                    }),
                    headers = {
                        ["Content-Type"] = "application/x-www-form-urlencoded"
                    }
                }

                params = authz_keycloak_configure_params(params, conf)

                local res, err = httpc:request_uri(token_endpoint, params)

                if not res then
                    err = "Accessing token endpoint URL (" .. token_endpoint
                          .. ") failed: " .. err
                    log.error(err)
                    return nil, err
                end

                log.debug("Response data: " .. res.body)
                local json, err = authz_keycloak_parse_json_response(res)

                if not json then
                    err = "Could not decode JSON from token endpoint"
                          .. (err and (": " .. err) or '.')
                    log.error(err)
                    return nil, err
                end

                if not json.access_token then
                    -- Clear session.
                    log.debug("Answer didn't contain a new access token. Clearing session.")
                    session = nil
                else
                    log.debug("Got new access token.")
                    -- Save access token.
                    session.access_token = json.access_token

                    -- Calculate and save access token expiry time.
                    session.access_token_expiration = current_time
                        + authz_keycloak_access_token_expires_in(conf, json.expires_in)

                    -- Save refresh token, maybe.
                    if json.refresh_token ~= nil then
                        log.debug("Got new refresh token.")
                        session.refresh_token = json.refresh_token

                        -- Calculate and save refresh token expiry time.
                        session.refresh_token_expiration = current_time
                            + authz_keycloak_refresh_token_expires_in(conf,
                                                                      json.refresh_expires_in)
                    end

                    authz_keycloak_cache_set("access-tokens",
                                             token_endpoint .. ":" .. client_id,
                                             core.json.encode(session), ttl)
                end
            else
                -- No refresh token available, or it has expired. Clear session.
                log.debug("No or expired refresh token. Clearing session.")
                session = nil
            end
        end
    end

    if not session then
        -- No session available. Create a new one.

        log.debug("Getting access token for Protection API from token endpoint.")
        local httpc = authz_keycloak_get_http_client(conf)

        local params = {
            method = "POST",
            body = ngx.encode_args({
                grant_type = "client_credentials",
                client_id = client_id,
                client_secret = conf.client_secret,
            }),
            headers = {
                ["Content-Type"] = "application/x-www-form-urlencoded"
            }
        }

        params = authz_keycloak_configure_params(params, conf)

        local current_time = ngx.time()

        local res, err = httpc:request_uri(token_endpoint, params)

        if not res then
            err = "Accessing token endpoint URL (" .. token_endpoint .. ") failed: " .. err
            log.error(err)
            return nil, err
        end

        log.debug("Response data: " .. res.body)
        local json, err = authz_keycloak_parse_json_response(res)

        if not json then
            err = "Could not decode JSON from token endpoint" .. (err and (": " .. err) or '.')
            log.error(err)
            return nil, err
        end

        if not json.access_token then
            err = "Response does not contain access_token field."
            log.error(err)
            return nil, err
        end

        session = {}

        -- Save access token.
        session.access_token = json.access_token

        -- Calculate and save access token expiry time.
        session.access_token_expiration = current_time
            + authz_keycloak_access_token_expires_in(conf, json.expires_in)

        -- Save refresh token, maybe.
        if json.refresh_token ~= nil then
            session.refresh_token = json.refresh_token

            -- Calculate and save refresh token expiry time.
            session.refresh_token_expiration = current_time
                + authz_keycloak_refresh_token_expires_in(conf, json.refresh_expires_in)
        end

        authz_keycloak_cache_set("access-tokens", token_endpoint .. ":" .. client_id,
                                 core.json.encode(session), ttl)
    end

    return session.access_token
end
-- Resolve a URI to one or more resource IDs via Keycloak's UMA resource
-- registration endpoint (matchingUri=true). Requires a service-account
-- access token. Returns the list of resource IDs, or nil, err.
local function authz_keycloak_resolve_resource(conf, uri, sa_access_token)
    -- Get resource registration endpoint URL.
    local resource_registration_endpoint = authz_keycloak_get_resource_registration_endpoint(conf)

    if not resource_registration_endpoint then
        local err = "Unable to determine registration endpoint."
        log.error(err)
        return nil, err
    end

    log.debug("Resource registration endpoint: ", resource_registration_endpoint)

    local httpc = authz_keycloak_get_http_client(conf)

    local params = {
        method = "GET",
        query = {uri = uri, matchingUri = "true"},
        headers = {
            ["Authorization"] = "Bearer " .. sa_access_token
        }
    }

    params = authz_keycloak_configure_params(params, conf)

    local res, err = httpc:request_uri(resource_registration_endpoint, params)

    if not res then
        err = "Accessing resource registration endpoint URL (" .. resource_registration_endpoint
              .. ") failed: " .. err
        log.error(err)
        return nil, err
    end

    log.debug("Response data: " .. res.body)
    -- The endpoint returns a bare JSON array; wrap it in an object so the
    -- shared JSON-response parser can decode it uniformly.
    res.body = '{"resources": ' .. res.body .. '}'
    local json, err = authz_keycloak_parse_json_response(res)

    if not json then
        err = "Could not decode JSON from resource registration endpoint"
              .. (err and (": " .. err) or '.')
        log.error(err)
        return nil, err
    end

    return json.resources
end


-- Evaluate the caller's permissions against Keycloak using the UMA-ticket
-- grant ("decision" response mode). Returns nothing when access is granted;
-- otherwise returns an HTTP status (plus optional body) to respond with.
local function evaluate_permissions(conf, ctx, token)
    -- Ensure discovered data.
    local err = authz_keycloak_ensure_discovered_data(conf)
    if err then
        return 503, err
    end

    local permission

    if conf.lazy_load_paths then
        -- Ensure service account access token.
        local sa_access_token, err = authz_keycloak_ensure_sa_access_token(conf)
        if err then
            log.error(err)
            return 503, err
        end

        -- Resolve URI to resource(s).
        permission, err = authz_keycloak_resolve_resource(conf, ctx.var.request_uri,
                                                          sa_access_token)

        -- Check result.
        if permission == nil then
            -- No result back from resource registration endpoint.
            log.error(err)
            return 503, err
        end
    else
        -- Use statically configured permissions.
        permission = conf.permissions
    end

    -- Return 403 or 307 if permission is empty and enforcement mode is "ENFORCING".
    if #permission == 0 and conf.policy_enforcement_mode == "ENFORCING" then
        -- Return Keycloak-style message for consistency.
        if conf.access_denied_redirect_uri then
            core.response.set_header("Location", conf.access_denied_redirect_uri)
            return 307
        end
        return 403, '{"error":"access_denied","error_description":"not_authorized"}'
    end

    -- Determine scope from HTTP method, maybe.
    local scope
    if conf.http_method_as_scope then
        scope = ctx.var.request_method
    end

    if scope then
        -- Loop over permissions and add scope.
        for k, v in pairs(permission) do
            if v:find("#", 1, true) then
                -- Already contains scope.
                permission[k] = v .. ", " .. scope
            else
                -- Doesn't contain scope yet.
                permission[k] = v .. "#" .. scope
            end
        end
    end

    for k, v in pairs(permission) do
        log.debug("Requesting permission ", v, ".")
    end

    -- Get token endpoint URL.
    local token_endpoint = authz_keycloak_get_token_endpoint(conf)
    if not token_endpoint then
        err = "Unable to determine token endpoint."
        log.error(err)
        return 503, err
    end
    log.debug("Token endpoint: ", token_endpoint)

    local httpc = authz_keycloak_get_http_client(conf)

    local params = {
        method = "POST",
        body = ngx.encode_args({
            grant_type = conf.grant_type,
            audience = conf.client_id,
            response_mode = "decision",
            permission = permission
        }),
        headers = {
            ["Content-Type"] = "application/x-www-form-urlencoded",
            ["Authorization"] = token
        }
    }

    params = authz_keycloak_configure_params(params, conf)

    local res, err = httpc:request_uri(token_endpoint, params)

    if not res then
        err = "Error while sending authz request to " .. token_endpoint .. ": " .. err
        log.error(err)
        return 503
    end

    log.debug("Response status: ", res.status, ", data: ", res.body)

    if res.status == 403 then
        -- Request permanently denied, e.g. due to lacking permissions.
        log.debug('Request denied: HTTP 403 Forbidden. Body: ', res.body)
        if conf.access_denied_redirect_uri then
            core.response.set_header("Location", conf.access_denied_redirect_uri)
            return 307
        end

        return res.status, res.body
    elseif res.status == 401 then
        -- Request temporarily denied, e.g access token not valid.
        log.debug('Request denied: HTTP 401 Unauthorized. Body: ', res.body)
        return res.status, res.body
    elseif res.status >= 400 then
        -- Some other error. Log full response.
        log.error('Request denied: Token endpoint returned an error (status: ',
                  res.status, ', body: ', res.body, ').')
        return res.status, res.body
    end

    -- Request accepted.
end


-- Extract the JWT from the Authorization header, prefixing it with
-- "Bearer " when the client sent a bare token.
local function fetch_jwt_token(ctx)
    local token = core.request.header(ctx, "Authorization")
    if not token then
        return nil, "authorization header not available"
    end

    local prefix = sub_str(token, 1, 7)
    if prefix ~= 'Bearer ' and prefix ~= 'bearer ' then
        return "Bearer " .. token
    end
    return token
end

-- To get new access token by calling get token api
-- (resource-owner password grant); reads username/password from the
-- x-www-form-urlencoded request body and proxies Keycloak's response.
local function generate_token_using_password_grant(conf,ctx)
    log.debug("generate_token_using_password_grant Function Called")

    local body, err = core.request.get_body()
    if err or not body then
        log.error("Failed to get request body: ", err)
        return 503
    end
    local parameters = core.string.decode_args(body)

    local username = parameters["username"]
    local password = parameters["password"]

    if not username then
        local err = "username is missing."
        log.warn(err)
        return 422, {message = err}
    end
    if not password then
        local err = "password is missing."
        log.warn(err)
        return 422, {message = err}
    end

    local client_id = conf.client_id

    local token_endpoint = authz_keycloak_get_token_endpoint(conf)

    if not token_endpoint then
        local err = "Unable to determine token endpoint."
        log.error(err)
        return 503, {message = err}
    end
    local httpc = authz_keycloak_get_http_client(conf)

    local params = {
        method = "POST",
        body = ngx.encode_args({
            grant_type = "password",
            client_id = client_id,
            client_secret = conf.client_secret,
            username = username,
            password = password
        }),
        headers = {
            ["Content-Type"] = "application/x-www-form-urlencoded"
        }
    }

    params = authz_keycloak_configure_params(params, conf)

    local res, err = httpc:request_uri(token_endpoint, params)

    if not res then
        err = "Accessing token endpoint URL (" .. token_endpoint
              .. ") failed: " .. err
        log.error(err)
        return 401, {message = "Accessing token endpoint URL failed."}
    end

    log.debug("Response data: " .. res.body)
    local json, err = authz_keycloak_parse_json_response(res)

    if not json then
        err = "Could not decode JSON from response"
              .. (err and (": " .. err) or '.')
        log.error(err)
        return 401, {message = "Could not decode JSON from response."}
    end

    return res.status, res.body
end

-- Access phase entry point: optionally serve the password-grant token
-- endpoint, otherwise require a JWT and evaluate permissions against Keycloak.
function _M.access(conf, ctx)
    -- resolve secrets
    conf = fetch_secrets(conf, true, conf, "")
    local headers = core.request.headers(ctx)
    local need_grant_token = conf.password_grant_token_generation_incoming_uri and
        ctx.var.request_uri == conf.password_grant_token_generation_incoming_uri and
        headers["content-type"] == "application/x-www-form-urlencoded" and
        core.request.get_method() == "POST"
    if need_grant_token then
        return generate_token_using_password_grant(conf,ctx)
    end
    log.debug("hit keycloak-auth access")
    local jwt_token, err = fetch_jwt_token(ctx)
    if not jwt_token then
        log.error("failed to fetch JWT token: ", err)
        return 401, {message = "Missing JWT token in request"}
    end

    local status, body = evaluate_permissions(conf, ctx, jwt_token)
    if status then
        return status, body
    end
end


return _M
-- AWS Lambda serverless plugin: signs outgoing requests with either an API
-- key or AWS Signature Version 4 (IAM credentials), then delegates request
-- forwarding to the shared serverless generic-upstream implementation.

local ngx = ngx
local hmac = require("resty.hmac")
local hex_encode = require("resty.string").to_hex
local resty_sha256 = require("resty.sha256")
local str_strip = require("pl.stringx").strip
local norm_path = require("pl.path").normpath
local pairs = pairs
local tab_concat = table.concat
local tab_sort = table.sort
local os = os


local plugin_name = "aws-lambda"
local plugin_version = 0.1
local priority = -1899

local ALGO = "AWS4-HMAC-SHA256"

-- Raw (binary) HMAC-SHA256 of msg under key.
local function hmac256(key, msg)
    return hmac:new(key, hmac.ALGOS.SHA256):final(msg)
end

-- Hex-encoded SHA-256 digest of msg.
local function sha256(msg)
    local hash = resty_sha256:new()
    hash:update(msg)
    local digest = hash:final()
    return hex_encode(digest)
end

-- Derive the SigV4 signing key by chained HMACs over date, region, service.
local function get_signature_key(key, datestamp, region, service)
    local kDate = hmac256("AWS4" .. key, datestamp)
    local kRegion = hmac256(kDate, region)
    local kService = hmac256(kRegion, service)
    local kSigning = hmac256(kService, "aws4_request")
    return kSigning
end

local aws_authz_schema = {
    type = "object",
    properties = {
        -- API Key based authorization
        apikey = {type = "string"},
        -- IAM role based authorization, works via aws v4 request signing
        -- more at https://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html
        iam = {
            type = "object",
            properties = {
                accesskey = {
                    type = "string",
                    description = "access key id from from aws iam console"
                },
                secretkey = {
                    type = "string",
                    description = "secret access key from from aws iam console"
                },
                aws_region = {
                    type = "string",
                    default = "us-east-1",
                    description = "the aws region that is receiving the request"
                },
                service = {
                    type = "string",
                    default = "execute-api",
                    description = "the service that is receiving the request"
                }
            },
            required = {"accesskey", "secretkey"}
        }
    }
}

-- Mutates params.headers in place: injects x-api-key, or performs full AWS
-- SigV4 signing (canonical request -> string to sign -> signature ->
-- Authorization header). Client-supplied authorization is never overwritten.
local function request_processor(conf, ctx, params)
    local headers = params.headers
    -- set authorization headers if not already set by the client
    -- we are following not to overwrite the authz keys
    if not headers["x-api-key"] then
        if conf.authorization and conf.authorization.apikey then
            headers["x-api-key"] = conf.authorization.apikey
            return
        end
    end

    -- performing aws v4 request signing for IAM authorization
    -- visit https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html
    -- to look at the pseudocode in python.
    if headers["authorization"] or not conf.authorization or not conf.authorization.iam then
        return
    end

    -- create a date for headers and the credential string
    local t = ngx.time()
    local amzdate = os.date("!%Y%m%dT%H%M%SZ", t)
    local datestamp = os.date("!%Y%m%d", t) -- Date w/o time, used in credential scope
    headers["X-Amz-Date"] = amzdate

    -- computing canonical uri
    local canonical_uri = norm_path(params.path)
    if canonical_uri ~= "/" then
        if canonical_uri:sub(-1, -1) == "/" then
            canonical_uri = canonical_uri:sub(1, -2)
        end
        if canonical_uri:sub(1, 1) ~= "/" then
            canonical_uri = "/" .. canonical_uri
        end
    end

    -- computing canonical query string
    local canonical_qs = {}
    local canonical_qs_i = 0
    for k, v in pairs(params.query) do
        canonical_qs_i = canonical_qs_i + 1
        canonical_qs[canonical_qs_i] = ngx.unescape_uri(k) .. "=" .. ngx.unescape_uri(v)
    end

    -- sort to make the query-string order deterministic, as SigV4 requires
    tab_sort(canonical_qs)
    canonical_qs = tab_concat(canonical_qs, "&")

    -- computing canonical and signed headers

    local canonical_headers, signed_headers = {}, {}
    local signed_headers_i = 0
    for k, v in pairs(headers) do
        k = k:lower()
        if k ~= "connection" then
            signed_headers_i = signed_headers_i + 1
            signed_headers[signed_headers_i] = k
            -- strip starting and trailing spaces including strip multiple spaces into single space
            canonical_headers[k] = str_strip(v)
        end
    end
    tab_sort(signed_headers)

    -- canonical_headers doubles as map (by header name) and array (by index);
    -- the array part is filled here in sorted order for concatenation below.
    for i = 1, #signed_headers do
        local k = signed_headers[i]
        canonical_headers[i] = k .. ":" .. canonical_headers[k] .. "\n"
    end
    canonical_headers = tab_concat(canonical_headers, nil, 1, #signed_headers)
    signed_headers = tab_concat(signed_headers, ";")

    -- combining elements to form the canonical request (step-1)
    local canonical_request = params.method:upper() .. "\n"
        .. canonical_uri .. "\n"
        .. (canonical_qs or "") .. "\n"
        .. canonical_headers .. "\n"
        .. signed_headers .. "\n"
        .. sha256(params.body or "")

    -- creating the string to sign for aws signature v4 (step-2)
    local iam = conf.authorization.iam
    local credential_scope = datestamp .. "/" .. iam.aws_region .. "/"
        .. iam.service .. "/aws4_request"
    local string_to_sign = ALGO .. "\n"
        .. amzdate .. "\n"
        .. credential_scope .. "\n"
        .. sha256(canonical_request)

    -- calculate the signature (step-3)
    local signature_key = get_signature_key(iam.secretkey, datestamp, iam.aws_region, iam.service)
    local signature = hex_encode(hmac256(signature_key, string_to_sign))

    -- add info to the headers (step-4)
    headers["authorization"] = ALGO .. " Credential=" .. iam.accesskey
        .. "/" .. credential_scope
        .. ", SignedHeaders=" .. signed_headers
        .. ", Signature=" .. signature
end


local serverless_obj = require("apisix.plugins.serverless.generic-upstream")

return serverless_obj(plugin_name, plugin_version, priority, request_processor, aws_authz_schema)
-- Azure Functions serverless plugin: injects function-key authorization
-- headers and delegates forwarding to the shared generic-upstream module.
local plugin = require("apisix.plugin")
local plugin_name, plugin_version, priority = "azure-functions", 0.1, -1900

-- Per-route authorization attributes.
local azure_authz_schema = {
    type = "object",
    properties = {
        apikey = {type = "string"},
        clientid = {type = "string"}
    }
}

-- Gateway-wide fallback ("master") keys configured via plugin metadata.
local metadata_schema = {
    type = "object",
    properties = {
        master_apikey = {type = "string", default = ""},
        master_clientid = {type = "string", default = ""}
    }
}

-- Fill in x-functions-key / x-functions-clientid unless the client already
-- supplied either one. Precedence: client headers > plugin attributes >
-- plugin metadata master keys.
local function request_processor(conf, ctx, params)
    local headers = params.headers or {}
    params.headers = headers

    -- client-supplied credentials always win; never overwrite them
    if headers["x-functions-key"] or headers["x-functions-clientid"] then
        return
    end

    if conf.authorization then
        headers["x-functions-key"] = conf.authorization.apikey
        headers["x-functions-clientid"] = conf.authorization.clientid
        return
    end

    -- If neither api keys are set with the client request nor inside the plugin attributes
    -- plugin will fallback to the master key (if any) present inside the metadata.
    local metadata = plugin.plugin_metadata(plugin_name)
    if metadata then
        headers["x-functions-key"] = metadata.value.master_apikey
        headers["x-functions-clientid"] = metadata.value.master_clientid
    end
end


return require("apisix.plugins.serverless.generic-upstream")(plugin_name,
    plugin_version, priority, request_processor, azure_authz_schema, metadata_schema)
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local ngx = ngx +local ngx_re = require("ngx.re") +local consumer = require("apisix.consumer") +local schema_def = require("apisix.schema_def") +local auth_utils = require("apisix.utils.auth") + +local lrucache = core.lrucache.new({ + ttl = 300, count = 512 +}) + +local schema = { + type = "object", + title = "work with route or service object", + properties = { + hide_credentials = { + type = "boolean", + default = false, + } + }, + anonymous_consumer = schema_def.anonymous_consumer_schema, +} + +local consumer_schema = { + type = "object", + title = "work with consumer object", + properties = { + username = { type = "string" }, + password = { type = "string" }, + }, + encrypt_fields = {"password"}, + required = {"username", "password"}, +} + +local plugin_name = "basic-auth" + + +local _M = { + version = 0.1, + priority = 2520, + type = 'auth', + name = plugin_name, + schema = schema, + consumer_schema = consumer_schema +} + +function _M.check_schema(conf, schema_type) + local ok, err + if schema_type == core.schema.TYPE_CONSUMER then + ok, err = core.schema.check(consumer_schema, conf) + else + ok, err = core.schema.check(schema, conf) + end + + if not ok then + return false, err + end + + return true +end + +local function extract_auth_header(authorization) + + local function do_extract(auth) + local obj = { username = "", password = "" } + + 
local m, err = ngx.re.match(auth, "Basic\\s(.+)", "jo") + if err then + -- error authorization + return nil, err + end + + if not m then + return nil, "Invalid authorization header format" + end + + local decoded = ngx.decode_base64(m[1]) + + if not decoded then + return nil, "Failed to decode authentication header: " .. m[1] + end + + local res + res, err = ngx_re.split(decoded, ":") + if err then + return nil, "Split authorization err:" .. err + end + if #res < 2 then + return nil, "Split authorization err: invalid decoded data: " .. decoded + end + + obj.username = ngx.re.gsub(res[1], "\\s+", "", "jo") + obj.password = ngx.re.gsub(res[2], "\\s+", "", "jo") + core.log.info("plugin access phase, authorization: ", + obj.username, ": ", obj.password) + + return obj, nil + end + + local matcher, err = lrucache(authorization, nil, do_extract, authorization) + + if matcher then + return matcher.username, matcher.password, err + else + return "", "", err + end + +end + + +local function find_consumer(ctx) + local auth_header = core.request.header(ctx, "Authorization") + if not auth_header then + core.response.set_header("WWW-Authenticate", "Basic realm='.'") + return nil, nil, "Missing authorization in request" + end + + local username, password, err = extract_auth_header(auth_header) + if err then + if auth_utils.is_running_under_multi_auth(ctx) then + return nil, nil, err + end + core.log.warn(err) + return nil, nil, "Invalid authorization in request" + end + + local cur_consumer, consumer_conf, err = consumer.find_consumer(plugin_name, + "username", username) + if not cur_consumer then + err = "failed to find user: " .. 
(err or "invalid user") + if auth_utils.is_running_under_multi_auth(ctx) then + return nil, nil, err + end + core.log.warn(err) + return nil, nil, "Invalid user authorization" + end + + if cur_consumer.auth_conf.password ~= password then + return nil, nil, "Invalid user authorization" + end + + return cur_consumer, consumer_conf, err +end + + +function _M.rewrite(conf, ctx) + core.log.info("plugin access phase, conf: ", core.json.delay_encode(conf)) + + local cur_consumer, consumer_conf, err = find_consumer(ctx) + if not cur_consumer then + if not conf.anonymous_consumer then + return 401, { message = err } + end + cur_consumer, consumer_conf, err = consumer.get_anonymous_consumer(conf.anonymous_consumer) + if not cur_consumer then + err = "basic-auth failed to authenticate the request, code: 401. error: " .. err + core.log.error(err) + return 401, { message = "Invalid user authorization" } + end + end + + core.log.info("consumer: ", core.json.delay_encode(cur_consumer)) + + if conf.hide_credentials then + core.request.set_header(ctx, "Authorization", nil) + end + + consumer.attach_consumer(ctx, cur_consumer, consumer_conf) + + core.log.info("hit basic-auth access") +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/batch-requests.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/batch-requests.lua new file mode 100644 index 0000000..a1b5743 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/batch-requests.lua @@ -0,0 +1,309 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local http = require("resty.http") +local plugin = require("apisix.plugin") +local ngx = ngx +local ipairs = ipairs +local pairs = pairs +local str_find = core.string.find +local str_lower = string.lower + + +local plugin_name = "batch-requests" + +local default_uri = "/apisix/batch-requests" + +local attr_schema = { + type = "object", + properties = { + uri = { + type = "string", + description = "uri for batch-requests", + default = default_uri + } + }, +} + +local schema = { + type = "object", +} + +local default_max_body_size = 1024 * 1024 -- 1MiB +local metadata_schema = { + type = "object", + properties = { + max_body_size = { + description = "max pipeline body size in bytes", + type = "integer", + exclusiveMinimum = 0, + default = default_max_body_size, + }, + }, +} + +local method_schema = core.table.clone(core.schema.method_schema) +method_schema.default = "GET" + +local req_schema = { + type = "object", + properties = { + query = { + description = "pipeline query string", + type = "object" + }, + headers = { + description = "pipeline header", + type = "object" + }, + timeout = { + description = "pipeline timeout(ms)", + type = "integer", + default = 30000, + }, + pipeline = { + type = "array", + minItems = 1, + items = { + type = "object", + properties = { + version = { + description = "HTTP version", + type = "number", + enum = {1.0, 1.1}, + default = 1.1, + }, + method = method_schema, + path = { + type = "string", + minLength = 1, + }, + query = { + description = "request header", + type = "object", 
+ }, + headers = { + description = "request query string", + type = "object", + }, + ssl_verify = { + type = "boolean", + default = false + }, + } + } + } + }, + anyOf = { + {required = {"pipeline"}}, + }, +} + +local _M = { + version = 0.1, + priority = 4010, + name = plugin_name, + schema = schema, + metadata_schema = metadata_schema, + attr_schema = attr_schema, + scope = "global", +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + return core.schema.check(schema, conf) +end + + +local function check_input(data) + local ok, err = core.schema.check(req_schema, data) + if not ok then + return 400, {error_msg = "bad request body: " .. err} + end +end + +local function lowercase_key_or_init(obj) + if not obj then + return {} + end + + local lowercase_key_obj = {} + for k, v in pairs(obj) do + lowercase_key_obj[str_lower(k)] = v + end + + return lowercase_key_obj +end + +local function ensure_header_lowercase(data) + data.headers = lowercase_key_or_init(data.headers) + + for i,req in ipairs(data.pipeline) do + req.headers = lowercase_key_or_init(req.headers) + end +end + + +local function set_common_header(data) + local local_conf = core.config.local_conf() + local real_ip_hdr = core.table.try_read_attr(local_conf, "nginx_config", "http", + "real_ip_header") + -- we don't need to handle '_' to '-' as Nginx won't treat 'X_REAL_IP' as 'X-Real-IP' + real_ip_hdr = str_lower(real_ip_hdr) + + local outer_headers = core.request.headers(nil) + for i,req in ipairs(data.pipeline) do + for k, v in pairs(data.headers) do + if not req.headers[k] then + req.headers[k] = v + end + end + + if outer_headers then + for k, v in pairs(outer_headers) do + local is_content_header = str_find(k, "content-") == 1 + -- skip header start with "content-" + if not req.headers[k] and not is_content_header then + req.headers[k] = v + end + end + end + + req.headers[real_ip_hdr] = 
core.request.get_remote_client_ip() + end +end + + +local function set_common_query(data) + if not data.query then + return + end + + for i,req in ipairs(data.pipeline) do + if not req.query then + req.query = data.query + else + for k, v in pairs(data.query) do + if not req.query[k] then + req.query[k] = v + end + end + end + end +end + + +local function batch_requests(ctx) + local metadata = plugin.plugin_metadata(plugin_name) + core.log.info("metadata: ", core.json.delay_encode(metadata)) + + local max_body_size + if metadata then + max_body_size = metadata.value.max_body_size + else + max_body_size = default_max_body_size + end + + local req_body, err = core.request.get_body(max_body_size, ctx) + if err then + -- Nginx doesn't support 417: https://trac.nginx.org/nginx/ticket/2062 + -- So always return 413 instead + return 413, { error_msg = err } + end + if not req_body then + return 400, { + error_msg = "no request body, you should give at least one pipeline setting" + } + end + + local data, err = core.json.decode(req_body) + if not data then + return 400, { + error_msg = "invalid request body: " .. req_body .. ", err: " .. err + } + end + + local code, body = check_input(data) + if code then + return code, body + end + + local httpc = http.new() + httpc:set_timeout(data.timeout) + local ok, err = httpc:connect("127.0.0.1", ngx.var.server_port) + if not ok then + return 500, {error_msg = "connect to apisix failed: " .. err} + end + + ensure_header_lowercase(data) + set_common_header(data) + set_common_query(data) + + local responses, err = httpc:request_pipeline(data.pipeline) + if not responses then + return 400, {error_msg = "request failed: " .. 
err} + end + + local aggregated_resp = {} + for _, resp in ipairs(responses) do + if not resp.status then + core.table.insert(aggregated_resp, { + status = 504, + reason = "upstream timeout" + }) + end + local sub_resp = { + status = resp.status, + reason = resp.reason, + headers = resp.headers, + } + if resp.has_body then + local err + sub_resp.body, err = resp:read_body() + if err then + sub_resp.read_body_err = err + core.log.error("read pipeline response body failed: ", err) + else + resp:read_trailers() + end + end + core.table.insert(aggregated_resp, sub_resp) + end + return 200, aggregated_resp +end + + +function _M.api() + local uri = default_uri + local attr = plugin.plugin_attr(plugin_name) + if attr then + uri = attr.uri or default_uri + end + return { + { + methods = {"POST"}, + uri = uri, + handler = batch_requests, + } + } +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/body-transformer.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/body-transformer.lua new file mode 100644 index 0000000..9cb881a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/body-transformer.lua @@ -0,0 +1,261 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
--
local core = require("apisix.core")
local xml2lua = require("xml2lua")
local xmlhandler = require("xmlhandler.tree")
local template = require("resty.template")
local ngx = ngx
local decode_base64 = ngx.decode_base64
local req_set_body_data = ngx.req.set_body_data
local req_get_uri_args = ngx.req.get_uri_args
local str_format = string.format
local decode_args = ngx.decode_args
local str_find = core.string.find
local type = type
local pcall = pcall
local pairs = pairs
local next = next
local multipart = require("multipart")
local setmetatable = setmetatable

-- schema for one direction ("request" or "response") of the transformation
local transform_schema = {
    type = "object",
    properties = {
        input_format = { type = "string",
            enum = {"xml", "json", "encoded", "args", "plain", "multipart",}},
        template = { type = "string" },
        template_is_base64 = { type = "boolean" },
    },
    required = {"template"},
}

local schema = {
    type = "object",
    properties = {
        request = transform_schema,
        response = transform_schema,
    },
    anyOf = {
        {required = {"request"}},
        {required = {"response"}},
        {required = {"request", "response"}},
    },
}


local _M = {
    version = 0.1,
    priority = 1080,
    name = "body-transformer",
    schema = schema,
}


-- Escape the five XML special characters in s with their predefined entities
-- so values can be embedded safely in an XML template output.
-- NOTE: the replacement entity strings had been stripped by an
-- HTML-unescaping pass (e.g. gsub("&", "&") was a no-op); restored here.
local function escape_xml(s)
    return s:gsub("&", "&amp;")
        :gsub("<", "&lt;")
        :gsub(">", "&gt;")
        :gsub("'", "&apos;")
        :gsub('"', "&quot;")
end


-- Serialize s as JSON (used to embed values safely in JSON templates).
local function escape_json(s)
    return core.json.encode(s)
end


-- Recursively normalize the parsed XML table in place:
-- empty element tables become "" and namespace-qualified keys
-- ("ns:key") are re-keyed to their local name ("key").
local function remove_namespace(tbl)
    for k, v in pairs(tbl) do
        if type(v) == "table" and next(v) == nil then
            v = ""
            tbl[k] = v
        end
        if type(k) == "string" then
            local newk = k:match(".*:(.*)")
            if newk then
                tbl[newk] = v
                tbl[k] = nil
            end
            if type(v) == "table" then
                remove_namespace(v)
            end
        end
    end
    return tbl
end


-- Per-input_format body decoders; each returns a table (or nil, err).
local decoders = {
    xml = function(data)
        local handler = xmlhandler:new()
        local parser = xml2lua.parser(handler)
        local ok, err = pcall(parser.parse, parser, data)
        if ok then
            return remove_namespace(handler.root)
else + return nil, err + end + end, + json = function(data) + return core.json.decode(data) + end, + encoded = function(data) + return decode_args(data) + end, + args = function() + return req_get_uri_args() + end, + multipart = function (data, content_type_header) + local res = multipart(data, content_type_header) + return res + end +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +local function transform(conf, body, typ, ctx, request_method) + local out = {} + local _multipart + local format = conf[typ].input_format + local ct = ctx.var.http_content_type + if typ == "response" then + ct = ngx.header.content_type + end + if (body or request_method == "GET") and format ~= "plain" then + local err + if format then + out, err = decoders[format](body, ct) + if format == "multipart" then + _multipart = out + out = out:get_all_with_arrays() + end + if not out then + err = str_format("%s body decode: %s", typ, err) + core.log.error(err, ", body=", body) + return nil, 400, err + end + else + core.log.warn("no input format to parse ", typ, " body") + end + end + + local text = conf[typ].template + if (conf[typ].template_is_base64 or (format and format ~= "encoded" and format ~= "args")) then + text = decode_base64(text) or text + end + local ok, render = pcall(template.compile, text) + if not ok then + local err = render + err = str_format("%s template compile: %s", typ, err) + core.log.error(err) + return nil, 503, err + end + + setmetatable(out, {__index = { + _ctx = ctx, + _body = body, + _escape_xml = escape_xml, + _escape_json = escape_json, + _multipart = _multipart + }}) + + local ok, render_out = pcall(render, out) + if not ok then + local err = str_format("%s template rendering: %s", typ, render_out) + core.log.error(err) + return nil, 503, err + end + + core.log.info(typ, " body transform output=", render_out) + return render_out +end + + +local function set_input_format(conf, typ, ct, method) + if method == "GET" then + 
conf[typ].input_format = "args" + end + if conf[typ].input_format == nil and ct then + if ct:find("text/xml") then + conf[typ].input_format = "xml" + elseif ct:find("application/json") then + conf[typ].input_format = "json" + elseif str_find(ct:lower(), "application/x-www-form-urlencoded", nil, true) then + conf[typ].input_format = "encoded" + elseif str_find(ct:lower(), "multipart/", nil, true) then + conf[typ].input_format = "multipart" + end + end +end + + +function _M.rewrite(conf, ctx) + if conf.request then + local request_method = ngx.var.request_method + conf = core.table.deepcopy(conf) + ctx.body_transformer_conf = conf + local body = core.request.get_body() + set_input_format(conf, "request", ctx.var.http_content_type, request_method) + local out, status, err = transform(conf, body, "request", ctx, request_method) + if not out then + return status, { message = err } + end + req_set_body_data(out) + end +end + + +function _M.header_filter(conf, ctx) + if conf.response then + if not ctx.body_transformer_conf then + conf = core.table.deepcopy(conf) + ctx.body_transformer_conf = conf + end + set_input_format(conf, "response", ngx.header.content_type) + core.response.clear_header_as_body_modified() + end +end + + +function _M.body_filter(_, ctx) + local conf = ctx.body_transformer_conf + if not conf then + return + end + if conf.response then + local body = core.response.hold_body_chunk(ctx) + if ngx.arg[2] == false and not body then + return + end + + local out = transform(conf, body, "response", ctx) + if not out then + core.log.error("failed to transform response body: ", body) + return + end + + ngx.arg[1] = out + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/brotli.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/brotli.lua new file mode 100644 index 0000000..031bd8e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/brotli.lua @@ -0,0 +1,248 @@ +-- +-- Licensed to the Apache 
Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local ngx = ngx +local ngx_re_gmatch = ngx.re.gmatch +local ngx_header = ngx.header +local req_http_version = ngx.req.http_version +local str_sub = string.sub +local ipairs = ipairs +local tonumber = tonumber +local type = type +local is_loaded, brotli = pcall(require, "brotli") + + +local schema = { + type = "object", + properties = { + types = { + anyOf = { + { + type = "array", + minItems = 1, + items = { + type = "string", + minLength = 1, + }, + }, + { + enum = {"*"} + } + }, + default = {"text/html"} + }, + min_length = { + type = "integer", + minimum = 1, + default = 20, + }, + mode = { + type = "integer", + minimum = 0, + maximum = 2, + default = 0, + -- 0: MODE_GENERIC (default), + -- 1: MODE_TEXT (for UTF-8 format text input) + -- 2: MODE_FONT (for WOFF 2.0) + }, + comp_level = { + type = "integer", + minimum = 0, + maximum = 11, + default = 6, + -- follow the default value from ngx_brotli brotli_comp_level + }, + lgwin = { + type = "integer", + default = 19, + -- follow the default value from ngx_brotli brotli_window + enum = {0,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24}, + }, + lgblock = { + type = "integer", + default = 0, + enum = 
{0,16,17,18,19,20,21,22,23,24}, + }, + http_version = { + enum = {1.1, 1.0}, + default = 1.1, + }, + vary = { + type = "boolean", + } + }, +} + + +local _M = { + version = 0.1, + priority = 996, + name = "brotli", + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +local function create_brotli_compressor(mode, comp_level, lgwin, lgblock) + local options = { + mode = mode, + quality = comp_level, + lgwin = lgwin, + lgblock = lgblock, + } + return brotli.compressor:new(options) +end + + +local function check_accept_encoding(ctx) + local accept_encoding = core.request.header(ctx, "Accept-Encoding") + -- no Accept-Encoding + if not accept_encoding then + return false + end + + -- single Accept-Encoding + if accept_encoding == "*" or accept_encoding == "br" then + return true + end + + -- multi Accept-Encoding + local iterator, err = ngx_re_gmatch(accept_encoding, + [[([a-z\*]+)(;q=)?([0-9.]*)?]], "jo") + if not iterator then + core.log.error("gmatch failed, error: ", err) + return false + end + + local captures + while true do + captures, err = iterator() + if not captures then + break + end + if err then + core.log.error("iterator failed, error: ", err) + return false + end + if (captures[1] == "br" or captures[1] == "*") and + (not captures[2] or captures[3] ~= "0") then + return true + end + end + + return false +end + + +function _M.header_filter(conf, ctx) + if not is_loaded then + core.log.error("please check the brotli library") + return + end + + local allow_encoding = check_accept_encoding(ctx) + if not allow_encoding then + return + end + + local content_encoded = ngx_header["Content-Encoding"] + if content_encoded then + -- Don't compress if Content-Encoding is present in upstream data + return + end + + local types = conf.types + local content_type = ngx_header["Content-Type"] + if not content_type then + -- Like Nginx, don't compress if Content-Type is missing + return + end + + if type(types) == 
"table" then + local matched = false + local from = core.string.find(content_type, ";") + if from then + content_type = str_sub(content_type, 1, from - 1) + end + + for _, ty in ipairs(types) do + if content_type == ty then + matched = true + break + end + end + + if not matched then + return + end + end + + local content_length = tonumber(ngx_header["Content-Length"]) + if content_length then + local min_length = conf.min_length + if content_length < min_length then + return + end + -- Like Nginx, don't check min_length if Content-Length is missing + end + + local http_version = req_http_version() + if http_version < conf.http_version then + return + end + + if conf.vary then + core.response.add_header("Vary", "Accept-Encoding") + end + + local compressor = create_brotli_compressor(conf.mode, conf.comp_level, + conf.lgwin, conf.lgblock) + if not compressor then + core.log.error("failed to create brotli compressor") + return + end + + ctx.brotli_matched = true + ctx.compressor = compressor + core.response.clear_header_as_body_modified() + core.response.add_header("Content-Encoding", "br") +end + + +function _M.body_filter(conf, ctx) + if not ctx.brotli_matched then + return + end + + local chunk, eof = ngx.arg[1], ngx.arg[2] + if type(chunk) == "string" and chunk ~= "" then + local encode_chunk = ctx.compressor:compress(chunk) + ngx.arg[1] = encode_chunk .. ctx.compressor:flush() + end + + if eof then + -- overwriting the arg[1], results into partial response + ngx.arg[1] = ngx.arg[1] .. ctx.compressor:finish() + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/cas-auth.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/cas-auth.lua new file mode 100644 index 0000000..d949636 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/cas-auth.lua @@ -0,0 +1,201 @@ +-- +---- Licensed to the Apache Software Foundation (ASF) under one or more +---- contributor license agreements. 
See the NOTICE file distributed with +---- this work for additional information regarding copyright ownership. +---- The ASF licenses this file to You under the Apache License, Version 2.0 +---- (the "License"); you may not use this file except in compliance with +---- the License. You may obtain a copy of the License at +---- +---- http://www.apache.org/licenses/LICENSE-2.0 +---- +---- Unless required by applicable law or agreed to in writing, software +---- distributed under the License is distributed on an "AS IS" BASIS, +---- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +---- See the License for the specific language governing permissions and +---- limitations under the License. +---- +local core = require("apisix.core") +local http = require("resty.http") +local ngx = ngx +local ngx_re_match = ngx.re.match + +local CAS_REQUEST_URI = "CAS_REQUEST_URI" +local COOKIE_NAME = "CAS_SESSION" +local COOKIE_PARAMS = "; Path=/; HttpOnly" +local SESSION_LIFETIME = 3600 +local STORE_NAME = "cas_sessions" + +local store = ngx.shared[STORE_NAME] + + +local plugin_name = "cas-auth" +local schema = { + type = "object", + properties = { + idp_uri = {type = "string"}, + cas_callback_uri = {type = "string"}, + logout_uri = {type = "string"}, + }, + required = { + "idp_uri", "cas_callback_uri", "logout_uri" + } +} + +local _M = { + version = 0.1, + priority = 2597, + name = plugin_name, + schema = schema +} + +function _M.check_schema(conf) + local check = {"idp_uri"} + core.utils.check_https(check, conf, plugin_name) + return core.schema.check(schema, conf) +end + +local function uri_without_ticket(conf, ctx) + return ctx.var.scheme .. "://" .. ctx.var.host .. ":" .. + ctx.var.server_port .. conf.cas_callback_uri +end + +local function get_session_id(ctx) + return ctx.var["cookie_" .. COOKIE_NAME] +end + +local function set_our_cookie(name, val) + core.response.add_header("Set-Cookie", name .. "=" .. val .. 
COOKIE_PARAMS) +end + +local function first_access(conf, ctx) + local login_uri = conf.idp_uri .. "/login?" .. + ngx.encode_args({ service = uri_without_ticket(conf, ctx) }) + core.log.info("first access: ", login_uri, + ", cookie: ", ctx.var.http_cookie, ", request_uri: ", ctx.var.request_uri) + set_our_cookie(CAS_REQUEST_URI, ctx.var.request_uri) + core.response.set_header("Location", login_uri) + return ngx.HTTP_MOVED_TEMPORARILY +end + +local function with_session_id(conf, ctx, session_id) + -- does the cookie exist in our store? + local user = store:get(session_id); + core.log.info("ticket=", session_id, ", user=", user) + if user == nil then + set_our_cookie(COOKIE_NAME, "deleted; Max-Age=0") + return first_access(conf, ctx) + else + -- refresh the TTL + store:set(session_id, user, SESSION_LIFETIME) + end +end + +local function set_store_and_cookie(session_id, user) + -- place cookie into cookie store + local success, err, forcible = store:add(session_id, user, SESSION_LIFETIME) + if success then + if forcible then + core.log.info("CAS cookie store is out of memory") + end + set_our_cookie(COOKIE_NAME, session_id) + else + if err == "no memory" then + core.log.emerg("CAS cookie store is out of memory") + elseif err == "exists" then + core.log.error("Same CAS ticket validated twice, this should never happen!") + else + core.log.error("CAS cookie store: ", err) + end + end + return success +end + +local function validate(conf, ctx, ticket) + -- send a request to CAS to validate the ticket + local httpc = http.new() + local res, err = httpc:request_uri(conf.idp_uri .. 
"/serviceValidate",
        { query = { ticket = ticket, service = uri_without_ticket(conf, ctx) } })

    if res and res.status == ngx.HTTP_OK and res.body ~= nil then
        -- A successful CAS v2 serviceValidate response is XML containing a
        -- <cas:authenticationSuccess> element that wraps <cas:user>.
        -- NOTE: these tag literals had been stripped (leaving an empty-string
        -- find/match that accepted every 200 response); restored here.
        if core.string.find(res.body, "<cas:authenticationSuccess>") then
            local m = ngx_re_match(res.body, "<cas:user>(.*?)</cas:user>", "jo");
            if m then
                return m[1]
            end
        else
            core.log.info("CAS serviceValidate failed: ", res.body)
        end
    else
        core.log.error("validate ticket failed: status=", (res and res.status),
            ", has_body=", (res and res.body ~= nil or false), ", err=", err)
    end
    return nil
end

-- Validate the ticket against the CAS server; on success persist the session
-- and redirect the browser back to the originally requested URI.
local function validate_with_cas(conf, ctx, ticket)
    local user = validate(conf, ctx, ticket)
    if user and set_store_and_cookie(ticket, user) then
        local request_uri = ctx.var["cookie_" .. CAS_REQUEST_URI]
        set_our_cookie(CAS_REQUEST_URI, "deleted; Max-Age=0")
        core.log.info("ticket: ", ticket,
            ", cookie: ", ctx.var.http_cookie, ", request_uri: ", request_uri, ", user=", user)
        core.response.set_header("Location", request_uri)
        return ngx.HTTP_MOVED_TEMPORARILY
    else
        return ngx.HTTP_UNAUTHORIZED, {message = "invalid ticket"}
    end
end

-- Log the user out: drop the server-side session, expire the session cookie
-- and redirect to the IdP's logout endpoint.
local function logout(conf, ctx)
    local session_id = get_session_id(ctx)
    if session_id == nil then
        return ngx.HTTP_UNAUTHORIZED
    end

    core.log.info("logout: ticket=", session_id, ", cookie=", ctx.var.http_cookie)
    store:delete(session_id)
    set_our_cookie(COOKIE_NAME, "deleted; Max-Age=0")

    core.response.set_header("Location", conf.idp_uri ..
"/logout")
    return ngx.HTTP_MOVED_TEMPORARILY
end

-- Plugin entry point: dispatch the request to logout handling, IdP
-- back-channel logout, an existing session, ticket validation, or a
-- first-access redirect to the IdP login page.
function _M.access(conf, ctx)
    local method = core.request.get_method()
    local uri = ctx.var.uri

    if method == "GET" and uri == conf.logout_uri then
        return logout(conf, ctx)
    end

    if method == "POST" and uri == conf.cas_callback_uri then
        -- Back-channel logout (SLO): the IdP POSTs a SAML LogoutRequest whose
        -- SessionIndex is the service ticket we used as the session id.
        -- NOTE: the tag literals had been stripped from the pattern below
        -- (making it capture the whole body); restored here.
        local data = core.request.get_body()
        local ticket = data:match("<samlp:SessionIndex>(.*)</samlp:SessionIndex>")
        if ticket == nil then
            return ngx.HTTP_BAD_REQUEST,
                {message = "invalid logout request from IdP, no ticket"}
        end
        core.log.info("Back-channel logout (SLO) from IdP: LogoutRequest: ", data)
        local session_id = ticket
        local user = store:get(session_id);
        if user then
            store:delete(session_id)
            -- fixed log typo: "tocket" -> "ticket"
            core.log.info("SLO: user=", user, ", ticket=", ticket)
        end
    else
        local session_id = get_session_id(ctx)
        if session_id ~= nil then
            return with_session_id(conf, ctx, session_id)
        end

        local ticket = ctx.var.arg_ticket
        if ticket ~= nil and uri == conf.cas_callback_uri then
            return validate_with_cas(conf, ctx, ticket)
        else
            return first_access(conf, ctx)
        end
    end
end

return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/chaitin-waf.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/chaitin-waf.lua
new file mode 100644
index 0000000..39bb088
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/chaitin-waf.lua
@@ -0,0 +1,421 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local require = require +local core = require("apisix.core") +local rr_balancer = require("apisix.balancer.roundrobin") +local plugin = require("apisix.plugin") +local t1k = require "resty.t1k" +local expr = require("resty.expr.v1") + +local ngx = ngx +local ngx_now = ngx.now +local string = string +local fmt = string.format +local tostring = tostring +local tonumber = tonumber +local ipairs = ipairs + +local plugin_name = "chaitin-waf" + +local vars_schema = { + type = "array", +} + +local lrucache = core.lrucache.new({ + ttl = 300, count = 1024 +}) + +local match_schema = { + type = "array", + items = { + type = "object", + properties = { + vars = vars_schema + } + }, +} + +local plugin_schema = { + type = "object", + properties = { + mode = { + type = "string", + enum = { "off", "monitor", "block", nil }, + default = nil, + }, + match = match_schema, + append_waf_resp_header = { + type = "boolean", + default = true + }, + append_waf_debug_header = { + type = "boolean", + default = false + }, + config = { + type = "object", + properties = { + connect_timeout = { + type = "integer", + }, + send_timeout = { + type = "integer", + }, + read_timeout = { + type = "integer", + }, + req_body_size = { + type = "integer", + }, + keepalive_size = { + type = "integer", + }, + keepalive_timeout = { + type = "integer", + }, + real_client_ip = { + type = "boolean" + } + }, + }, + }, +} + +local metadata_schema = { + type = "object", + properties = { + mode = { + type = "string", + enum = { "off", "monitor", "block", nil }, + default = nil, + }, + nodes = { + 
        type = "array",
        items = {
            type = "object",
            properties = {
                host = {
                    type = "string",
                    pattern = "^\\*?[0-9a-zA-Z-._\\[\\]:/]+$"
                },
                port = {
                    type = "integer",
                    minimum = 1,
                    default = 80
                },
            },
            required = { "host" }
        },
        minItems = 1,
    },
    config = {
        type = "object",
        properties = {
            connect_timeout = {
                type = "integer",
                default = 1000 -- milliseconds
            },
            send_timeout = {
                type = "integer",
                default = 1000 -- milliseconds
            },
            read_timeout = {
                type = "integer",
                default = 1000 -- milliseconds
            },
            req_body_size = {
                type = "integer",
                default = 1024 -- request body size limit, not a duration;
                               -- unit defined by lua-resty-t1k (KB) — TODO confirm
            },
            -- maximum concurrent idle connections to
            -- the SafeLine WAF detection service
            keepalive_size = {
                type = "integer",
                default = 256
            },
            keepalive_timeout = {
                type = "integer",
                default = 60000 -- milliseconds
            },
            real_client_ip = {
                type = "boolean",
                default = true
            }
        },
        default = {},
    },
},
required = { "nodes" },
}

-- Plugin descriptor registered with the APISIX plugin framework.
local _M = {
    version = 0.1,
    priority = 2700,
    name = plugin_name,
    schema = plugin_schema,
    metadata_schema = metadata_schema
}

-- Round-robin picker over the configured WAF nodes; rebuilt lazily when the
-- metadata node list changes (see get_chaitin_server).
local global_server_picker

-- Diagnostic response headers describing the WAF decision for this request.
local HEADER_CHAITIN_WAF = "X-APISIX-CHAITIN-WAF"
local HEADER_CHAITIN_WAF_ERROR = "X-APISIX-CHAITIN-WAF-ERROR"
local HEADER_CHAITIN_WAF_TIME = "X-APISIX-CHAITIN-WAF-TIME"
local HEADER_CHAITIN_WAF_STATUS = "X-APISIX-CHAITIN-WAF-STATUS"
local HEADER_CHAITIN_WAF_ACTION = "X-APISIX-CHAITIN-WAF-ACTION"
local HEADER_CHAITIN_WAF_SERVER = "X-APISIX-CHAITIN-WAF-SERVER"
-- JSON body template returned when the WAF rejects a request (filled with
-- status code and event id via string.format).
local blocked_message = [[{"code": %s, "success":false, ]] ..
+ [["message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "%s"}]] +local warning_message = "chaitin-waf monitor mode: request would have been rejected, event_id: " + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + local ok, err = core.schema.check(plugin_schema, conf) + + if not ok then + return false, err + end + + if conf.match then + for _, m in ipairs(conf.match) do + local ok, err = expr.new(m.vars) + if not ok then + return false, "failed to validate the 'vars' expression: " .. err + end + end + end + + return true +end + + +local function get_healthy_chaitin_server_nodes(metadata, checker) + local nodes = metadata.nodes + local new_nodes = core.table.new(0, #nodes) + + for i = 1, #nodes do + local host, port = nodes[i].host, nodes[i].port + new_nodes[host .. ":" .. tostring(port)] = 1 + end + + return new_nodes +end + + +local function get_chaitin_server(metadata, ctx) + if not global_server_picker or global_server_picker.upstream ~= metadata.value.nodes then + local up_nodes = get_healthy_chaitin_server_nodes(metadata.value) + if core.table.nkeys(up_nodes) == 0 then + return nil, nil, "no healthy nodes" + end + core.log.info("chaitin-waf nodes: ", core.json.delay_encode(up_nodes)) + + global_server_picker = rr_balancer.new(up_nodes, metadata.value.nodes) + end + + local server = global_server_picker.get(ctx) + local host, port, err = core.utils.parse_addr(server) + if err then + return nil, nil, err + end + + return host, port, nil +end + + +local function check_match(conf, ctx) + if not conf.match or #conf.match == 0 then + return true + end + + for _, match in ipairs(conf.match) do + local cache_key = tostring(match.vars) + + local exp, err = lrucache(cache_key, nil, function(vars) + return expr.new(vars) + end, match.vars) + + if not exp then + local msg = "failed to create match expression for " .. 
+ tostring(match.vars) .. ", err: " .. tostring(err) + return false, msg + end + + local matched = exp:eval(ctx.var) + if matched then + return true + end + end + + return false +end + + +local function get_conf(conf, metadata) + local t = { + mode = "block", + real_client_ip = true, + } + + if metadata.config then + t.connect_timeout = metadata.config.connect_timeout + t.send_timeout = metadata.config.send_timeout + t.read_timeout = metadata.config.read_timeout + t.req_body_size = metadata.config.req_body_size + t.keepalive_size = metadata.config.keepalive_size + t.keepalive_timeout = metadata.config.keepalive_timeout + t.real_client_ip = metadata.config.real_client_ip or t.real_client_ip + end + + if conf.config then + t.connect_timeout = conf.config.connect_timeout + t.send_timeout = conf.config.send_timeout + t.read_timeout = conf.config.read_timeout + t.req_body_size = conf.config.req_body_size + t.keepalive_size = conf.config.keepalive_size + t.keepalive_timeout = conf.config.keepalive_timeout + t.real_client_ip = conf.config.real_client_ip or t.real_client_ip + end + + t.mode = conf.mode or metadata.mode or t.mode + + return t +end + + +local function do_access(conf, ctx) + local extra_headers = {} + + local metadata = plugin.plugin_metadata(plugin_name) + if not core.table.try_read_attr(metadata, "value", "nodes") then + extra_headers[HEADER_CHAITIN_WAF] = "err" + extra_headers[HEADER_CHAITIN_WAF_ERROR] = "missing metadata" + return 500, nil, extra_headers + end + + local host, port, err = get_chaitin_server(metadata, ctx) + if err then + extra_headers[HEADER_CHAITIN_WAF] = "unhealthy" + extra_headers[HEADER_CHAITIN_WAF_ERROR] = tostring(err) + + return 500, nil, extra_headers + end + + core.log.info("picked chaitin-waf server: ", host, ":", port) + local t = get_conf(conf, metadata.value) + t.host = host + t.port = port + + extra_headers[HEADER_CHAITIN_WAF_SERVER] = host + + local mode = t.mode or "block" + if mode == "off" then + 
extra_headers[HEADER_CHAITIN_WAF] = "off" + return nil, nil, extra_headers + end + + local match, err = check_match(conf, ctx) + if not match then + if err then + extra_headers[HEADER_CHAITIN_WAF] = "err" + extra_headers[HEADER_CHAITIN_WAF_ERROR] = tostring(err) + return 500, nil, extra_headers + else + extra_headers[HEADER_CHAITIN_WAF] = "no" + return nil, nil, extra_headers + end + end + + if t.real_client_ip then + t.client_ip = ctx.var.http_x_forwarded_for or ctx.var.remote_addr + else + t.client_ip = ctx.var.remote_addr + end + + local start_time = ngx_now() * 1000 + local ok, err, result = t1k.do_access(t, false) + + extra_headers[HEADER_CHAITIN_WAF_TIME] = ngx_now() * 1000 - start_time + + if not ok then + extra_headers[HEADER_CHAITIN_WAF] = "waf-err" + local err_msg = tostring(err) + if core.string.find(err_msg, "timeout") then + extra_headers[HEADER_CHAITIN_WAF] = "timeout" + end + extra_headers[HEADER_CHAITIN_WAF_ERROR] = tostring(err) + + if mode == "monitor" then + core.log.warn("chaitin-waf monitor mode: detected waf error - ", err_msg) + return nil, nil, extra_headers + end + + return 500, nil, extra_headers + else + extra_headers[HEADER_CHAITIN_WAF] = "yes" + extra_headers[HEADER_CHAITIN_WAF_ACTION] = "pass" + end + + local code = 200 + extra_headers[HEADER_CHAITIN_WAF_STATUS] = code + + if result and result.status and result.status ~= 200 and result.event_id then + extra_headers[HEADER_CHAITIN_WAF_STATUS] = result.status + extra_headers[HEADER_CHAITIN_WAF_ACTION] = "reject" + + if mode == "monitor" then + core.log.warn(warning_message, result.event_id) + return nil, nil, extra_headers + end + + core.log.error("request rejected by chaitin-waf, event_id: " .. result.event_id) + + return tonumber(result.status), + fmt(blocked_message, result.status, result.event_id) .. 
"\n", + extra_headers + end + + return nil, nil, extra_headers +end + + +function _M.access(conf, ctx) + local code, msg, extra_headers = do_access(conf, ctx) + + if not conf.append_waf_debug_header then + extra_headers[HEADER_CHAITIN_WAF_ERROR] = nil + extra_headers[HEADER_CHAITIN_WAF_SERVER] = nil + end + + if conf.append_waf_resp_header then + core.response.set_header(extra_headers) + end + + return code, msg +end + + +function _M.header_filter(conf, ctx) + t1k.do_header_filter() +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/clickhouse-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/clickhouse-logger.lua new file mode 100644 index 0000000..793a8d4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/clickhouse-logger.lua @@ -0,0 +1,208 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local log_util = require("apisix.utils.log-util") +local core = require("apisix.core") +local http = require("resty.http") +local url = require("net.url") +local math_random = math.random + +local tostring = tostring + +local plugin_name = "clickhouse-logger" +local batch_processor_manager = bp_manager_mod.new(plugin_name) + +local schema = { + type = "object", + properties = { + -- deprecated, use "endpoint_addrs" instead + endpoint_addr = core.schema.uri_def, + endpoint_addrs = {items = core.schema.uri_def, type = "array", minItems = 1}, + user = {type = "string", default = ""}, + password = {type = "string", default = ""}, + database = {type = "string", default = ""}, + logtable = {type = "string", default = ""}, + timeout = {type = "integer", minimum = 1, default = 3}, + name = {type = "string", default = "clickhouse logger"}, + ssl_verify = {type = "boolean", default = true}, + log_format = {type = "object"}, + include_req_body = {type = "boolean", default = false}, + include_req_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + include_resp_body = {type = "boolean", default = false}, + include_resp_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + } + }, + oneOf = { + {required = {"endpoint_addr", "user", "password", "database", "logtable"}}, + {required = {"endpoint_addrs", "user", "password", "database", "logtable"}} + }, + encrypt_fields = {"password"}, +} + + +local metadata_schema = { + type = "object", + properties = { + log_format = { + type = "object" + } + }, +} + + +local _M = { + version = 0.1, + priority = 398, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + local check = 
{"endpoint_addrs"} + core.utils.check_https(check, conf, plugin_name) + core.utils.check_tls_bool({"ssl_verify"}, conf, plugin_name) + + return core.schema.check(schema, conf) +end + + +local function send_http_data(conf, log_message) + local err_msg + local res = true + local selected_endpoint_addr + if conf.endpoint_addr then + selected_endpoint_addr = conf.endpoint_addr + else + selected_endpoint_addr = conf.endpoint_addrs[math_random(#conf.endpoint_addrs)] + end + local url_decoded = url.parse(selected_endpoint_addr) + local host = url_decoded.host + local port = url_decoded.port + + core.log.info("sending a batch logs to ", selected_endpoint_addr) + + if not port then + if url_decoded.scheme == "https" then + port = 443 + else + port = 80 + end + end + + local httpc = http.new() + httpc:set_timeout(conf.timeout * 1000) + local ok, err = httpc:connect(host, port) + + if not ok then + return false, "failed to connect to host[" .. host .. "] port[" + .. tostring(port) .. "] " .. err + end + + if url_decoded.scheme == "https" then + ok, err = httpc:ssl_handshake(true, host, conf.ssl_verify) + if not ok then + return false, "failed to perform SSL with host[" .. host .. "] " + .. "port[" .. tostring(port) .. "] " .. err + end + end + + local httpc_res, httpc_err = httpc:request({ + method = "POST", + path = url_decoded.path, + query = url_decoded.query, + body = "INSERT INTO " .. conf.logtable .." FORMAT JSONEachRow " .. log_message, + headers = { + ["Host"] = url_decoded.host, + ["Content-Type"] = "application/json", + ["X-ClickHouse-User"] = conf.user, + ["X-ClickHouse-Key"] = conf.password, + ["X-ClickHouse-Database"] = conf.database + } + }) + + if not httpc_res then + return false, "error while sending data to [" .. host .. "] port[" + .. tostring(port) .. "] " .. httpc_err + end + + -- some error occurred in the server + if httpc_res.status >= 400 then + res = false + err_msg = "server returned status code[" .. httpc_res.status .. "] host[" + .. host .. 
"] port[" .. tostring(port) .. "] " + .. "body[" .. httpc_res:read_body() .. "]" + end + + return res, err_msg +end + + +function _M.body_filter(conf, ctx) + log_util.collect_body(conf, ctx) +end + + +function _M.log(conf, ctx) + local entry = log_util.get_log_entry(plugin_name, conf, ctx) + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + -- Generate a function to be executed by the batch processor + local func = function(entries, batch_max_size) + local data, err + + if batch_max_size == 1 then + data, err = core.json.encode(entries[1]) -- encode as single {} + else + local log_table = {} + for i = 1, #entries do + core.table.insert(log_table, core.json.encode(entries[i])) + end + data = core.table.concat(log_table, " ") -- assemble multi items as string "{} {}" + end + + if not data then + return false, 'error occurred while encoding the data: ' .. err + end + + return send_http_data(conf, data) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/client-control.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/client-control.lua new file mode 100644 index 0000000..1975098 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/client-control.lua @@ -0,0 +1,76 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
-- resty.apisix.client only exists on APISIX-Runtime builds; `ok` captures
-- whether the module loaded and is re-checked in rewrite() below
local ok, apisix_ngx_client = pcall(require, "resty.apisix.client")
local tonumber = tonumber


local schema = {
    type = "object",
    properties = {
        max_body_size = {
            type = "integer",
            minimum = 0,
            description = "Maximum message body size in bytes. No restriction when set to 0."
        },
    },
}


local plugin_name = "client-control"


-- Plugin descriptor; high priority (22000) so it runs before most plugins.
local _M = {
    version = 0.1,
    priority = 22000,
    name = plugin_name,
    schema = schema,
}


-- Validate the per-route plugin configuration against `schema`.
function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- Rewrite-phase handler: enforce max_body_size.
-- Returns 501 when the runtime lacks resty.apisix.client, 413 when the
-- declared Content-Length exceeds the limit, 503 when the runtime call fails.
function _M.rewrite(conf, ctx)
    -- module-level `ok` from the pcall(require, ...) above
    if not ok then
        core.log.error("need to build APISIX-Runtime to support client control")
        return 501
    end

    if conf.max_body_size then
        local len = tonumber(core.request.header(ctx, "Content-Length"))
        if len then
            -- if length is given in the header, check it immediately
            if conf.max_body_size ~= 0 and len > conf.max_body_size then
                return 413
            end
        end

        -- then check it when reading the body
        -- (this local `ok` shadows the module-level require flag)
        local ok, err = apisix_ngx_client.set_client_max_body_size(conf.max_body_size)
        if not ok then
            core.log.error("failed to set client max body size: ", err)
            return 503
        end
    end
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/consumer-restriction.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/consumer-restriction.lua
new file mode 100644
index 0000000..88c2bbd
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/consumer-restriction.lua
@@ -0,0 +1,164
@@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local ipairs = ipairs +local core = require("apisix.core") +local ngx = ngx +local schema = { + type = "object", + properties = { + type = { + type = "string", + enum = {"consumer_name", "service_id", "route_id", "consumer_group_id"}, + default = "consumer_name" + }, + blacklist = { + type = "array", + minItems = 1, + items = {type = "string"} + }, + whitelist = { + type = "array", + minItems = 1, + items = {type = "string"} + }, + allowed_by_methods = { + type = "array", + items = { + type = "object", + properties = { + user = { + type = "string" + }, + methods = { + type = "array", + minItems = 1, + items = core.schema.method_schema, + } + } + } + }, + rejected_code = {type = "integer", minimum = 200, default = 403}, + rejected_msg = {type = "string"} + }, + anyOf = { + {required = {"blacklist"}}, + {required = {"whitelist"}}, + {required = {"allowed_by_methods"}} + }, +} + +local plugin_name = "consumer-restriction" + +local _M = { + version = 0.1, + priority = 2400, + name = plugin_name, + schema = schema, +} + +local fetch_val_funcs = { + ["route_id"] = function(ctx) + return ctx.route_id + end, + ["service_id"] = function(ctx) + return ctx.service_id + end, + 
    ["consumer_name"] = function(ctx)
        return ctx.consumer_name
    end,
    ["consumer_group_id"] = function (ctx)
        return ctx.consumer_group_id
    end
}

-- Linear membership test: true when `value` equals an element of array `tab`.
local function is_include(value, tab)
    for k,v in ipairs(tab) do
        if v == value then
            return true
        end
    end
    return false
end

-- Evaluate `allowed_by_methods` rules: for the FIRST entry whose `user`
-- matches, the request method must be in that entry's `methods` list
-- (returns false otherwise). Users with no matching entry are allowed.
local function is_method_allowed(allowed_methods, method, user)
    for _, value in ipairs(allowed_methods) do
        if value.user == user then
            for _, allowed_method in ipairs(value.methods) do
                if allowed_method == method then
                    return true
                end
            end
            return false
        end
    end
    return true
end

-- Build the rejection response: configured code plus either the custom
-- rejected_msg or a generic "<type> is forbidden" message.
local function reject(conf)
    if conf.rejected_msg then
        return conf.rejected_code , { message = conf.rejected_msg }
    end
    return conf.rejected_code , { message = "The " .. conf.type .. " is forbidden."}
end

-- Validate the per-route plugin configuration against `schema`.
function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end
    return true
end

-- Access-phase handler: look up the restriction subject (consumer name,
-- service/route/consumer-group id) and apply blacklist / whitelist /
-- allowed_by_methods rules. A missing subject yields 401.
function _M.access(conf, ctx)
    local value = fetch_val_funcs[conf.type](ctx)
    local method = ngx.req.get_method()

    if not value then
        local err_msg = "The request is rejected, please check the "
                        .. conf.type ..
" for this request" + return 401, { message = err_msg} + end + core.log.info("value: ", value) + + local block = false + local whitelisted = false + + if conf.blacklist and #conf.blacklist > 0 then + if is_include(value, conf.blacklist) then + return reject(conf) + end + end + + if conf.whitelist and #conf.whitelist > 0 then + whitelisted = is_include(value, conf.whitelist) + if not whitelisted then + block = true + end + end + + if conf.allowed_by_methods and #conf.allowed_by_methods > 0 and not whitelisted then + if not is_method_allowed(conf.allowed_by_methods, method, value) then + block = true + end + end + + if block then + return reject(conf) + end +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/cors.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/cors.lua new file mode 100644 index 0000000..deae034 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/cors.lua @@ -0,0 +1,402 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local ngx = ngx +local plugin_name = "cors" +local str_find = core.string.find +local re_gmatch = ngx.re.gmatch +local re_compile = require("resty.core.regex").re_match_compile +local re_find = ngx.re.find +local ipairs = ipairs +local origins_pattern = [[^(\*|\*\*|null|\w+://[^,]+(,\w+://[^,]+)*)$]] + +local TYPE_ACCESS_CONTROL_ALLOW_ORIGIN = "ACAO" +local TYPE_TIMING_ALLOW_ORIGIN = "TAO" + +local lrucache = core.lrucache.new({ + type = "plugin", +}) + +local metadata_schema = { + type = "object", + properties = { + allow_origins = { + type = "object", + additionalProperties = { + type = "string", + pattern = origins_pattern + } + }, + }, +} + +local schema = { + type = "object", + properties = { + allow_origins = { + description = + "you can use '*' to allow all origins when no credentials," .. + "'**' to allow forcefully(it will bring some security risks, be carefully)," .. + "multiple origin use ',' to split. default: *.", + type = "string", + pattern = origins_pattern, + default = "*" + }, + allow_methods = { + description = + "you can use '*' to allow all methods when no credentials," .. + "'**' to allow forcefully(it will bring some security risks, be carefully)," .. + "multiple method use ',' to split. default: *.", + type = "string", + default = "*" + }, + allow_headers = { + description = + "you can use '*' to allow all header when no credentials," .. + "'**' to allow forcefully(it will bring some security risks, be carefully)," .. + "multiple header use ',' to split. default: *.", + type = "string", + default = "*" + }, + expose_headers = { + description = + "multiple header use ',' to split." .. + "If not specified, no custom headers are exposed.", + type = "string" + }, + max_age = { + description = + "maximum number of seconds the results can be cached." .. + "-1 means no cached, the max value is depend on browser," .. + "more details plz check MDN. 
default: 5.", + type = "integer", + default = 5 + }, + allow_credential = { + description = + "allow client append credential. according to CORS specification," .. + "if you set this option to 'true', you can not use '*' for other options.", + type = "boolean", + default = false + }, + allow_origins_by_regex = { + type = "array", + description = + "you can use regex to allow specific origins when no credentials," .. + "for example use [.*\\.test.com$] to allow a.test.com and b.test.com", + items = { + type = "string", + minLength = 1, + maxLength = 4096, + }, + minItems = 1, + uniqueItems = true, + }, + allow_origins_by_metadata = { + type = "array", + description = + "set allowed origins by referencing origins in plugin metadata", + items = { + type = "string", + minLength = 1, + maxLength = 4096, + }, + minItems = 1, + uniqueItems = true, + }, + timing_allow_origins = { + description = + "you can use '*' to allow all origins which can view timing information " .. + "when no credentials," .. + "'**' to allow forcefully (it will bring some security risks, be careful)," .. + "multiple origin use ',' to split. default: nil", + type = "string", + pattern = origins_pattern + }, + timing_allow_origins_by_regex = { + type = "array", + description = + "you can use regex to allow specific origins which can view timing information," .. 
+ "for example use [.*\\.test.com] to allow a.test.com and b.test.com", + items = { + type = "string", + minLength = 1, + maxLength = 4096, + }, + minItems = 1, + uniqueItems = true, + }, + } +} + +local _M = { + version = 0.1, + priority = 4000, + name = plugin_name, + schema = schema, + metadata_schema = metadata_schema, +} + + +local function create_multiple_origin_cache(allow_origins) + if not str_find(allow_origins, ",") then + return nil + end + local origin_cache = {} + local iterator, err = re_gmatch(allow_origins, "([^,]+)", "jiox") + if not iterator then + core.log.error("match origins failed: ", err) + return nil + end + while true do + local origin, err = iterator() + if err then + core.log.error("iterate origins failed: ", err) + return nil + end + if not origin then + break + end + origin_cache[origin[0]] = true + end + return origin_cache +end + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + if conf.allow_credential then + if conf.allow_origins == "*" or conf.allow_methods == "*" or + conf.allow_headers == "*" or conf.expose_headers == "*" or + conf.timing_allow_origins == "*" then + return false, "you can not set '*' for other option when 'allow_credential' is true" + end + end + if conf.allow_origins_by_regex then + for i, re_rule in ipairs(conf.allow_origins_by_regex) do + local ok, err = re_compile(re_rule, "j") + if not ok then + return false, err + end + end + end + + if conf.timing_allow_origins_by_regex then + for i, re_rule in ipairs(conf.timing_allow_origins_by_regex) do + local ok, err = re_compile(re_rule, "j") + if not ok then + return false, err + end + end + end + + return true +end + + +local function set_cors_headers(conf, ctx) + local allow_methods = conf.allow_methods + if allow_methods == "**" then + allow_methods = 
"GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE" + end + + core.response.set_header("Access-Control-Allow-Origin", ctx.cors_allow_origins) + core.response.set_header("Access-Control-Allow-Methods", allow_methods) + core.response.set_header("Access-Control-Max-Age", conf.max_age) + if conf.expose_headers ~= nil and conf.expose_headers ~= "" then + core.response.set_header("Access-Control-Expose-Headers", conf.expose_headers) + end + if conf.allow_headers == "**" then + core.response.set_header("Access-Control-Allow-Headers", + core.request.header(ctx, "Access-Control-Request-Headers")) + else + core.response.set_header("Access-Control-Allow-Headers", conf.allow_headers) + end + if conf.allow_credential then + core.response.set_header("Access-Control-Allow-Credentials", true) + end +end + +local function set_timing_headers(conf, ctx) + if ctx.timing_allow_origin then + core.response.set_header("Timing-Allow-Origin", ctx.timing_allow_origin) + end +end + + +local function process_with_allow_origins(allow_origin_type, allow_origins, ctx, req_origin, + cache_key, cache_version) + if allow_origins == "**" then + allow_origins = req_origin or '*' + end + + local multiple_origin, err + if cache_key and cache_version then + multiple_origin, err = lrucache( + cache_key, cache_version, create_multiple_origin_cache, allow_origins + ) + else + multiple_origin, err = core.lrucache.plugin_ctx( + lrucache, ctx, allow_origin_type, create_multiple_origin_cache, allow_origins + ) + end + + if err then + return 500, {message = "get multiple origin cache failed: " .. err} + end + + if multiple_origin then + if multiple_origin[req_origin] then + allow_origins = req_origin + else + return + end + end + + return allow_origins +end + +local function process_with_allow_origins_by_regex(allow_origin_type, + allow_origins_by_regex, conf, ctx, req_origin) + + local allow_origins_by_regex_rules_concat_conf_key = + "allow_origins_by_regex_rules_concat_" .. 
allow_origin_type + + if not conf[allow_origins_by_regex_rules_concat_conf_key] then + local allow_origins_by_regex_rules = {} + for i, re_rule in ipairs(allow_origins_by_regex) do + allow_origins_by_regex_rules[i] = re_rule + end + conf[allow_origins_by_regex_rules_concat_conf_key] = core.table.concat( + allow_origins_by_regex_rules, "|") + end + + -- core.log.warn("regex: ", conf[allow_origins_by_regex_rules_concat_conf_key], "\n ") + local matched = re_find(req_origin, conf[allow_origins_by_regex_rules_concat_conf_key], "jo") + if matched then + return req_origin + end +end + + +local function match_origins(req_origin, allow_origins) + return req_origin == allow_origins or allow_origins == '*' +end + +local function process_with_allow_origins_by_metadata(allow_origin_type, allow_origins_by_metadata, + ctx, req_origin) + + if allow_origins_by_metadata == nil then + return + end + + local metadata = plugin.plugin_metadata(plugin_name) + if metadata and metadata.value.allow_origins then + local allow_origins_map = metadata.value.allow_origins + for _, key in ipairs(allow_origins_by_metadata) do + local allow_origins_conf = allow_origins_map[key] + local allow_origins = process_with_allow_origins( + allow_origin_type, allow_origins_conf, ctx, req_origin, + plugin_name .. "#" .. 
key, metadata.modifiedIndex + ) + if match_origins(req_origin, allow_origins) then + return req_origin + end + end + end +end + + +function _M.rewrite(conf, ctx) + -- save the original request origin as it may be changed at other phase + ctx.original_request_origin = core.request.header(ctx, "Origin") + if ctx.var.request_method == "OPTIONS" then + return 200 + end +end + + +function _M.header_filter(conf, ctx) + local req_origin = ctx.original_request_origin + -- If allow_origins_by_regex is not nil, should be matched to it only + local allow_origins + local allow_origins_local = false + if conf.allow_origins_by_metadata then + allow_origins = process_with_allow_origins_by_metadata( + TYPE_ACCESS_CONTROL_ALLOW_ORIGIN, conf.allow_origins_by_metadata, ctx, req_origin + ) + if not match_origins(req_origin, allow_origins) then + if conf.allow_origins and conf.allow_origins ~= "*" then + allow_origins_local = true + end + end + else + allow_origins_local = true + end + if conf.allow_origins_by_regex == nil then + if allow_origins_local then + allow_origins = process_with_allow_origins( + TYPE_ACCESS_CONTROL_ALLOW_ORIGIN, conf.allow_origins, ctx, req_origin + ) + end + else + if allow_origins_local then + allow_origins = process_with_allow_origins_by_regex( + TYPE_ACCESS_CONTROL_ALLOW_ORIGIN, conf.allow_origins_by_regex, + conf, ctx, req_origin + ) + end + end + if not match_origins(req_origin, allow_origins) then + allow_origins = process_with_allow_origins_by_metadata( + TYPE_ACCESS_CONTROL_ALLOW_ORIGIN, conf.allow_origins_by_metadata, ctx, req_origin + ) + end + if conf.allow_origins ~= "*" then + core.response.add_header("Vary", "Origin") + end + if allow_origins then + ctx.cors_allow_origins = allow_origins + set_cors_headers(conf, ctx) + end + + local timing_allow_origins + if conf.timing_allow_origins_by_regex == nil and conf.timing_allow_origins then + timing_allow_origins = process_with_allow_origins( + TYPE_TIMING_ALLOW_ORIGIN, conf.timing_allow_origins, ctx, 
req_origin + ) + elseif conf.timing_allow_origins_by_regex then + timing_allow_origins = process_with_allow_origins_by_regex( + TYPE_TIMING_ALLOW_ORIGIN, conf.timing_allow_origins_by_regex, + conf, ctx, req_origin + ) + end + if timing_allow_origins and match_origins(req_origin, timing_allow_origins) then + ctx.timing_allow_origin = timing_allow_origins + set_timing_headers(conf, ctx) + end + +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/csrf.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/csrf.lua new file mode 100644 index 0000000..4ed2ad6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/csrf.lua @@ -0,0 +1,168 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local resty_sha256 = require("resty.sha256") +local str = require("resty.string") +local ngx = ngx +local ngx_encode_base64 = ngx.encode_base64 +local ngx_decode_base64 = ngx.decode_base64 +local ngx_time = ngx.time +local ngx_cookie_time = ngx.cookie_time +local math = math +local SAFE_METHODS = {"GET", "HEAD", "OPTIONS"} + + +local schema = { + type = "object", + properties = { + key = { + description = "use to generate csrf token", + type = "string", + }, + expires = { + description = "expires time(s) for csrf token", + type = "integer", + default = 7200 + }, + name = { + description = "the csrf token name", + type = "string", + default = "apisix-csrf-token" + } + }, + encrypt_fields = {"key"}, + required = {"key"} +} + + +local _M = { + version = 0.1, + priority = 2980, + name = "csrf", + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +local function gen_sign(random, expires, key) + local sha256 = resty_sha256:new() + + local sign = "{expires:" .. expires .. ",random:" .. random .. ",key:" .. key .. 
"}" + + sha256:update(sign) + local digest = sha256:final() + + return str.to_hex(digest) +end + + +local function gen_csrf_token(conf) + local random = math.random() + local timestamp = ngx_time() + local sign = gen_sign(random, timestamp, conf.key) + + local token = { + random = random, + expires = timestamp, + sign = sign, + } + + local cookie = ngx_encode_base64(core.json.encode(token)) + return cookie +end + + +local function check_csrf_token(conf, ctx, token) + local token_str = ngx_decode_base64(token) + if not token_str then + core.log.error("csrf token base64 decode error") + return false + end + + local token_table, err = core.json.decode(token_str) + if err then + core.log.error("decode token error: ", err) + return false + end + + local random = token_table["random"] + if not random then + core.log.error("no random in token") + return false + end + + local expires = token_table["expires"] + if not expires then + core.log.error("no expires in token") + return false + end + local time_now = ngx_time() + if conf.expires > 0 and time_now - expires > conf.expires then + core.log.error("token has expired") + return false + end + + local sign = gen_sign(random, expires, conf.key) + if token_table["sign"] ~= sign then + core.log.error("Invalid signatures") + return false + end + + return true +end + + +function _M.access(conf, ctx) + local method = core.request.get_method(ctx) + if core.table.array_find(SAFE_METHODS, method) then + return + end + + local header_token = core.request.header(ctx, conf.name) + if not header_token or header_token == "" then + return 401, {error_msg = "no csrf token in headers"} + end + + local cookie_token = ctx.var["cookie_" .. 
conf.name] + if not cookie_token then + return 401, {error_msg = "no csrf cookie"} + end + + if header_token ~= cookie_token then + return 401, {error_msg = "csrf token mismatch"} + end + + local result = check_csrf_token(conf, ctx, cookie_token) + if not result then + return 401, {error_msg = "Failed to verify the csrf token signature"} + end +end + + +function _M.header_filter(conf, ctx) + local csrf_token = gen_csrf_token(conf) + local cookie = conf.name .. "=" .. csrf_token .. ";path=/;SameSite=Lax;Expires=" + .. ngx_cookie_time(ngx_time() + conf.expires) + core.response.add_header("Set-Cookie", cookie) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/datadog.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/datadog.lua new file mode 100644 index 0000000..972c0a2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/datadog.lua @@ -0,0 +1,251 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+ +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local fetch_log = require("apisix.utils.log-util").get_full_log +local service_fetch = require("apisix.http.service").get +local ngx = ngx +local udp = ngx.socket.udp +local format = string.format +local concat = table.concat +local tostring = tostring + +local plugin_name = "datadog" +local defaults = { + host = "127.0.0.1", + port = 8125, + namespace = "apisix", + constant_tags = {"source:apisix"} +} + +local batch_processor_manager = bp_manager_mod.new(plugin_name) +local schema = { + type = "object", + properties = { + prefer_name = {type = "boolean", default = true} + } +} + +local metadata_schema = { + type = "object", + properties = { + host = {type = "string", default= defaults.host}, + port = {type = "integer", minimum = 0, default = defaults.port}, + namespace = {type = "string", default = defaults.namespace}, + constant_tags = { + type = "array", + items = {type = "string"}, + default = defaults.constant_tags + } + }, +} + +local _M = { + version = 0.1, + priority = 495, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + return core.schema.check(schema, conf) +end + + +local function generate_tag(entry, const_tags) + local tags + if const_tags and #const_tags > 0 then + tags = core.table.clone(const_tags) + else + tags = {} + end + + if entry.route_id and entry.route_id ~= "" then + core.table.insert(tags, "route_name:" .. entry.route_id) + end + + if entry.service_id and entry.service_id ~= "" then + core.table.insert(tags, "service_name:" .. entry.service_id) + end + + if entry.consumer and entry.consumer.username then + core.table.insert(tags, "consumer:" .. 
entry.consumer.username) + end + if entry.balancer_ip ~= "" then + core.table.insert(tags, "balancer_ip:" .. entry.balancer_ip) + end + if entry.response.status then + core.table.insert(tags, "response_status:" .. entry.response.status) + end + if entry.scheme ~= "" then + core.table.insert(tags, "scheme:" .. entry.scheme) + end + + if #tags > 0 then + return "|#" .. concat(tags, ',') + end + + return "" +end + + +local function send_metric_over_udp(entry, metadata) + local err_msg + local sock = udp() + local host, port = metadata.value.host, metadata.value.port + + local ok, err = sock:setpeername(host, port) + if not ok then + return false, "failed to connect to UDP server: host[" .. host + .. "] port[" .. tostring(port) .. "] err: " .. err + end + + -- Generate prefix & suffix according dogstatsd udp data format. + local suffix = generate_tag(entry, metadata.value.constant_tags) + local prefix = metadata.value.namespace + if prefix ~= "" then + prefix = prefix .. "." + end + + -- request counter + ok, err = sock:send(format("%s:%s|%s%s", prefix .. "request.counter", 1, "c", suffix)) + if not ok then + err_msg = "error sending request.counter: " .. err + core.log.error("failed to report request count to dogstatsd server: host[" .. host + .. "] port[" .. tostring(port) .. "] err: " .. err) + end + + -- request latency histogram + ok, err = sock:send(format("%s:%s|%s%s", prefix .. "request.latency", + entry.latency, "h", suffix)) + if not ok then + err_msg = "error sending request.latency: " .. err + core.log.error("failed to report request latency to dogstatsd server: host[" + .. host .. "] port[" .. tostring(port) .. "] err: " .. err) + end + + -- upstream latency + if entry.upstream_latency then + ok, err = sock:send(format("%s:%s|%s%s", prefix .. "upstream.latency", + entry.upstream_latency, "h", suffix)) + if not ok then + err_msg = "error sending upstream.latency: " .. err + core.log.error("failed to report upstream latency to dogstatsd server: host[" + .. 
host .. "] port[" .. tostring(port) .. "] err: " .. err) + end + end + + -- apisix_latency + ok, err = sock:send(format("%s:%s|%s%s", prefix .. "apisix.latency", + entry.apisix_latency, "h", suffix)) + if not ok then + err_msg = "error sending apisix.latency: " .. err + core.log.error("failed to report apisix latency to dogstatsd server: host[" .. host + .. "] port[" .. tostring(port) .. "] err: " .. err) + end + + -- request body size timer + ok, err = sock:send(format("%s:%s|%s%s", prefix .. "ingress.size", + entry.request.size, "ms", suffix)) + if not ok then + err_msg = "error sending ingress.size: " .. err + core.log.error("failed to report req body size to dogstatsd server: host[" .. host + .. "] port[" .. tostring(port) .. "] err: " .. err) + end + + -- response body size timer + ok, err = sock:send(format("%s:%s|%s%s", prefix .. "egress.size", + entry.response.size, "ms", suffix)) + if not ok then + err_msg = "error sending egress.size: " .. err + core.log.error("failed to report response body size to dogstatsd server: host[" + .. host .. "] port[" .. tostring(port) .. "] err: " .. 
err) + end + + ok, err = sock:close() + if not ok then + core.log.error("failed to close the UDP connection, host[", + host, "] port[", port, "] ", err) + end + + if not err_msg then + return true + end + + return false, err_msg +end + + +local function push_metrics(entries) + -- Fetching metadata details + local metadata = plugin.plugin_metadata(plugin_name) + core.log.info("metadata: ", core.json.delay_encode(metadata)) + + if not metadata then + core.log.info("received nil metadata: using metadata defaults: ", + core.json.delay_encode(defaults, true)) + metadata = {} + metadata.value = defaults + end + core.log.info("sending batch metrics to dogstatsd: ", metadata.value.host, + ":", metadata.value.port) + + for i = 1, #entries do + local ok, err = send_metric_over_udp(entries[i], metadata) + if not ok then + return false, err, i + end + end + + return true +end + + +function _M.log(conf, ctx) + local entry = fetch_log(ngx, {}) + entry.balancer_ip = ctx.balancer_ip or "" + entry.scheme = ctx.upstream_scheme or "" + + -- if prefer_name is set, fetch the service/route name. If the name is nil, fall back to id. 
+ if conf.prefer_name then + if entry.service_id and entry.service_id ~= "" then + local svc = service_fetch(entry.service_id) + + if svc and svc.value.name ~= "" then + entry.service_id = svc.value.name + end + end + + if ctx.route_name and ctx.route_name ~= "" then + entry.route_id = ctx.route_name + end + end + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, push_metrics) +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/degraphql.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/degraphql.lua new file mode 100644 index 0000000..e47a276 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/degraphql.lua @@ -0,0 +1,160 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local gq_parse = require("graphql").parse +local req_set_body_data = ngx.req.set_body_data +local ipairs = ipairs +local pcall = pcall +local type = type + + +local schema = { + type = "object", + properties = { + query = { + type = "string", + minLength = 1, + maxLength = 1024, + }, + variables = { + type = "array", + items = { + type = "string" + }, + minItems = 1, + }, + operation_name = { + type = "string", + minLength = 1, + maxLength = 1024 + }, + }, + required = {"query"}, +} + +local plugin_name = "degraphql" + +local _M = { + version = 0.1, + priority = 509, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + local ok, res = pcall(gq_parse, conf.query) + if not ok then + return false, "failed to parse query: " .. res + end + + if #res.definitions > 1 and not conf.operation_name then + return false, "operation_name is required if multiple operations are present in the query" + end + return true +end + + +local function fetch_post_variables(conf) + local req_body, err = core.request.get_body() + if err ~= nil then + core.log.error("failed to get request body: ", err) + return nil, 503 + end + + if not req_body then + core.log.error("missing request body") + return nil, 400 + end + + -- JSON as the default content type + req_body, err = core.json.decode(req_body) + if type(req_body) ~= "table" then + core.log.error("invalid request body can't be decoded: ", err or "bad type") + return nil, 400 + end + + local variables = {} + for _, v in ipairs(conf.variables) do + variables[v] = req_body[v] + end + + return variables +end + + +local function fetch_get_variables(conf) + local args = core.request.get_uri_args() + local variables = {} + for _, v in ipairs(conf.variables) do + variables[v] = args[v] + end + + return variables +end + + +function _M.access(conf, ctx) + local meth = 
core.request.get_method() + if meth ~= "POST" and meth ~= "GET" then + return 405 + end + + local new_body = core.table.new(0, 3) + + if conf.variables then + local variables, code + if meth == "POST" then + variables, code = fetch_post_variables(conf) + else + variables, code = fetch_get_variables(conf) + end + + if not variables then + return code + end + + if meth == "POST" then + new_body["variables"] = variables + else + new_body["variables"] = core.json.encode(variables) + end + end + + new_body["operationName"] = conf.operation_name + new_body["query"] = conf.query + + if meth == "POST" then + if not conf.variables then + -- the set_body_data requires to read the body first + core.request.get_body() + end + + core.request.set_header(ctx, "Content-Type", "application/json") + req_set_body_data(core.json.encode(new_body)) + else + core.request.set_uri_args(ctx, new_body) + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/dubbo-proxy.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/dubbo-proxy.lua new file mode 100644 index 0000000..57a093f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/dubbo-proxy.lua @@ -0,0 +1,69 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local ngx_var = ngx.var + + +local plugin_name = "dubbo-proxy" + +local schema = { + type = "object", + properties = { + service_name = { + type = "string", + minLength = 1, + }, + service_version = { + type = "string", + pattern = [[^\d+\.\d+\.\d+]], + }, + method = { + type = "string", + minLength = 1, + }, + }, + required = { "service_name", "service_version"}, +} + +local _M = { + version = 0.1, + priority = 507, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +function _M.access(conf, ctx) + ctx.dubbo_proxy_enabled = true + + ngx_var.dubbo_service_name = conf.service_name + ngx_var.dubbo_service_version = conf.service_version + if not conf.method then + -- remove the prefix '/' from $uri + ngx_var.dubbo_method = core.string.sub(ngx_var.uri, 2) + else + ngx_var.dubbo_method = conf.method + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/echo.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/echo.lua new file mode 100644 index 0000000..525c175 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/echo.lua @@ -0,0 +1,121 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local pairs = pairs +local type = type +local ngx = ngx + + +local schema = { + type = "object", + properties = { + before_body = { + description = "body before the filter phase.", + type = "string" + }, + body = { + description = "body to replace upstream response.", + type = "string" + }, + after_body = { + description = "body after the modification of filter phase.", + type = "string" + }, + headers = { + description = "new headers for response", + type = "object", + minProperties = 1, + }, + }, + anyOf = { + {required = {"before_body"}}, + {required = {"body"}}, + {required = {"after_body"}} + }, + minProperties = 1, +} + +local plugin_name = "echo" + +local _M = { + version = 0.1, + priority = 412, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + return true +end + + +function _M.body_filter(conf, ctx) + if conf.body then + ngx.arg[1] = conf.body + ngx.arg[2] = true + end + + if conf.before_body and not ctx.plugin_echo_body_set then + ngx.arg[1] = conf.before_body .. ngx.arg[1] + ctx.plugin_echo_body_set = true + end + + if ngx.arg[2] and conf.after_body then + ngx.arg[1] = ngx.arg[1] .. 
conf.after_body + end +end + + +function _M.header_filter(conf, ctx) + if conf.body or conf.before_body or conf.after_body then + core.response.clear_header_as_body_modified() + end + + if not conf.headers then + return + end + + if not conf.headers_arr then + conf.headers_arr = {} + + for field, value in pairs(conf.headers) do + if type(field) == 'string' + and (type(value) == 'string' or type(value) == 'number') then + if #field == 0 then + return false, 'invalid field length in header' + end + core.table.insert(conf.headers_arr, field) + core.table.insert(conf.headers_arr, value) + else + return false, 'invalid type as header value' + end + end + end + + local field_cnt = #conf.headers_arr + for i = 1, field_cnt, 2 do + ngx.header[conf.headers_arr[i]] = conf.headers_arr[i+1] + end +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/elasticsearch-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/elasticsearch-logger.lua new file mode 100644 index 0000000..09dcbd7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/elasticsearch-logger.lua @@ -0,0 +1,281 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local core = require("apisix.core") +local http = require("resty.http") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") + +local ngx = ngx +local str_format = core.string.format +local math_random = math.random + +local plugin_name = "elasticsearch-logger" +local batch_processor_manager = bp_manager_mod.new(plugin_name) + +local schema = { + type = "object", + properties = { + -- deprecated, use "endpoint_addrs" instead + endpoint_addr = { + type = "string", + pattern = "[^/]$", + }, + endpoint_addrs = { + type = "array", + minItems = 1, + items = { + type = "string", + pattern = "[^/]$", + }, + }, + field = { + type = "object", + properties = { + index = { type = "string"}, + }, + required = {"index"} + }, + log_format = {type = "object"}, + auth = { + type = "object", + properties = { + username = { + type = "string", + minLength = 1 + }, + password = { + type = "string", + minLength = 1 + }, + }, + required = {"username", "password"}, + }, + timeout = { + type = "integer", + minimum = 1, + default = 10 + }, + ssl_verify = { + type = "boolean", + default = true + }, + include_req_body = {type = "boolean", default = false}, + include_req_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + include_resp_body = { type = "boolean", default = false }, + include_resp_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + }, + encrypt_fields = {"auth.password"}, + oneOf = { + {required = {"endpoint_addr", "field"}}, + {required = {"endpoint_addrs", "field"}} + }, +} + + +local metadata_schema = { + type = "object", + properties = { + log_format = { + type = "object" + } + }, +} + + +local _M = { + version = 0.1, + priority = 413, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == 
core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + local check = {"endpoint_addrs"} + core.utils.check_https(check, conf, plugin_name) + core.utils.check_tls_bool({"ssl_verify"}, conf, plugin_name) + return core.schema.check(schema, conf) +end + + +local function get_es_major_version(uri, conf) + local httpc = http.new() + if not httpc then + return nil, "failed to create http client" + end + local headers = {} + if conf.auth then + local authorization = "Basic " .. ngx.encode_base64( + conf.auth.username .. ":" .. conf.auth.password + ) + headers["Authorization"] = authorization + end + httpc:set_timeout(conf.timeout * 1000) + local res, err = httpc:request_uri(uri, { + ssl_verify = conf.ssl_verify, + method = "GET", + headers = headers, + }) + if not res then + return false, err + end + if res.status ~= 200 then + return nil, str_format("server returned status: %d, body: %s", + res.status, res.body or "") + end + local json_body, err = core.json.decode(res.body) + if not json_body then + return nil, "failed to decode response body: " .. err + end + if not json_body.version or not json_body.version.number then + return nil, "failed to get version from response body" + end + + local major_version = json_body.version.number:match("^(%d+)%.") + if not major_version then + return nil, "invalid version format: " .. json_body.version.number + end + + return major_version +end + + +local function get_logger_entry(conf, ctx) + local entry = log_util.get_log_entry(plugin_name, conf, ctx) + local body = { + index = { + _index = conf.field.index + } + } + -- for older version type is required + if conf._version == "6" or conf._version == "5" then + body.index._type = "_doc" + end + return core.json.encode(body) .. "\n" .. + core.json.encode(entry) .. 
"\n" +end + +local function fetch_and_update_es_version(conf) + if conf._version then + return + end + local selected_endpoint_addr + if conf.endpoint_addr then + selected_endpoint_addr = conf.endpoint_addr + else + selected_endpoint_addr = conf.endpoint_addrs[math_random(#conf.endpoint_addrs)] + end + local major_version, err = get_es_major_version(selected_endpoint_addr, conf) + if err then + core.log.error("failed to get Elasticsearch version: ", err) + return + end + conf._version = major_version +end + + +local function send_to_elasticsearch(conf, entries) + local httpc, err = http.new() + if not httpc then + return false, str_format("create http error: %s", err) + end + fetch_and_update_es_version(conf) + local selected_endpoint_addr + if conf.endpoint_addr then + selected_endpoint_addr = conf.endpoint_addr + else + selected_endpoint_addr = conf.endpoint_addrs[math_random(#conf.endpoint_addrs)] + end + local uri = selected_endpoint_addr .. "/_bulk" + local body = core.table.concat(entries, "") + local headers = { + ["Content-Type"] = "application/x-ndjson", + ["Accept"] = "application/vnd.elasticsearch+json" + } + if conf.auth then + local authorization = "Basic " .. ngx.encode_base64( + conf.auth.username .. ":" .. 
conf.auth.password + ) + headers["Authorization"] = authorization + end + + core.log.info("uri: ", uri, ", body: ", body) + + httpc:set_timeout(conf.timeout * 1000) + local resp, err = httpc:request_uri(uri, { + ssl_verify = conf.ssl_verify, + method = "POST", + headers = headers, + body = body + }) + if not resp then + return false, err + end + + if resp.status ~= 200 then + return false, str_format("elasticsearch server returned status: %d, body: %s", + resp.status, resp.body or "") + end + + return true +end + + +function _M.body_filter(conf, ctx) + log_util.collect_body(conf, ctx) +end + +function _M.access(conf) + -- fetch_and_update_es_version will call ES server only the first time + -- so this should not amount to considerable overhead + fetch_and_update_es_version(conf) +end + +function _M.log(conf, ctx) + local entry = get_logger_entry(conf, ctx) + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + local process = function(entries) + return send_to_elasticsearch(conf, entries) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, process) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/error-log-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/error-log-logger.lua new file mode 100644 index 0000000..88eca65 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/error-log-logger.lua @@ -0,0 +1,510 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local errlog = require("ngx.errlog") +local batch_processor = require("apisix.utils.batch-processor") +local plugin = require("apisix.plugin") +local timers = require("apisix.timers") +local http = require("resty.http") +local producer = require("resty.kafka.producer") +local plugin_name = "error-log-logger" +local table = core.table +local schema_def = core.schema +local ngx = ngx +local tcp = ngx.socket.tcp +local tostring = tostring +local ipairs = ipairs +local string = require("string") +local lrucache = core.lrucache.new({ + ttl = 300, count = 32 +}) +local kafka_prod_lrucache = core.lrucache.new({ + ttl = 300, count = 32 +}) + + +local metadata_schema = { + type = "object", + properties = { + tcp = { + type = "object", + properties = { + host = schema_def.host_def, + port = {type = "integer", minimum = 0}, + tls = {type = "boolean", default = false}, + tls_server_name = {type = "string"}, + }, + required = {"host", "port"} + }, + skywalking = { + type = "object", + properties = { + endpoint_addr = {schema_def.uri, default = "http://127.0.0.1:12900/v3/logs"}, + service_name = {type = "string", default = "APISIX"}, + service_instance_name = {type="string", default = "APISIX Service Instance"}, + }, + }, + clickhouse = { + type = "object", + properties = { + endpoint_addr = {schema_def.uri_def, default="http://127.0.0.1:8123"}, + user = {type = "string", default = "default"}, + password = {type = "string", default = ""}, + database = {type = "string", default = ""}, + logtable = {type = "string", default 
= ""}, + }, + required = {"endpoint_addr", "user", "password", "database", "logtable"} + }, + kafka = { + type = "object", + properties = { + brokers = { + type = "array", + minItems = 1, + items = { + type = "object", + properties = { + host = { + type = "string", + description = "the host of kafka broker", + }, + port = { + type = "integer", + minimum = 1, + maximum = 65535, + description = "the port of kafka broker", + }, + sasl_config = { + type = "object", + description = "sasl config", + properties = { + mechanism = { + type = "string", + default = "PLAIN", + enum = {"PLAIN"}, + }, + user = { type = "string", description = "user" }, + password = { type = "string", description = "password" }, + }, + required = {"user", "password"}, + }, + }, + required = {"host", "port"}, + }, + uniqueItems = true, + }, + kafka_topic = {type = "string"}, + producer_type = { + type = "string", + default = "async", + enum = {"async", "sync"}, + }, + required_acks = { + type = "integer", + default = 1, + enum = { 0, 1, -1 }, + }, + key = {type = "string"}, + -- in lua-resty-kafka, cluster_name is defined as number + -- see https://github.com/doujiang24/lua-resty-kafka#new-1 + cluster_name = {type = "integer", minimum = 1, default = 1}, + meta_refresh_interval = {type = "integer", minimum = 1, default = 30}, + }, + required = {"brokers", "kafka_topic"}, + }, + name = {type = "string", default = plugin_name}, + level = {type = "string", default = "WARN", enum = {"STDERR", "EMERG", "ALERT", "CRIT", + "ERR", "ERROR", "WARN", "NOTICE", "INFO", "DEBUG"}}, + timeout = {type = "integer", minimum = 1, default = 3}, + keepalive = {type = "integer", minimum = 1, default = 30}, + batch_max_size = {type = "integer", minimum = 0, default = 1000}, + max_retry_count = {type = "integer", minimum = 0, default = 0}, + retry_delay = {type = "integer", minimum = 0, default = 1}, + buffer_duration = {type = "integer", minimum = 1, default = 60}, + inactive_timeout = {type = "integer", minimum = 1, 
default = 3}, + }, + oneOf = { + {required = {"skywalking"}}, + {required = {"tcp"}}, + {required = {"clickhouse"}}, + {required = {"kafka"}}, + -- for compatible with old schema + {required = {"host", "port"}} + }, + encrypt_fields = {"clickhouse.password"}, +} + + +local schema = { + type = "object", +} + + +local log_level = { + STDERR = ngx.STDERR, + EMERG = ngx.EMERG, + ALERT = ngx.ALERT, + CRIT = ngx.CRIT, + ERR = ngx.ERR, + ERROR = ngx.ERR, + WARN = ngx.WARN, + NOTICE = ngx.NOTICE, + INFO = ngx.INFO, + DEBUG = ngx.DEBUG +} + + +local config = {} +local log_buffer + + +local _M = { + version = 0.1, + priority = 1091, + name = plugin_name, + schema = schema, + metadata_schema = metadata_schema, + scope = "global", +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + local check = {"skywalking.endpoint_addr", "clickhouse.endpoint_addr"} + core.utils.check_https(check, conf, plugin_name) + core.utils.check_tls_bool({"tcp.tls"}, conf, plugin_name) + + return core.schema.check(schema, conf) +end + + +local function send_to_tcp_server(data) + local sock, soc_err = tcp() + + if not sock then + return false, "failed to init the socket " .. soc_err + end + + sock:settimeout(config.timeout * 1000) + + local tcp_config = config.tcp + local ok, err = sock:connect(tcp_config.host, tcp_config.port) + if not ok then + return false, "failed to connect the TCP server: host[" .. tcp_config.host + .. "] port[" .. tostring(tcp_config.port) .. "] err: " .. err + end + + if tcp_config.tls then + ok, err = sock:sslhandshake(false, tcp_config.tls_server_name, false) + if not ok then + sock:close() + return false, "failed to perform TLS handshake to TCP server: host[" + .. tcp_config.host .. "] port[" .. tostring(tcp_config.port) .. "] err: " .. 
err + end + end + + local bytes, err = sock:send(data) + if not bytes then + sock:close() + return false, "failed to send data to TCP server: host[" .. tcp_config.host + .. "] port[" .. tostring(tcp_config.port) .. "] err: " .. err + end + + sock:setkeepalive(config.keepalive * 1000) + return true +end + + +local function send_to_skywalking(log_message) + local err_msg + local res = true + core.log.info("sending a batch logs to ", config.skywalking.endpoint_addr) + + local httpc = http.new() + httpc:set_timeout(config.timeout * 1000) + + local entries = {} + local service_instance_name = config.skywalking.service_instance_name + if service_instance_name == "$hostname" then + service_instance_name = core.utils.gethostname() + end + + for i = 1, #log_message, 2 do + local content = { + service = config.skywalking.service_name, + serviceInstance = service_instance_name, + endpoint = "", + body = { + text = { + text = log_message[i] + } + } + } + table.insert(entries, content) + end + + local httpc_res, httpc_err = httpc:request_uri( + config.skywalking.endpoint_addr, + { + method = "POST", + body = core.json.encode(entries), + keepalive_timeout = config.keepalive * 1000, + headers = { + ["Content-Type"] = "application/json", + } + } + ) + + if not httpc_res then + return false, "error while sending data to skywalking[" + .. config.skywalking.endpoint_addr .. "] " .. 
httpc_err + end + + -- some error occurred in the server + if httpc_res.status >= 400 then + res = false + err_msg = string.format( + "server returned status code[%s] skywalking[%s] body[%s]", + httpc_res.status, + config.skywalking.endpoint_addr, + httpc_res:read_body() + ) + end + + return res, err_msg +end + + +local function send_to_clickhouse(log_message) + local err_msg + local res = true + core.log.info("sending a batch logs to ", config.clickhouse.endpoint_addr) + + local httpc = http.new() + httpc:set_timeout(config.timeout * 1000) + + local entries = {} + for i = 1, #log_message, 2 do + -- TODO Here save error log as a whole string to clickhouse 'data' column. + -- We will add more columns in the future. + table.insert(entries, core.json.encode({data=log_message[i]})) + end + + local httpc_res, httpc_err = httpc:request_uri( + config.clickhouse.endpoint_addr, + { + method = "POST", + body = "INSERT INTO " .. config.clickhouse.logtable .." FORMAT JSONEachRow " + .. table.concat(entries, " "), + keepalive_timeout = config.keepalive * 1000, + headers = { + ["Content-Type"] = "application/json", + ["X-ClickHouse-User"] = config.clickhouse.user, + ["X-ClickHouse-Key"] = config.clickhouse.password, + ["X-ClickHouse-Database"] = config.clickhouse.database + } + } + ) + + if not httpc_res then + return false, "error while sending data to clickhouse[" + .. config.clickhouse.endpoint_addr .. "] " .. httpc_err + end + + -- some error occurred in the server + if httpc_res.status >= 400 then + res = false + err_msg = string.format( + "server returned status code[%s] clickhouse[%s] body[%s]", + httpc_res.status, + config.clickhouse.endpoint_addr, + httpc_res:read_body() + ) + end + + return res, err_msg +end + + +local function update_filter(value) + local level = log_level[value.level] + local status, err = errlog.set_filter_level(level) + if not status then + return nil, "failed to set filter level by ngx.errlog, the error is :" ..
err + else + core.log.notice("set the filter_level to ", value.level) + end + + return value +end + + +local function create_producer(broker_list, broker_config, cluster_name) + core.log.info("create new kafka producer instance") + return producer:new(broker_list, broker_config, cluster_name) +end + + +local function send_to_kafka(log_message) + -- avoid race of the global config + local metadata = plugin.plugin_metadata(plugin_name) + if not (metadata and metadata.value and metadata.modifiedIndex) then + return false, "please set the correct plugin_metadata for " .. plugin_name + end + local config, err = lrucache(plugin_name, metadata.modifiedIndex, update_filter, metadata.value) + if not config then + return false, "get config failed: " .. err + end + + core.log.info("sending a batch logs to kafka brokers: ", + core.json.delay_encode(config.kafka.brokers)) + + local broker_config = {} + broker_config["request_timeout"] = config.timeout * 1000 + broker_config["producer_type"] = config.kafka.producer_type + broker_config["required_acks"] = config.kafka.required_acks + broker_config["refresh_interval"] = config.kafka.meta_refresh_interval * 1000 + + -- reuse producer via kafka_prod_lrucache to avoid unbalanced partitions of messages in kafka + local prod, err = kafka_prod_lrucache(plugin_name, metadata.modifiedIndex, + create_producer, config.kafka.brokers, broker_config, + config.kafka.cluster_name) + if not prod then + return false, "get kafka producer failed: " .. err + end + core.log.info("kafka cluster name ", config.kafka.cluster_name, ", broker_list[1] port ", + prod.client.broker_list[1].port) + + local ok + for i = 1, #log_message, 2 do + ok, err = prod:send(config.kafka.kafka_topic, + config.kafka.key, core.json.encode(log_message[i])) + if not ok then + return false, "failed to send data to Kafka topic: " .. err .. + ", brokers: " .. 
core.json.encode(config.kafka.brokers) + end + core.log.info("send data to kafka: ", core.json.delay_encode(log_message[i])) + end + + return true +end + + +local function send(data) + if config.skywalking then + return send_to_skywalking(data) + elseif config.clickhouse then + return send_to_clickhouse(data) + elseif config.kafka then + return send_to_kafka(data) + end + return send_to_tcp_server(data) +end + + +local function process() + local metadata = plugin.plugin_metadata(plugin_name) + if not (metadata and metadata.value and metadata.modifiedIndex) then + core.log.info("please set the correct plugin_metadata for ", plugin_name) + return + else + local err + config, err = lrucache(plugin_name, metadata.modifiedIndex, update_filter, metadata.value) + if not config then + core.log.warn("set log filter failed for ", err) + return + end + if not (config.tcp or config.skywalking or config.clickhouse or config.kafka) then + config.tcp = { + host = config.host, + port = config.port, + tls = config.tls, + tls_server_name = config.tls_server_name + } + core.log.warn( + string.format("The schema is out of date. Please update to the new configuration, " + .. "for example: {\"tcp\": {\"host\": \"%s\", \"port\": \"%s\"}}", + config.host, config.port + )) + end + end + + local err_level = log_level[metadata.value.level] + local entries = {} + local logs = errlog.get_logs(9) + while ( logs and #logs>0 ) do + for i = 1, #logs, 3 do + -- There will be some stale error logs after the filter level changed. + -- We should avoid reporting them. 
+ if logs[i] <= err_level then + table.insert(entries, logs[i + 2]) + table.insert(entries, "\n") + end + end + logs = errlog.get_logs(9) + end + + if #entries == 0 then + return + end + + if log_buffer then + for _, v in ipairs(entries) do + log_buffer:push(v) + end + return + end + + local config_bat = { + name = config.name, + retry_delay = config.retry_delay, + batch_max_size = config.batch_max_size, + max_retry_count = config.max_retry_count, + buffer_duration = config.buffer_duration, + inactive_timeout = config.inactive_timeout, + } + + local err + log_buffer, err = batch_processor:new(send, config_bat) + + if not log_buffer then + core.log.warn("error when creating the batch processor: ", err) + return + end + + for _, v in ipairs(entries) do + log_buffer:push(v) + end + +end + + +function _M.init() + timers.register_timer("plugin#error-log-logger", process) +end + + +function _M.destroy() + timers.unregister_timer("plugin#error-log-logger") +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/example-plugin.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/example-plugin.lua new file mode 100644 index 0000000..767ccfa --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/example-plugin.lua @@ -0,0 +1,152 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local ngx = ngx +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local upstream = require("apisix.upstream") + +local schema = { + type = "object", + properties = { + i = {type = "number", minimum = 0}, + s = {type = "string"}, + t = {type = "array", minItems = 1}, + ip = {type = "string"}, + port = {type = "integer"}, + }, + required = {"i"}, +} + +local metadata_schema = { + type = "object", + properties = { + ikey = {type = "number", minimum = 0}, + skey = {type = "string"}, + }, + required = {"ikey", "skey"}, +} + +local plugin_name = "example-plugin" + +local _M = { + version = 0.1, + priority = 0, + name = plugin_name, + schema = schema, + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + return core.schema.check(schema, conf) +end + + +function _M.init() + -- call this function when plugin is loaded + local attr = plugin.plugin_attr(plugin_name) + if attr then + core.log.info(plugin_name, " get plugin attr val: ", attr.val) + end +end + + +function _M.destroy() + -- call this function when plugin is unloaded +end + + +function _M.rewrite(conf, ctx) + core.log.warn("plugin rewrite phase, conf: ", core.json.encode(conf)) + core.log.warn("conf_type: ", ctx.conf_type) + core.log.warn("conf_id: ", ctx.conf_id) + core.log.warn("conf_version: ", ctx.conf_version) +end + + +function _M.access(conf, ctx) + core.log.warn("plugin access phase, conf: ", core.json.encode(conf)) + -- return 200, {message = "hit example plugin"} + + if not conf.ip then + return + end + + local up_conf = { + type = "roundrobin", + nodes = { + {host = conf.ip, port = conf.port, weight = 1} + } + } + + local ok, err = upstream.check_schema(up_conf) + if not ok then + return 500, err + end + + local 
matched_route = ctx.matched_route + upstream.set(ctx, up_conf.type .. "#route_" .. matched_route.value.id, + ctx.conf_version, up_conf) + return +end + +function _M.header_filter(conf, ctx) + core.log.warn("plugin header_filter phase, conf: ", core.json.encode(conf)) +end + + +function _M.body_filter(conf, ctx) + core.log.warn("plugin body_filter phase, eof: ", ngx.arg[2], + ", conf: ", core.json.encode(conf)) +end + + +function _M.delayed_body_filter(conf, ctx) + core.log.warn("plugin delayed_body_filter phase, eof: ", ngx.arg[2], + ", conf: ", core.json.encode(conf)) +end + +function _M.log(conf, ctx) + core.log.warn("plugin log phase, conf: ", core.json.encode(conf)) +end + + +local function hello() + local args = ngx.req.get_uri_args() + if args["json"] then + return 200, {msg = "world"} + else + return 200, "world\n" + end +end + + +function _M.control_api() + return { + { + methods = {"GET"}, + uris = {"/v1/plugin/example-plugin/hello"}, + handler = hello, + } + } +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin-post-req.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin-post-req.lua new file mode 100644 index 0000000..a8b809f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin-post-req.lua @@ -0,0 +1,40 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local ext = require("apisix.plugins.ext-plugin.init") + + +local name = "ext-plugin-post-req" +local _M = { + version = 0.1, + priority = -3000, + name = name, + schema = ext.schema, +} + + +function _M.check_schema(conf) + return core.schema.check(_M.schema, conf) +end + + +function _M.access(conf, ctx) + return ext.communicate(conf, ctx, name) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin-post-resp.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin-post-resp.lua new file mode 100644 index 0000000..40d3ca4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin-post-resp.lua @@ -0,0 +1,183 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local ext = require("apisix.plugins.ext-plugin.init") +local helper = require("apisix.plugins.ext-plugin.helper") +local constants = require("apisix.constants") +local http = require("resty.http") + +local ngx = ngx +local ngx_print = ngx.print +local ngx_flush = ngx.flush +local string = string +local str_sub = string.sub + + +local name = "ext-plugin-post-resp" +local _M = { + version = 0.1, + priority = -4000, + name = name, + schema = ext.schema, +} + + +local function include_req_headers(ctx) + -- TODO: handle proxy_set_header + return core.request.headers(ctx) +end + + +local function close(http_obj) + -- TODO: keepalive + local ok, err = http_obj:close() + if not ok then + core.log.error("close http object failed: ", err) + end +end + + +local function get_response(ctx, http_obj) + local ok, err = http_obj:connect({ + scheme = ctx.upstream_scheme, + host = ctx.picked_server.host, + port = ctx.picked_server.port, + }) + + if not ok then + return nil, err + end + -- TODO: set timeout + local uri, args + if ctx.var.upstream_uri == "" then + -- use original uri instead of rewritten one + uri = ctx.var.uri + else + uri = ctx.var.upstream_uri + + -- the rewritten one may contain new args + local index = core.string.find(uri, "?") + if index then + local raw_uri = uri + uri = str_sub(raw_uri, 1, index - 1) + args = str_sub(raw_uri, index + 1) + end + end + local params = { + path = uri, + query = args or ctx.var.args, + headers = include_req_headers(ctx), + method = core.request.get_method(), + } + + local body, err = core.request.get_body() + if err then + return nil, err + end + + if body then + params["body"] = body + end + + local res, err = http_obj:request(params) + if not res then + return nil, err + end + + return res, err +end + +local function send_chunk(chunk) + if not chunk then + return nil + end + + local ok, print_err = ngx_print(chunk) + if not ok then + return "output response failed: ".. 
(print_err or "") + end + local ok, flush_err = ngx_flush(true) + if not ok then + core.log.warn("flush response failed: ", flush_err) + end + + return nil +end + +-- TODO: response body is empty (304 or HEAD) +-- If the upstream returns 304 or the request method is HEAD, +-- there is no response body. In this case, +-- we need to send a response to the client in the plugin, +-- instead of continuing to execute the subsequent plugin. +local function send_response(ctx, res, code) + ngx.status = code or res.status + + local chunks = ctx.runner_ext_response_body + if chunks then + for i=1, #chunks do + local err = send_chunk(chunks[i]) + if err then + return err + end + end + return + end + + return helper.response_reader(res.body_reader, send_chunk) +end + + +function _M.check_schema(conf) + return core.schema.check(_M.schema, conf) +end + + +function _M.before_proxy(conf, ctx) + local http_obj = http.new() + local res, err = get_response(ctx, http_obj) + if not res or err then + core.log.error("failed to request: ", err or "") + close(http_obj) + return 502 + end + ctx.runner_ext_response = res + + core.log.info("response info, status: ", res.status) + core.log.info("response info, headers: ", core.json.delay_encode(res.headers)) + + local code, body = ext.communicate(conf, ctx, name, constants.RPC_HTTP_RESP_CALL) + if body then + close(http_obj) + -- if the body is changed, the code will be set. + return code, body + end + core.log.info("ext-plugin will send response") + + -- send origin response, status maybe changed. 
+ err = send_response(ctx, res, code) + close(http_obj) + + if err then + core.log.error(err) + return not ngx.headers_sent and 502 or nil + end + + core.log.info("ext-plugin send response successfully") +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin-pre-req.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin-pre-req.lua new file mode 100644 index 0000000..183506d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin-pre-req.lua @@ -0,0 +1,40 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local ext = require("apisix.plugins.ext-plugin.init") + + +local name = "ext-plugin-pre-req" +local _M = { + version = 0.1, + priority = 12000, + name = name, + schema = ext.schema, +} + + +function _M.check_schema(conf) + return core.schema.check(_M.schema, conf) +end + + +function _M.rewrite(conf, ctx) + return ext.communicate(conf, ctx, name) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin/helper.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin/helper.lua new file mode 100644 index 0000000..7750bb5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin/helper.lua @@ -0,0 +1,81 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local is_http = ngx.config.subsystem == "http" +local core = require("apisix.core") +local config_local = require("apisix.core.config_local") +local process +if is_http then + process = require "ngx.process" +end +local pl_path = require("pl.path") + + +local _M = {} + + +do + local path + function _M.get_path() + if not path then + local local_conf = config_local.local_conf() + if local_conf then + local test_path = + core.table.try_read_attr(local_conf, "ext-plugin", "path_for_test") + if test_path then + path = "unix:" .. test_path + end + end + + if not path then + local sock = "./conf/apisix-" .. process.get_master_pid() .. ".sock" + path = "unix:" .. pl_path.abspath(sock) + end + end + + return path + end +end + + +function _M.get_conf_token_cache_time() + return 3600 +end + + +function _M.response_reader(reader, callback, ...) + if not reader then + return "get response reader failed" + end + + repeat + local chunk, read_err, cb_err + chunk, read_err = reader() + if read_err then + return "read response failed: ".. (read_err or "") + end + + if chunk then + cb_err = callback(chunk, ...) + if cb_err then + return cb_err + end + end + until not chunk +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin/init.lua new file mode 100644 index 0000000..2631afd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ext-plugin/init.lua @@ -0,0 +1,1025 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local is_http = ngx.config.subsystem == "http" +local flatbuffers = require("flatbuffers") +local a6_method = require("A6.Method") +local prepare_conf_req = require("A6.PrepareConf.Req") +local prepare_conf_resp = require("A6.PrepareConf.Resp") +local http_req_call_req = require("A6.HTTPReqCall.Req") +local http_req_call_resp = require("A6.HTTPReqCall.Resp") +local http_req_call_action = require("A6.HTTPReqCall.Action") +local http_req_call_stop = require("A6.HTTPReqCall.Stop") +local http_req_call_rewrite = require("A6.HTTPReqCall.Rewrite") +local http_resp_call_req = require("A6.HTTPRespCall.Req") +local http_resp_call_resp = require("A6.HTTPRespCall.Resp") +local extra_info = require("A6.ExtraInfo.Info") +local extra_info_req = require("A6.ExtraInfo.Req") +local extra_info_var = require("A6.ExtraInfo.Var") +local extra_info_resp = require("A6.ExtraInfo.Resp") +local extra_info_reqbody = require("A6.ExtraInfo.ReqBody") +local extra_info_respbody = require("A6.ExtraInfo.RespBody") +local text_entry = require("A6.TextEntry") +local err_resp = require("A6.Err.Resp") +local err_code = require("A6.Err.Code") +local constants = require("apisix.constants") +local core = require("apisix.core") +local helper = require("apisix.plugins.ext-plugin.helper") +local process, ngx_pipe, events +if is_http then + process = require("ngx.process") + ngx_pipe = require("ngx.pipe") + events = require("apisix.events") +end +local resty_lock = require("resty.lock") +local resty_signal = require "resty.signal" +local bit = require("bit") +local band = bit.band +local 
lshift = bit.lshift +local rshift = bit.rshift +local ffi = require("ffi") +local ffi_str = ffi.string +local socket_tcp = ngx.socket.tcp +local worker_id = ngx.worker.id +local ngx_timer_at = ngx.timer.at +local exiting = ngx.worker.exiting +local str_byte = string.byte +local str_format = string.format +local str_lower = string.lower +local str_sub = string.sub +local error = error +local ipairs = ipairs +local pairs = pairs +local tostring = tostring +local type = type +local ngx = ngx + + +local events_list +local exclude_resp_header = { + ["connection"] = true, + ["content-length"] = true, + ["transfer-encoding"] = true, + ["location"] = true, + ["server"] = true, + ["www-authenticate"] = true, + ["content-encoding"] = true, + ["content-type"] = true, + ["content-location"] = true, + ["content-language"] = true, +} + +local function new_lrucache() + return core.lrucache.new({ + type = "plugin", + invalid_stale = true, + ttl = helper.get_conf_token_cache_time(), + }) +end +local lrucache = new_lrucache() + +local shdict_name = "ext-plugin" +local shdict = ngx.shared[shdict_name] + +local schema = { + type = "object", + properties = { + conf = { + type = "array", + items = { + type = "object", + properties = { + name = { + type = "string", + maxLength = 128, + minLength = 1 + }, + value = { + type = "string", + }, + }, + required = {"name", "value"} + }, + minItems = 1, + }, + allow_degradation = {type = "boolean", default = false} + }, +} + +local _M = { + schema = schema, +} +local builder = flatbuffers.Builder(0) + + +local send +do + local hdr_buf = ffi.new("unsigned char[4]") + local buf = core.table.new(2, 0) + local MAX_DATA_SIZE = lshift(1, 24) - 1 + + function send(sock, ty, data) + hdr_buf[0] = ty + + local len = #data + + core.log.info("sending rpc type: ", ty, " data length: ", len) + + if len > MAX_DATA_SIZE then + return nil, str_format("the max length of data is %d but got %d", MAX_DATA_SIZE, len) + end + + -- length is sent as big endian + for i 
= 3, 1, -1 do + hdr_buf[i] = band(len, 255) + len = rshift(len, 8) + end + + buf[1] = ffi_str(hdr_buf, 4) + buf[2] = data + return sock:send(buf) + end +end +_M.send = send + + +local err_to_msg +do + local map = { + [err_code.BAD_REQUEST] = "bad request", + [err_code.SERVICE_UNAVAILABLE] = "service unavailable", + [err_code.CONF_TOKEN_NOT_FOUND] = "conf token not found", + } + + function err_to_msg(resp) + local buf = flatbuffers.binaryArray.New(resp) + local resp = err_resp.GetRootAsResp(buf, 0) + local code = resp:Code() + return map[code] or str_format("unknown err %d", code) + end +end + + +local function receive(sock) + local hdr, err = sock:receive(4) + if not hdr then + return nil, err + end + if #hdr ~= 4 then + return nil, "header too short" + end + + local ty = str_byte(hdr, 1) + local resp + local hi, mi, li = str_byte(hdr, 2, 4) + local len = 256 * (256 * hi + mi) + li + + core.log.info("receiving rpc type: ", ty, " data length: ", len) + + if len > 0 then + resp, err = sock:receive(len) + if not resp then + return nil, err + end + if #resp ~= len then + return nil, "data truncated" + end + end + + if ty == constants.RPC_ERROR then + return nil, err_to_msg(resp) + end + + return ty, resp +end +_M.receive = receive + + +local generate_id +do + local count = 0 + local MAX_COUNT = lshift(1, 22) + + function generate_id() + local wid = worker_id() + local id = lshift(wid, 22) + count + count = count + 1 + if count == MAX_COUNT then + count = 0 + end + return id + end +end + + +local encode_a6_method +do + local map = { + GET = a6_method.GET, + HEAD = a6_method.HEAD, + POST = a6_method.POST, + PUT = a6_method.PUT, + DELETE = a6_method.DELETE, + MKCOL = a6_method.MKCOL, + COPY = a6_method.COPY, + MOVE = a6_method.MOVE, + OPTIONS = a6_method.OPTIONS, + PROPFIND = a6_method.PROPFIND, + PROPPATCH = a6_method.PROPPATCH, + LOCK = a6_method.LOCK, + UNLOCK = a6_method.UNLOCK, + PATCH = a6_method.PATCH, + TRACE = a6_method.TRACE, + } + + function 
encode_a6_method(name) + return map[name] + end +end + + +local function build_args(builder, key, val) + local name = builder:CreateString(key) + local value + if val ~= true then + value = builder:CreateString(val) + end + + text_entry.Start(builder) + text_entry.AddName(builder, name) + if val ~= true then + text_entry.AddValue(builder, value) + end + return text_entry.End(builder) +end + + +local function build_headers(var, builder, key, val) + if key == "host" then + val = var.upstream_host + end + + local name = builder:CreateString(key) + local value = builder:CreateString(val) + + text_entry.Start(builder) + text_entry.AddName(builder, name) + text_entry.AddValue(builder, value) + return text_entry.End(builder) +end + + +local function handle_extra_info(ctx, input) + -- exact request + local buf = flatbuffers.binaryArray.New(input) + local req = extra_info_req.GetRootAsReq(buf, 0) + + local res + local info_type = req:InfoType() + if info_type == extra_info.Var then + local info = req:Info() + local var_req = extra_info_var.New() + var_req:Init(info.bytes, info.pos) + + local var_name = var_req:Name() + res = ctx.var[var_name] + elseif info_type == extra_info.ReqBody then + local info = req:Info() + local reqbody_req = extra_info_reqbody.New() + reqbody_req:Init(info.bytes, info.pos) + + local err + res, err = core.request.get_body() + if err then + core.log.error("failed to read request body: ", err) + end + elseif info_type == extra_info.RespBody then + local ext_res = ctx.runner_ext_response + if ext_res then + local info = req:Info() + local respbody_req = extra_info_respbody.New() + respbody_req:Init(info.byte, info.pos) + + local chunks = {} + local err = helper.response_reader(ext_res.body_reader, function (chunk, chunks) + -- When the upstream response is chunked type, + -- we will receive the complete response body + -- before sending it to the runner program + -- to reduce the number of RPC calls. 
+ core.table.insert_tail(chunks, chunk) + end, chunks) + if err then + -- TODO: send RPC_ERROR to runner + core.log.error(err) + else + res = core.table.concat(chunks) + ctx.runner_ext_response_body = chunks + end + else + core.log.error("failed to read response body: not exits") + end + else + return nil, "unsupported info type: " .. info_type + end + + -- build response + builder:Clear() + + local packed_res + if res then + -- ensure to pass the res in string type + res = tostring(res) + packed_res = builder:CreateByteVector(res) + end + extra_info_resp.Start(builder) + if packed_res then + extra_info_resp.AddResult(builder, packed_res) + end + local resp = extra_info_resp.End(builder) + builder:Finish(resp) + return builder:Output() +end + + +local function fetch_token(key) + if shdict then + return shdict:get(key) + else + core.log.error('shm "ext-plugin" not found') + return nil + end +end + + +local function store_token(key, token) + if shdict then + local exp = helper.get_conf_token_cache_time() + -- early expiry, lrucache in critical state sends prepare_conf_req as original behaviour + exp = exp * 0.9 + local success, err, forcible = shdict:set(key, token, exp) + if not success then + core.log.error("ext-plugin:failed to set conf token, err: ", err) + end + if forcible then + core.log.warn("ext-plugin:set valid items forcibly overwritten") + end + else + core.log.error('shm "ext-plugin" not found') + end +end + + +local function flush_token() + if shdict then + core.log.warn("flush conf token in shared dict") + shdict:flush_all() + else + core.log.error('shm "ext-plugin" not found') + end +end + + +local rpc_call +local rpc_handlers = { + nil, + function (conf, ctx, sock, unique_key) + local token = fetch_token(unique_key) + if token then + core.log.info("fetch token from shared dict, token: ", token) + return token + end + + local lock, err = resty_lock:new(shdict_name) + if not lock then + return nil, "failed to create lock: " .. 
err + end + + local elapsed, err = lock:lock("prepare_conf") + if not elapsed then + return nil, "failed to acquire the lock: " .. err + end + + local token = fetch_token(unique_key) + if token then + lock:unlock() + core.log.info("fetch token from shared dict, token: ", token) + return token + end + + builder:Clear() + + local key = builder:CreateString(unique_key) + local conf_vec + if conf.conf then + local len = #conf.conf + local textEntries = core.table.new(len, 0) + for i = 1, len do + local name = builder:CreateString(conf.conf[i].name) + local value = builder:CreateString(conf.conf[i].value) + text_entry.Start(builder) + text_entry.AddName(builder, name) + text_entry.AddValue(builder, value) + local c = text_entry.End(builder) + textEntries[i] = c + end + prepare_conf_req.StartConfVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + conf_vec = builder:EndVector(len) + end + + prepare_conf_req.Start(builder) + prepare_conf_req.AddKey(builder, key) + if conf_vec then + prepare_conf_req.AddConf(builder, conf_vec) + end + local req = prepare_conf_req.End(builder) + builder:Finish(req) + + local ok, err = send(sock, constants.RPC_PREPARE_CONF, builder:Output()) + if not ok then + lock:unlock() + return nil, "failed to send RPC_PREPARE_CONF: " .. err + end + + local ty, resp = receive(sock) + if ty == nil then + lock:unlock() + return nil, "failed to receive RPC_PREPARE_CONF: " .. resp + end + + if ty ~= constants.RPC_PREPARE_CONF then + lock:unlock() + return nil, "failed to receive RPC_PREPARE_CONF: unexpected type " .. 
ty + end + + local buf = flatbuffers.binaryArray.New(resp) + local pcr = prepare_conf_resp.GetRootAsResp(buf, 0) + token = pcr:ConfToken() + + core.log.notice("get conf token: ", token, " conf: ", core.json.delay_encode(conf.conf)) + store_token(unique_key, token) + + lock:unlock() + + return token + end, + function (conf, ctx, sock, entry) + local lrucache_id = core.lrucache.plugin_ctx_id(ctx, entry) + local token, err = core.lrucache.plugin_ctx(lrucache, ctx, entry, rpc_call, + constants.RPC_PREPARE_CONF, conf, ctx, + lrucache_id) + if not token then + return nil, err + end + + builder:Clear() + local var = ctx.var + + local uri + if var.upstream_uri == "" then + -- use original uri instead of rewritten one + uri = var.uri + else + uri = var.upstream_uri + + -- the rewritten one may contain new args + local index = core.string.find(uri, "?") + if index then + local raw_uri = uri + uri = str_sub(raw_uri, 1, index - 1) + core.request.set_uri_args(ctx, str_sub(raw_uri, index + 1)) + end + end + + local path = builder:CreateString(uri) + + local bin_addr = var.binary_remote_addr + local src_ip = builder:CreateByteVector(bin_addr) + + local args = core.request.get_uri_args(ctx) + local textEntries = {} + for key, val in pairs(args) do + local ty = type(val) + if ty == "table" then + for _, v in ipairs(val) do + core.table.insert(textEntries, build_args(builder, key, v)) + end + else + core.table.insert(textEntries, build_args(builder, key, val)) + end + end + local len = #textEntries + http_req_call_req.StartArgsVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + local args_vec = builder:EndVector(len) + + local hdrs = core.request.headers(ctx) + core.table.clear(textEntries) + for key, val in pairs(hdrs) do + local ty = type(val) + if ty == "table" then + for _, v in ipairs(val) do + core.table.insert(textEntries, build_headers(var, builder, key, v)) + end + else + core.table.insert(textEntries, build_headers(var, 
builder, key, val)) + end + end + local len = #textEntries + http_req_call_req.StartHeadersVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + local hdrs_vec = builder:EndVector(len) + + local id = generate_id() + local method = var.method + + http_req_call_req.Start(builder) + http_req_call_req.AddId(builder, id) + http_req_call_req.AddConfToken(builder, token) + http_req_call_req.AddSrcIp(builder, src_ip) + http_req_call_req.AddPath(builder, path) + http_req_call_req.AddArgs(builder, args_vec) + http_req_call_req.AddHeaders(builder, hdrs_vec) + http_req_call_req.AddMethod(builder, encode_a6_method(method)) + + local req = http_req_call_req.End(builder) + builder:Finish(req) + + local ok, err = send(sock, constants.RPC_HTTP_REQ_CALL, builder:Output()) + if not ok then + return nil, "failed to send RPC_HTTP_REQ_CALL: " .. err + end + + local ty, resp + while true do + ty, resp = receive(sock) + if ty == nil then + return nil, "failed to receive RPC_HTTP_REQ_CALL: " .. resp + end + + if ty ~= constants.RPC_EXTRA_INFO then + break + end + + local out, err = handle_extra_info(ctx, resp) + if not out then + return nil, "failed to handle RPC_EXTRA_INFO: " .. err + end + + local ok, err = send(sock, constants.RPC_EXTRA_INFO, out) + if not ok then + return nil, "failed to reply RPC_EXTRA_INFO: " .. err + end + end + + if ty ~= constants.RPC_HTTP_REQ_CALL then + return nil, "failed to receive RPC_HTTP_REQ_CALL: unexpected type " .. 
ty + end + + local buf = flatbuffers.binaryArray.New(resp) + local call_resp = http_req_call_resp.GetRootAsResp(buf, 0) + local action_type = call_resp:ActionType() + if action_type == http_req_call_action.Stop then + local action = call_resp:Action() + local stop = http_req_call_stop.New() + stop:Init(action.bytes, action.pos) + + local len = stop:HeadersLength() + if len > 0 then + local stop_resp_headers = {} + for i = 1, len do + local entry = stop:Headers(i) + local name = str_lower(entry:Name()) + if stop_resp_headers[name] == nil then + core.response.set_header(name, entry:Value()) + stop_resp_headers[name] = true + else + core.response.add_header(name, entry:Value()) + end + end + end + + local body + local len = stop:BodyLength() + if len > 0 then + -- TODO: support empty body + body = stop:BodyAsString() + end + local code = stop:Status() + -- avoid using 0 as the default http status code + if code == 0 then + code = 200 + end + return true, nil, code, body + end + + if action_type == http_req_call_action.Rewrite then + local action = call_resp:Action() + local rewrite = http_req_call_rewrite.New() + rewrite:Init(action.bytes, action.pos) + + local path = rewrite:Path() + if path then + path = core.utils.uri_safe_encode(path) + var.upstream_uri = path + end + + local len = rewrite:HeadersLength() + if len > 0 then + for i = 1, len do + local entry = rewrite:Headers(i) + local name = entry:Name() + core.request.set_header(ctx, name, entry:Value()) + + if str_lower(name) == "host" then + var.upstream_host = entry:Value() + end + end + end + + local body_len = rewrite:BodyLength() + if body_len > 0 then + local body = rewrite:BodyAsString() + ngx.req.read_body() + ngx.req.set_body_data(body) + end + + local len = rewrite:RespHeadersLength() + if len > 0 then + local rewrite_resp_headers = {} + for i = 1, len do + local entry = rewrite:RespHeaders(i) + local name = str_lower(entry:Name()) + if exclude_resp_header[name] == nil then + if 
rewrite_resp_headers[name] == nil then + core.response.set_header(name, entry:Value()) + rewrite_resp_headers[name] = true + else + core.response.add_header(name, entry:Value()) + end + end + end + end + + local len = rewrite:ArgsLength() + if len > 0 then + local changed = {} + for i = 1, len do + local entry = rewrite:Args(i) + local name = entry:Name() + local value = entry:Value() + if value == nil then + args[name] = nil + + else + if changed[name] then + if type(args[name]) == "table" then + core.table.insert(args[name], value) + else + args[name] = {args[name], entry:Value()} + end + else + args[name] = entry:Value() + end + + changed[name] = true + end + end + + core.request.set_uri_args(ctx, args) + + if path then + var.upstream_uri = path .. '?' .. var.args + end + end + end + + return true + end, + nil, -- ignore RPC_EXTRA_INFO, already processed during RPC_HTTP_REQ_CALL interaction + function (conf, ctx, sock, entry) + local lrucache_id = core.lrucache.plugin_ctx_id(ctx, entry) + local token, err = core.lrucache.plugin_ctx(lrucache, ctx, entry, rpc_call, + constants.RPC_PREPARE_CONF, conf, ctx, + lrucache_id) + if not token then + return nil, err + end + + builder:Clear() + local var = ctx.var + + local res = ctx.runner_ext_response + local textEntries = {} + local hdrs = res.headers + for key, val in pairs(hdrs) do + local ty = type(val) + if ty == "table" then + for _, v in ipairs(val) do + core.table.insert(textEntries, build_headers(var, builder, key, v)) + end + else + core.table.insert(textEntries, build_headers(var, builder, key, val)) + end + end + local len = #textEntries + http_resp_call_req.StartHeadersVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + local hdrs_vec = builder:EndVector(len) + + local id = generate_id() + local status = res.status + + http_resp_call_req.Start(builder) + http_resp_call_req.AddId(builder, id) + http_resp_call_req.AddStatus(builder, status) + 
http_resp_call_req.AddConfToken(builder, token) + http_resp_call_req.AddHeaders(builder, hdrs_vec) + + local req = http_resp_call_req.End(builder) + builder:Finish(req) + + local ok, err = send(sock, constants.RPC_HTTP_RESP_CALL, builder:Output()) + if not ok then + return nil, "failed to send RPC_HTTP_RESP_CALL: " .. err + end + + local ty, resp + while true do + ty, resp = receive(sock) + if ty == nil then + return nil, "failed to receive RPC_HTTP_REQ_CALL: " .. resp + end + + if ty ~= constants.RPC_EXTRA_INFO then + break + end + + local out, err = handle_extra_info(ctx, resp) + if not out then + return nil, "failed to handle RPC_EXTRA_INFO: " .. err + end + + local ok, err = send(sock, constants.RPC_EXTRA_INFO, out) + if not ok then + return nil, "failed to reply RPC_EXTRA_INFO: " .. err + end + end + + if ty ~= constants.RPC_HTTP_RESP_CALL then + return nil, "failed to receive RPC_HTTP_RESP_CALL: unexpected type " .. ty + end + + local buf = flatbuffers.binaryArray.New(resp) + local call_resp = http_resp_call_resp.GetRootAsResp(buf, 0) + local len = call_resp:HeadersLength() + if len > 0 then + local resp_headers = {} + for i = 1, len do + local entry = call_resp:Headers(i) + local name = str_lower(entry:Name()) + if resp_headers[name] == nil then + core.response.set_header(name, entry:Value()) + resp_headers[name] = true + else + core.response.add_header(name, entry:Value()) + end + end + else + -- Filter out origin headeres + for k, v in pairs(res.headers) do + if not exclude_resp_header[str_lower(k)] then + core.response.set_header(k, v) + end + end + end + + local body + local len = call_resp:BodyLength() + if len > 0 then + -- TODO: support empty body + body = call_resp:BodyAsString() + end + local code = call_resp:Status() + core.log.info("recv resp, code: ", code, " body: ", body, " len: ", len) + + if code == 0 then + -- runner changes body only, we should set code. 
+ code = body and res.status or nil + end + + return true, nil, code, body + end +} + + +rpc_call = function (ty, conf, ctx, ...) + local path = helper.get_path() + + local sock = socket_tcp() + sock:settimeouts(1000, 60000, 60000) + local ok, err = sock:connect(path) + if not ok then + return nil, "failed to connect to the unix socket " .. path .. ": " .. err + end + + local res, err, code, body = rpc_handlers[ty + 1](conf, ctx, sock, ...) + if not res then + sock:close() + return nil, err + end + + local ok, err = sock:setkeepalive(180 * 1000, 32) + if not ok then + core.log.info("failed to setkeepalive: ", err) + end + + return res, nil, code, body +end + + +local function recreate_lrucache() + flush_token() + + if lrucache then + core.log.warn("flush conf token lrucache") + end + + lrucache = new_lrucache() +end + + +function _M.communicate(conf, ctx, plugin_name, rpc_cmd) + local ok, err, code, body + local tries = 0 + local ty = rpc_cmd and rpc_cmd or constants.RPC_HTTP_REQ_CALL + while tries < 3 do + tries = tries + 1 + ok, err, code, body = rpc_call(ty, conf, ctx, plugin_name) + if ok then + if code then + return code, body + end + + return + end + + if not core.string.find(err, "conf token not found") then + core.log.error(err) + if conf.allow_degradation then + core.log.warn("Plugin Runner is wrong, allow degradation") + return + end + return 503 + end + + core.log.warn("refresh cache and try again") + recreate_lrucache() + end + + core.log.error(err) + if conf.allow_degradation then + core.log.warn("Plugin Runner is wrong after " .. tries .. 
" times retry, allow degradation") + return + end + return 503 +end + + +local function must_set(env, value) + local ok, err = core.os.setenv(env, value) + if not ok then + error(str_format("failed to set %s: %s", env, err), 2) + end +end + + +local function spawn_proc(cmd) + must_set("APISIX_CONF_EXPIRE_TIME", helper.get_conf_token_cache_time()) + must_set("APISIX_LISTEN_ADDRESS", helper.get_path()) + + local opt = { + merge_stderr = true, + } + local proc, err = ngx_pipe.spawn(cmd, opt) + if not proc then + error(str_format("failed to start %s: %s", core.json.encode(cmd), err)) + -- TODO: add retry + end + + proc:set_timeouts(nil, nil, nil, 0) + return proc +end + + +local runner +local function setup_runner(cmd) + + ngx_timer_at(0, function(premature) + if premature then + return + end + + runner = spawn_proc(cmd) + + while not exiting() do + while true do + -- drain output + local max = 3800 -- smaller than Nginx error log length limit + local data, err = runner:stdout_read_any(max) + if not data then + if exiting() then + return + end + + if err == "closed" then + break + end + else + -- we log stdout here just for debug or test + -- the runner itself should log to a file + core.log.warn(data) + end + end + + local ok, reason, status = runner:wait() + if not ok then + core.log.warn("runner exited with reason: ", reason, ", status: ", status) + end + + runner = nil + local ok, err = events:post(events_list._source, events_list.runner_exit) + if not ok then + core.log.error("post event failure with ", events_list._source, ", error: ", err) + end + + core.log.warn("respawn runner 3 seconds later with cmd: ", core.json.encode(cmd)) + core.utils.sleep(3) + core.log.warn("respawning new runner...") + runner = spawn_proc(cmd) + end + end) +end + + +function _M.init_worker() + local local_conf = core.config.local_conf() + local cmd = core.table.try_read_attr(local_conf, "ext-plugin", "cmd") + if not cmd then + return + end + + events_list = events:event_list( + 
"process_runner_exit_event", + "runner_exit" + ) + + -- flush cache when runner exited + events:register(recreate_lrucache, events_list._source, events_list.runner_exit) + + -- note that the runner is run under the same user as the Nginx master + if process.type() == "privileged agent" then + setup_runner(cmd) + end +end + + +function _M.exit_worker() + if process.type() == "privileged agent" and runner then + -- We need to send SIGTERM in the exit_worker phase, as: + -- 1. privileged agent doesn't support graceful exiting when I write this + -- 2. better to make it work without graceful exiting + local pid = runner:pid() + core.log.notice("terminate runner ", pid, " with SIGTERM") + local num = resty_signal.signum("TERM") + runner:kill(num) + + -- give 1s to clean up the mess + core.os.waitpid(pid, 1) + -- then we KILL it via gc finalizer + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/fault-injection.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/fault-injection.lua new file mode 100644 index 0000000..34ca05e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/fault-injection.lua @@ -0,0 +1,175 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local expr = require("resty.expr.v1")

local sleep = core.sleep
local random = math.random
local ipairs = ipairs
local ngx = ngx
local pairs = pairs
local type = type

local plugin_name = "fault-injection"


local schema = {
    type = "object",
    properties = {
        abort = {
            type = "object",
            properties = {
                http_status = {type = "integer", minimum = 200},
                body = {type = "string", minLength = 0},
                headers = {
                    type = "object",
                    minProperties = 1,
                    patternProperties = {
                        ["^[^:]+$"] = {
                            oneOf = {
                                { type = "string" },
                                { type = "number" }
                            }
                        }
                    }
                },
                percentage = {type = "integer", minimum = 0, maximum = 100},
                vars = {
                    type = "array",
                    maxItems = 20,
                    items = {
                        type = "array",
                    },
                }
            },
            required = {"http_status"},
        },
        delay = {
            type = "object",
            properties = {
                duration = {type = "number", minimum = 0},
                percentage = {type = "integer", minimum = 0, maximum = 100},
                vars = {
                    type = "array",
                    maxItems = 20,
                    items = {
                        type = "array",
                    },
                }
            },
            required = {"duration"},
        }
    },
    minProperties = 1,
}


local _M = {
    version = 0.1,
    priority = 11000,
    name = plugin_name,
    schema = schema,
}


-- Decide whether the fault applies to this request.
-- A nil percentage means "always apply"; otherwise apply with the
-- configured probability (1..100).
local function sample_hit(percentage)
    if not percentage then
        return true
    end

    return random(1, 100) <= percentage
end


-- Return a truthy value when at least one rule in `vars` matches the
-- request variables in `ctx.var`.
-- FIX: the original ignored the error returned by expr.new(); if a rule
-- failed to compile at runtime the nil result was immediately used as an
-- object (`expr:eval`), raising an error. A bad rule is now logged and
-- skipped, matching the best-effort semantics of this plugin.
local function vars_match(vars, ctx)
    local match_result
    for _, var in ipairs(vars) do
        local compiled, err = expr.new(var)
        if not compiled then
            core.log.error("failed to create vars expression: ", err)
        else
            match_result = compiled:eval(ctx.var)
            if match_result then
                break
            end
        end
    end

    return match_result
end


-- Validate the plugin configuration. Besides the JSON-schema check,
-- pre-compile every `vars` rule so malformed expressions are rejected
-- at configuration time rather than per request.
function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    if conf.abort and conf.abort.vars then
        for _, var in ipairs(conf.abort.vars) do
            local _, err = expr.new(var)
            if err then
                core.log.error("failed to create vars expression: ", err)
                return false, err
            end
        end
    end

    if conf.delay and conf.delay.vars then
        for _, var in ipairs(conf.delay.vars) do
            local _, err = expr.new(var)
            if err then
                core.log.error("failed to create vars expression: ", err)
                return false, err
            end
        end
    end

    return true
end


-- Rewrite phase: optionally delay the request and/or abort it with a
-- configured status/body. Both faults are gated by their own percentage
-- sampling and (optional) vars matching.
function _M.rewrite(conf, ctx)
    core.log.info("plugin rewrite phase, conf: ", core.json.delay_encode(conf))

    -- vars default to "match" when not configured
    local abort_vars = true
    if conf.abort and conf.abort.vars then
        abort_vars = vars_match(conf.abort.vars, ctx)
    end
    core.log.info("abort_vars: ", abort_vars)

    local delay_vars = true
    if conf.delay and conf.delay.vars then
        delay_vars = vars_match(conf.delay.vars, ctx)
    end
    core.log.info("delay_vars: ", delay_vars)

    if conf.delay and sample_hit(conf.delay.percentage) and delay_vars then
        sleep(conf.delay.duration)
    end

    if conf.abort and sample_hit(conf.abort.percentage) and abort_vars then
        if conf.abort.headers then
            for header_name, header_value in pairs(conf.abort.headers) do
                -- only string values may carry nginx variables to resolve
                if type(header_value) == "string" then
                    header_value = core.utils.resolve_var(header_value, ctx.var)
                end
                ngx.header[header_name] = header_value
            end
        end
        return conf.abort.http_status, core.utils.resolve_var(conf.abort.body, ctx.var)
    end
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/file-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/file-logger.lua
new file mode 100644
index 0000000..e0970d8
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/file-logger.lua
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.
You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local log_util = require("apisix.utils.log-util")
local core = require("apisix.core")
local expr = require("resty.expr.v1")
local ngx = ngx
local io_open = io.open
-- resty.apisix.process is only available on APISIX-OpenResty; the pcall
-- result gates the file-handle caching below.
local is_apisix_or, process = pcall(require, "resty.apisix.process")


local plugin_name = "file-logger"


local schema = {
    type = "object",
    properties = {
        path = {
            type = "string"
        },
        log_format = {type = "object"},
        include_req_body = {type = "boolean", default = false},
        include_req_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
        include_resp_body = {type = "boolean", default = false},
        include_resp_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
        match = {
            type = "array",
            maxItems = 20,
            items = {
                type = "array",
            },
        }
    },
    required = {"path"}
}


local metadata_schema = {
    type = "object",
    properties = {
        log_format = {
            type = "object"
        }
    }
}


local _M = {
    version = 0.1,
    priority = 399,
    name = plugin_name,
    schema = schema,
    metadata_schema = metadata_schema
}


-- Validate either the plugin metadata or a plugin instance config.
-- A configured `match` rule is pre-compiled so broken expressions are
-- rejected up front.
function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_METADATA then
        return core.schema.check(metadata_schema, conf)
    end

    if conf.match then
        local compiled, err = expr.new(conf.match)
        if not compiled then
            return nil, "failed to validate the 'match' expression: " .. err
        end
    end

    return core.schema.check(schema, conf)
end


local open_file_cache
if is_apisix_or then
    -- TODO: switch to a cache which supports inactive time,
    -- so that unused files would not be cached
    local cached_files = core.lrucache.new({
        type = "plugin",
    })

    -- Open conf.path in append mode and record the open time on `holder`.
    local function do_open(conf, holder)
        local fd, err = io_open(conf.path, 'a+')
        if not fd then
            return nil, err
        end

        -- unbuffered writes: buffered output misbehaves once a log entry
        -- exceeds the buffer size
        fd:setvbuf("no")

        holder.file = fd
        holder.open_time = ngx.now() * 1000
        return holder
    end

    -- Return a cached file handle for conf.path, reopening it when a log
    -- reopen (e.g. logrotate signal) happened after the handle was opened.
    function open_file_cache(conf)
        local last_reopen_time = process.get_last_reopen_ms()

        local holder, err = cached_files(conf.path, 0, do_open, conf, {})
        if not holder then
            return nil, err
        end

        if holder.open_time < last_reopen_time then
            core.log.notice("reopen cached log file: ", conf.path)
            holder.file:close()

            local ok, reopen_err = do_open(conf, holder)
            if not ok then
                return nil, reopen_err
            end
        end

        return holder.file
    end
end


-- Serialize the log entry as JSON and append it to the configured file.
local function write_file_data(conf, log_message)
    local serialized = core.json.encode(log_message)

    local fd, err
    if open_file_cache then
        fd, err = open_file_cache(conf)
    else
        fd, err = io_open(conf.path, 'a+')
    end

    if not fd then
        core.log.error("failed to open file: ", conf.path, ", error info: ", err)
        return
    end

    -- write the message and trailing newline in a single call: several
    -- fwrite calls per entry could interleave output between workers,
    -- so the write must stay atomic
    local ok, write_err = fd:write(serialized .. "\n")
    if not ok then
        core.log.error("failed to write file: ", conf.path, ", error info: ", write_err)
    end

    -- a cached handle stays open (gc closes it eventually); an ad-hoc
    -- handle is closed right away
    if not open_file_cache then
        fd:close()
    end
end

-- Accumulate the response body when include_resp_body is enabled.
function _M.body_filter(conf, ctx)
    log_util.collect_body(conf, ctx)
end

-- Log phase: build the log entry and append it to the file.
function _M.log(conf, ctx)
    local entry = log_util.get_log_entry(plugin_name, conf, ctx)
    if entry ~= nil then
        write_file_data(conf, entry)
    end
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/forward-auth.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/forward-auth.lua
new file mode 100644
index 0000000..bd58364
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/forward-auth.lua
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local ipairs = ipairs
local core = require("apisix.core")
local http = require("resty.http")

local schema = {
    type = "object",
    properties = {
        uri = {type = "string"},
        allow_degradation = {type = "boolean", default = false},
        status_on_error = {type = "integer", minimum = 200, maximum = 599, default = 403},
        ssl_verify = {
            type = "boolean",
            default = true,
        },
        request_method = {
            type = "string",
            default = "GET",
            enum = {"GET", "POST"},
            description = "the method for client to request the authorization service"
        },
        request_headers = {
            type = "array",
            default = {},
            items = {type = "string"},
            description = "client request header that will be sent to the authorization service"
        },
        upstream_headers = {
            type = "array",
            default = {},
            items = {type = "string"},
            description = "authorization response header that will be sent to the upstream"
        },
        client_headers = {
            type = "array",
            default = {},
            items = {type = "string"},
            description = "authorization response header that will be sent to"
                          .. "the client when authorizing failed"
        },
        timeout = {
            type = "integer",
            minimum = 1,
            maximum = 60000,
            default = 3000,
            description = "timeout in milliseconds",
        },
        keepalive = {type = "boolean", default = true},
        keepalive_timeout = {type = "integer", minimum = 1000, default = 60000},
        keepalive_pool = {type = "integer", minimum = 1, default = 5},
    },
    required = {"uri"}
}


local _M = {
    version = 0.1,
    priority = 2002,
    name = "forward-auth",
    schema = schema,
}


-- Validate the config; warns about plain-http `uri` / disabled TLS
-- verification before the JSON-schema check.
function _M.check_schema(conf)
    core.utils.check_https({"uri"}, conf, _M.name)
    core.utils.check_tls_bool({"ssl_verify"}, conf, _M.name)

    return core.schema.check(schema, conf)
end


-- Access phase: call the external authorization service and either let
-- the request through (optionally enriching upstream headers) or reply
-- with the authorization service's status/body.
function _M.access(conf, ctx)
    -- headers describing the original client request for the auth service
    local auth_headers = {
        ["X-Forwarded-Proto"] = core.request.get_scheme(ctx),
        ["X-Forwarded-Method"] = core.request.get_method(),
        ["X-Forwarded-Host"] = core.request.get_host(ctx),
        ["X-Forwarded-Uri"] = ctx.var.request_uri,
        ["X-Forwarded-For"] = core.request.get_remote_client_ip(ctx),
    }

    -- POST mode forwards the body, so body-related headers must follow it
    if conf.request_method == "POST" then
        auth_headers["Content-Length"] = core.request.header(ctx, "content-length")
        auth_headers["Expect"] = core.request.header(ctx, "expect")
        auth_headers["Transfer-Encoding"] = core.request.header(ctx, "transfer-encoding")
        auth_headers["Content-Encoding"] = core.request.header(ctx, "content-encoding")
    end

    -- copy the configured client request headers, without overriding the
    -- X-Forwarded-* / body headers set above
    if #conf.request_headers > 0 then
        for _, name in ipairs(conf.request_headers) do
            if not auth_headers[name] then
                auth_headers[name] = core.request.header(ctx, name)
            end
        end
    end

    local request_opts = {
        headers = auth_headers,
        keepalive = conf.keepalive,
        ssl_verify = conf.ssl_verify,
        method = conf.request_method
    }

    if request_opts.method == "POST" then
        request_opts.body = core.request.get_body()
    end

    if conf.keepalive then
        request_opts.keepalive_timeout = conf.keepalive_timeout
        request_opts.keepalive_pool = conf.keepalive_pool
    end

    local httpc = http.new()
    httpc:set_timeout(conf.timeout)

    local res, err = httpc:request_uri(conf.uri, request_opts)
    if not res then
        -- the auth service is unreachable: either degrade gracefully or
        -- answer with the configured error status
        if conf.allow_degradation then
            return
        end
        core.log.warn("failed to process forward auth, err: ", err)
        return conf.status_on_error
    end

    -- any non-2xx (>= 300) answer from the auth service denies the request
    if res.status >= 300 then
        local client_headers = {}

        if #conf.client_headers > 0 then
            for _, name in ipairs(conf.client_headers) do
                client_headers[name] = res.headers[name]
            end
        end

        core.response.set_header(client_headers)
        return res.status, res.body
    end

    -- authorized: propagate the configured auth-response headers upstream
    for _, name in ipairs(conf.upstream_headers) do
        local value = res.headers[name]
        if value then
            core.request.set_header(ctx, name, value)
        end
    end
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/gm.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/gm.lua
new file mode 100644
index 0000000..ee147ce
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/gm.lua
-- Licensed to the Apache Software Foundation (ASF) under one
-- or more contributor license agreements.  See the NOTICE file
-- distributed with this work for additional information
-- regarding copyright ownership.  The ASF licenses this file
-- to you under the Apache License, Version 2.0 (the
-- "License"); you may not use this file except in compliance
-- with the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing,
-- software distributed under the License is distributed on an
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- KIND, either express or implied.  See the License for the
-- specific language governing permissions and limitations
-- under the License.

-- local common libs
local require = require
local pcall = pcall
local ffi = require("ffi")
local C = ffi.C
local get_request = require("resty.core.base").get_request
local core = require("apisix.core")
local radixtree_sni = require("apisix.ssl.router.radixtree_sni")
local apisix_ssl = require("apisix.ssl")
local _, ssl = pcall(require, "resty.apisix.ssl")
local error = error


-- FIX: ffi.cdef requires every C declaration to be terminated with ';'.
-- The declaration previously lacked the semicolon, which makes ffi.cdef
-- raise at load time. The symbol itself is only probed in _M.init() to
-- detect whether the runtime is linked against Tongsuo.
ffi.cdef[[
unsigned long Tongsuo_version_num(void);
]]


-- Set the GM (SM2) dual certificate/key pair for the current TLS handshake.
-- Returns true on success, or false plus an error message.
local function set_pem_ssl_key(sni, enc_cert, enc_pkey, sign_cert, sign_pkey)
    local r = get_request()
    if r == nil then
        return false, "no request found"
    end

    local parsed_enc_cert, err = apisix_ssl.fetch_cert(sni, enc_cert)
    if not parsed_enc_cert then
        return false, "failed to parse enc PEM cert: " .. err
    end

    local parsed_sign_cert, err = apisix_ssl.fetch_cert(sni, sign_cert)
    if not parsed_sign_cert then
        return false, "failed to parse sign PEM cert: " .. err
    end

    local ok, err = ssl.set_gm_cert(parsed_enc_cert, parsed_sign_cert)
    if not ok then
        return false, "failed to set PEM cert: " .. err
    end

    local parsed_enc_pkey, err = apisix_ssl.fetch_pkey(sni, enc_pkey)
    if not parsed_enc_pkey then
        return false, "failed to parse enc PEM priv key: " .. err
    end

    local parsed_sign_pkey, err = apisix_ssl.fetch_pkey(sni, sign_pkey)
    if not parsed_sign_pkey then
        return false, "failed to parse sign PEM priv key: " .. err
    end

    ok, err = ssl.set_gm_priv_key(parsed_enc_pkey, parsed_sign_pkey)
    if not ok then
        return false, "failed to set PEM priv key: " .. err
    end

    return true
end


local original_set_cert_and_key
-- Replacement for radixtree_sni.set_cert_and_key: handles GM certificates
-- itself and delegates everything else to the original implementation.
local function set_cert_and_key(sni, value)
    if value.gm then
        -- process as GM certificate
        -- For GM dual certificate, the `cert` and `key` will be encryption cert/key.
        -- The first item in `certs` and `keys` will be sign cert/key.
        local enc_cert = value.cert
        local enc_pkey = value.key
        local sign_cert = value.certs[1]
        local sign_pkey = value.keys[1]
        return set_pem_ssl_key(sni, enc_cert, enc_pkey, sign_cert, sign_pkey)
    end
    return original_set_cert_and_key(sni, value)
end


local original_check_ssl_conf
-- Replacement for apisix_ssl.check_ssl_conf: for GM SSL objects it
-- additionally requires exactly one sign cert/key pair in `certs`/`keys`.
local function check_ssl_conf(in_dp, conf)
    if conf.gm then
        -- process as GM certificate
        -- For GM dual certificate, the `cert` and `key` will be encryption cert/key.
        -- The first item in `certs` and `keys` will be sign cert/key.
        local ok, err = original_check_ssl_conf(in_dp, conf)
        -- check cert/key first in the original method
        if not ok then
            return nil, err
        end

        -- Currently, APISIX doesn't check the cert type (ECDSA / RSA). So we skip the
        -- check for now in this plugin.
        local num_certs = conf.certs and #conf.certs or 0
        local num_keys = conf.keys and #conf.keys or 0
        if num_certs ~= 1 or num_keys ~= 1 then
            return nil, "sign cert/key are required"
        end
        return true
    end
    return original_check_ssl_conf(in_dp, conf)
end


-- module define
local plugin_name = "gm"

-- plugin schema (the plugin itself takes no options; the `gm` flag lives
-- on the SSL object, injected into core.schema.ssl in _M.init)
local plugin_schema = {
    type = "object",
    properties = {
    },
}

local _M = {
    version = 0.1,          -- plugin version
    priority = -43,
    name = plugin_name,     -- plugin name
    schema = plugin_schema, -- plugin schema
}


-- Enable NTLS support and hook the SSL routines. Fails fast when the
-- runtime is not built with Tongsuo (probed via the FFI symbol above).
function _M.init()
    if not pcall(function () return C.Tongsuo_version_num end) then
        error("need to build Tongsuo (https://github.com/Tongsuo-Project/Tongsuo) " ..
              "into the APISIX-Runtime")
    end

    ssl.enable_ntls()
    original_set_cert_and_key = radixtree_sni.set_cert_and_key
    radixtree_sni.set_cert_and_key = set_cert_and_key
    original_check_ssl_conf = apisix_ssl.check_ssl_conf
    apisix_ssl.check_ssl_conf = check_ssl_conf

    if core.schema.ssl.properties.gm ~= nil then
        error("Field 'gm' is occupied")
    end

    -- inject a mark to distinguish GM certificate
    core.schema.ssl.properties.gm = {
        type = "boolean"
    }
end


-- Undo everything _M.init() installed.
function _M.destroy()
    ssl.disable_ntls()
    radixtree_sni.set_cert_and_key = original_set_cert_and_key
    apisix_ssl.check_ssl_conf = original_check_ssl_conf
    core.schema.ssl.properties.gm = nil
end

-- module interface for schema check
-- @param `conf` user defined conf data
-- @param `schema_type` defined in `apisix/core/schema.lua`
-- @return
function _M.check_schema(conf, schema_type)
    return core.schema.check(plugin_schema, conf)
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/google-cloud-logging.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/google-cloud-logging.lua
new file mode 100644
index 0000000..62ca991
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/google-cloud-logging.lua
@@ -0,0 +1,265 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local core = require("apisix.core")
local tostring = tostring
local http = require("resty.http")
local log_util = require("apisix.utils.log-util")
local bp_manager_mod = require("apisix.utils.batch-processor-manager")
local google_oauth = require("apisix.utils.google-cloud-oauth")


-- per-plugin lrucache; holds one oauth client object per route/plugin ctx
local lrucache = core.lrucache.new({
    type = "plugin",
})

local plugin_name = "google-cloud-logging"
local batch_processor_manager = bp_manager_mod.new(plugin_name)
local schema = {
    type = "object",
    properties = {
        auth_config = {
            type = "object",
            properties = {
                client_email = { type = "string" },
                private_key = { type = "string" },
                project_id = { type = "string" },
                token_uri = {
                    type = "string",
                    default = "https://oauth2.googleapis.com/token"
                },
                -- https://developers.google.com/identity/protocols/oauth2/scopes#logging
                scope = {
                    type = "array",
                    items = {
                        description = "Google OAuth2 Authorization Scopes",
                        type = "string",
                    },
                    minItems = 1,
                    uniqueItems = true,
                    default = {
                        "https://www.googleapis.com/auth/logging.read",
                        "https://www.googleapis.com/auth/logging.write",
                        "https://www.googleapis.com/auth/logging.admin",
                        "https://www.googleapis.com/auth/cloud-platform"
                    }
                },
                scopes = {
                    type = "array",
                    items = {
                        description = "Google OAuth2 Authorization Scopes",
                        type = "string",
                    },
                    minItems = 1,
                    uniqueItems = true
                },
                entries_uri = {
                    type = "string",
                    default = "https://logging.googleapis.com/v2/entries:write"
                },
            },
            required = { "client_email", "private_key", "project_id", "token_uri" }
        },
        ssl_verify = {
            type = "boolean",
            default = true
        },
        auth_file = { type = "string" },
        -- https://cloud.google.com/logging/docs/reference/v2/rest/v2/MonitoredResource
        resource = {
            type = "object",
            properties = {
                type = { type = "string" },
                labels = { type = "object" }
            },
            default = {
                type = "global"
            },
            required = { "type" }
        },
        -- https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
        log_id = {
            type = "string",
            default = "apisix.apache.org%2Flogs"
        },
        log_format = {type = "object"},
    },
    -- credentials come either inline (auth_config) or from a JSON file
    oneOf = {
        { required = { "auth_config" } },
        { required = { "auth_file" } },
    },
    encrypt_fields = {"auth_config.private_key"},
}

local metadata_schema = {
    type = "object",
    properties = {
        log_format = {
            type = "object"
        }
    },
}


-- POST a batch of LogEntry objects to the Cloud Logging entries:write API.
-- Returns the response body on success, or nil plus an error message.
local function send_to_google(oauth, entries)
    local http_new = http.new()
    local access_token = oauth:generate_access_token()
    if not access_token then
        return nil, "failed to get google oauth token"
    end

    local res, err = http_new:request_uri(oauth.entries_uri, {
        ssl_verify = oauth.ssl_verify,
        method = "POST",
        body = core.json.encode({
            entries = entries,
            partialSuccess = false,
        }),
        headers = {
            ["Content-Type"] = "application/json",
            -- token type falls back to "Bearer" when the oauth object
            -- doesn't carry one
            ["Authorization"] = (oauth.access_token_type or "Bearer") .. " " .. access_token,
        },
    })

    if not res then
        return nil, "failed to write log to google, " .. err
    end

    if res.status ~= 200 then
        return nil, res.body
    end

    return res.body
end


-- Resolve the oauth configuration: prefer the inline `auth_config`,
-- otherwise load and JSON-decode the file named by `auth_file`.
local function fetch_oauth_conf(conf)
    if conf.auth_config then
        return conf.auth_config
    end

    if not conf.auth_file then
        return nil, "configuration is not defined"
    end

    local file_content, err = core.io.get_file(conf.auth_file)
    if not file_content then
        return nil, "failed to read configuration, file: " .. conf.auth_file .. " err: " .. err
    end

    local config_tab
    config_tab, err = core.json.decode(file_content)
    if not config_tab then
        return nil, "config parse failure, data: " .. file_content .. " , err: " .. err
    end

    return config_tab
end


-- Build a google-cloud-oauth client from the resolved configuration.
local function create_oauth_object(conf)
    local auth_conf, err = fetch_oauth_conf(conf)
    if not auth_conf then
        return nil, err
    end

    -- `scopes` (plural) takes precedence over `scope` when both are set
    auth_conf.scope = auth_conf.scopes or auth_conf.scope

    return google_oauth.new(auth_conf, conf.ssl_verify)
end


-- Convert one request's log data into a Cloud Logging LogEntry table.
-- With a customized log_format the whole entry becomes the jsonPayload;
-- otherwise an httpRequest structure is filled from the standard fields.
local function get_logger_entry(conf, ctx, oauth)
    local entry, customized = log_util.get_log_entry(plugin_name, conf, ctx)
    local google_entry
    if not customized then
        google_entry = {
            httpRequest = {
                requestMethod = entry.request.method,
                requestUrl = entry.request.url,
                requestSize = entry.request.size,
                status = entry.response.status,
                responseSize = entry.response.size,
                userAgent = entry.request.headers and entry.request.headers["user-agent"],
                remoteIp = entry.client_ip,
                serverIp = entry.upstream,
                -- Cloud Logging wants latency as a duration string, e.g. "0.123s"
                latency = tostring(core.string.format("%0.3f", entry.latency / 1000)) .. "s"
            },
            jsonPayload = {
                route_id = entry.route_id,
                service_id = entry.service_id,
            },
        }
    else
        google_entry = {
            jsonPayload = entry,
        }
    end

    google_entry.labels = {
        source = "apache-apisix-google-cloud-logging"
    }
    google_entry.timestamp = log_util.get_rfc3339_zulu_timestamp()
    google_entry.resource = conf.resource
    google_entry.insertId = ctx.var.request_id
    google_entry.logName = core.string.format("projects/%s/logs/%s", oauth.project_id, conf.log_id)

    return google_entry
end


local _M = {
    version = 0.1,
    priority = 407,
    name = plugin_name,
    metadata_schema = metadata_schema,
    schema = batch_processor_manager:wrap_schema(schema),
}


function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_METADATA then
        return core.schema.check(metadata_schema, conf)
    end

    return core.schema.check(schema, conf)
end


-- Log phase: build the LogEntry and hand it to the batch processor,
-- creating the per-route processor (bound to send_to_google) on first use.
function _M.log(conf, ctx)
    local oauth, err = core.lrucache.plugin_ctx(lrucache, ctx, nil,
                                                create_oauth_object, conf)
    if not oauth then
        core.log.error("failed to fetch google-cloud-logging.oauth object: ", err)
        return
    end

    local entry = get_logger_entry(conf, ctx, oauth)

    if batch_processor_manager:add_entry(conf, entry) then
        return
    end

    local process = function(entries)
        return send_to_google(oauth, entries)
    end

    batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, process)
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode.lua
new file mode 100644
index 0000000..625018f
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode.lua
@@ -0,0 +1,211 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local ngx = ngx
local core = require("apisix.core")
local schema_def = require("apisix.schema_def")
local proto = require("apisix.plugins.grpc-transcode.proto")
local request = require("apisix.plugins.grpc-transcode.request")
local response = require("apisix.plugins.grpc-transcode.response")


local plugin_name = "grpc-transcode"

-- the allowed lua-protobuf option strings, grouped by category
local pb_option_def = {
    { description = "enum as result",
      type = "string",
      enum = {"enum_as_name", "enum_as_value"},
    },
    { description = "int64 as result",
      type = "string",
      enum = {"int64_as_number", "int64_as_string", "int64_as_hexstring"},
    },
    { description ="default values option",
      type = "string",
      enum = {"auto_default_values", "no_default_values",
              "use_default_values", "use_default_metatable"},
    },
    { description = "hooks option",
      type = "string",
      enum = {"enable_hooks", "disable_hooks" },
    },
}

local schema = {
    type = "object",
    properties = {
        proto_id  = schema_def.id_schema,
        service = {
            description = "the grpc service name",
            type    = "string"
        },
        method = {
            description = "the method name in the grpc service.",
            type    = "string"
        },
        deadline = {
            description = "deadline for grpc, millisecond",
            type     = "number",
            default  = 0
        },
        pb_option = {
            type = "array",
            items = { type="string", anyOf = pb_option_def },
            minItems = 1,
            default = {
                "enum_as_name",
                "int64_as_number",
                "auto_default_values",
                "disable_hooks",
            }
        },
        show_status_in_body = {
            description = "show decoded grpc-status-details-bin in response body",
            type        = "boolean",
            default     = false
        },
        -- https://github.com/googleapis/googleapis/blob/b7cb84f5d42e6dba0fdcc2d8689313f6a8c9d7b9/
        -- google/rpc/status.proto#L46
        status_detail_type = {
            description = "the message type of the grpc-status-details-bin's details part, "
                          .. "if not given, the details part will not be decoded",
            type        = "string",
        },
    },
    additionalProperties = true,
    required = { "proto_id", "service", "method" },
}

-- Based on https://cloud.google.com/apis/design/errors#handling_errors
-- maps a gRPC status code (as a string) to the closest HTTP status
local status_rel = {
    ["1"] = 499,    -- CANCELLED
    ["2"] = 500,    -- UNKNOWN
    ["3"] = 400,    -- INVALID_ARGUMENT
    ["4"] = 504,    -- DEADLINE_EXCEEDED
    ["5"] = 404,    -- NOT_FOUND
    ["6"] = 409,    -- ALREADY_EXISTS
    ["7"] = 403,    -- PERMISSION_DENIED
    ["8"] = 429,    -- RESOURCE_EXHAUSTED
    ["9"] = 400,    -- FAILED_PRECONDITION
    ["10"] = 409,   -- ABORTED
    ["11"] = 400,   -- OUT_OF_RANGE
    ["12"] = 501,   -- UNIMPLEMENTED
    ["13"] = 500,   -- INTERNAL
    ["14"] = 503,   -- UNAVAILABLE
    ["15"] = 500,   -- DATA_LOSS
    ["16"] = 401,   -- UNAUTHENTICATED
}

local _M = {
    version = 0.1,
    priority = 506,
    name = plugin_name,
    schema = schema,
}


function _M.init()
    proto.init()
end


function _M.destroy()
    proto.destroy()
end


function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    return true
end


-- Access phase: load the compiled proto and transcode the JSON/HTTP
-- request body into a framed gRPC message for the upstream.
function _M.access(conf, ctx)
    core.log.info("conf: ", core.json.delay_encode(conf))

    local proto_id = conf.proto_id
    if not proto_id then
        core.log.error("proto id miss: ", proto_id)
        return
    end

    local proto_obj, err = proto.fetch(proto_id)
    if err then
        core.log.error("proto load error: ", err)
        return
    end

    local ok, err, err_code = request(proto_obj, conf.service,
                                      conf.method, conf.pb_option, conf.deadline)
    if not ok then
        core.log.error("transform request error: ", err)
        return err_code
    end

    -- stash the compiled proto for body_filter
    ctx.proto_obj = proto_obj

end


-- Header filter: rewrite the response as JSON and map the gRPC status
-- (carried in the response headers) to an HTTP status.
function _M.header_filter(conf, ctx)
    if ngx.status >= 300 then
        return
    end

    ngx.header["Content-Type"] = "application/json"
    -- body length changes after transcoding; drop the stale header
    ngx.header.content_length = nil

    local headers = ngx.resp.get_headers()

    if headers["grpc-status"] ~= nil and headers["grpc-status"] ~= "0" then
        local http_status = status_rel[headers["grpc-status"]]
        if http_status ~= nil then
            ngx.status = http_status
        else
            -- unknown grpc-status: use a non-standard 599
            ngx.status = 599
        end
    else
        -- The error response body does not contain grpc-status and grpc-message
        ngx.header["Trailer"] = {"grpc-status", "grpc-message"}
    end

end


-- Body filter: decode the framed gRPC response back to JSON.
function _M.body_filter(conf, ctx)
    if ngx.status >= 300 and not conf.show_status_in_body then
        return
    end

    local proto_obj = ctx.proto_obj
    if not proto_obj then
        return
    end

    local err = response(ctx, proto_obj, conf.service, conf.method, conf.pb_option,
                         conf.show_status_in_body, conf.status_detail_type)
    if err then
        core.log.error("transform response error: ", err)
        return
    end
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/proto.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/proto.lua
new file mode 100644
index 0000000..347ec39
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/proto.lua
@@ -0,0 +1,279 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local config_util = require("apisix.core.config_util")
local pb = require("pb")
local protoc = require("protoc")
local pcall = pcall
local ipairs = ipairs
local decode_base64 = ngx.decode_base64


-- etcd-backed config watcher for /protos, created in _M.init()
local protos
local lrucache_proto = core.lrucache.new({
    ttl = 300, count = 100
})

local proto_fake_file = "filename for loaded"

-- Compile a textual .proto definition with protoc.
-- Returns the compiled descriptor table (with a method index attached),
-- or nil plus an error message.
local function compile_proto_text(content)
    protoc.reload()
    local _p = protoc.new()
    -- the loaded proto won't appears in _p.loaded without a file name after lua-protobuf=0.3.2,
    -- which means _p.loaded after _p:load(content) is always empty, so we can pass a fake file
    -- name to keep the code below unchanged, or we can create our own load function with returning
    -- the loaded DescriptorProto table additionally, see more details in
    -- https://github.com/apache/apisix/pull/4368
    local ok, res = pcall(_p.load, _p, content, proto_fake_file)
    if not ok then
        return nil, res
    end

    if not res or not _p.loaded then
        return nil, "failed to load proto content"
    end

    local compiled = _p.loaded

    -- index methods as index["package.Service"]["Method"] = descriptor
    local index = {}
    for _, s in ipairs(compiled[proto_fake_file].service or {}) do
        local method_index = {}
        for _, m in ipairs(s.method) do
            method_index[m.name] = m
        end

        index[compiled[proto_fake_file].package .. '.' .. s.name] = method_index
    end

    compiled[proto_fake_file].index = index

    return compiled
end


-- Compile a base64-encoded binary FileDescriptorSet.
-- Returns a compiled table with the same index shape as
-- compile_proto_text, or nil on any decode/load failure.
local function compile_proto_bin(content)
    content = decode_base64(content)
    if not content then
        return nil
    end

    -- pb.load doesn't return err
    local ok = pb.load(content)
    if not ok then
        return nil
    end

    local files = pb.decode("google.protobuf.FileDescriptorSet", content).file
    local index = {}
    for _, f in ipairs(files) do
        for _, s in ipairs(f.service or {}) do
            local method_index = {}
            for _, m in ipairs(s.method) do
                method_index[m.name] = m
            end

            index[f.package .. '.' .. s.name] = method_index
        end
    end

    local compiled = {}
    compiled[proto_fake_file] = {}
    compiled[proto_fake_file].index = index
    return compiled
end


-- Compile proto content, trying text form first and falling back to the
-- binary (base64 FileDescriptorSet) form. The pb state produced by the
-- compilation is captured into `compiled.pb_state` so callers can swap
-- it in before encode/decode.
local function compile_proto(content)
    -- clear pb state
    local old_pb_state = pb.state(nil)

    local compiled, err = compile_proto_text(content)
    if not compiled then
        compiled = compile_proto_bin(content)
        if not compiled then
            return nil, err
        end
    end

    -- fetch pb state
    compiled.pb_state = pb.state(old_pb_state)
    return compiled
end


local _M = {
    version = 0.1,
    compile_proto = compile_proto,
    proto_fake_file = proto_fake_file
}

-- Find the proto resource with the given id in the watched /protos
-- values and compile its content.
local function create_proto_obj(proto_id)
    if protos.values == nil then
        return nil
    end

    local content
    for _, proto in config_util.iterate_values(protos.values) do
        if proto_id == proto.value.id then
            content = proto.value.content
            break
        end
    end

    if not content then
        return nil, "failed to find proto by id: " .. proto_id
    end

    return compile_proto(content)
end


-- Fetch (and cache) the compiled proto for the given id; the cache is
-- keyed on the /protos conf_version so updates invalidate it.
function _M.fetch(proto_id)
    return lrucache_proto(proto_id, protos.conf_version,
                          create_proto_obj, proto_id)
end


function _M.protos()
    if not protos then
        return nil, nil
    end

    return protos.values, protos.conf_version
end


-- Embedded copy of google.rpc.Status (plus Any), used to decode the
-- grpc-status-details-bin trailer. NOTE: runtime data — do not edit.
local grpc_status_proto = [[
    syntax = "proto3";

    package grpc_status;

    message Any {
        // A URL/resource name that uniquely identifies the type of the serialized
        // protocol buffer message. This string must contain at least
        // one "/" character. The last segment of the URL's path must represent
        // the fully qualified name of the type (as in
        // `path/google.protobuf.Duration`). The name should be in a canonical form
        // (e.g., leading "." is not accepted).
        //
        // In practice, teams usually precompile into the binary all types that they
        // expect it to use in the context of Any. However, for URLs which use the
        // scheme `http`, `https`, or no scheme, one can optionally set up a type
        // server that maps type URLs to message definitions as follows:
        //
        // * If no scheme is provided, `https` is assumed.
        // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
        //   value in binary format, or produce an error.
        // * Applications are allowed to cache lookup results based on the
        //   URL, or have them precompiled into a binary to avoid any
        //   lookup. Therefore, binary compatibility needs to be preserved
        //   on changes to types. (Use versioned type names to manage
        //   breaking changes.)
        //
        // Note: this functionality is not currently available in the official
        // protobuf release, and it is not used for type URLs beginning with
        // type.googleapis.com.
        //
        // Schemes other than `http`, `https` (or the empty scheme) might be
        // used with implementation specific semantics.
        //
        string type_url = 1;

        // Must be a valid serialized protocol buffer of the above specified type.
        bytes value = 2;
    }

    // The `Status` type defines a logical error model that is suitable for
    // different programming environments, including REST APIs and RPC APIs. It is
    // used by [gRPC](https://github.com/grpc). Each `Status` message contains
    // three pieces of data: error code, error message, and error details.
    //
    // You can find out more about this error model and how to work with it in the
    // [API Design Guide](https://cloud.google.com/apis/design/errors).
    message ErrorStatus {
        // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
        int32 code = 1;

        // A developer-facing error message, which should be in English. Any
        // user-facing error message should be localized and sent in the
        // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
        string message = 2;

        // A list of messages that carry the error details. There is a common set of
        // message types for APIs to use.
        repeated Any details = 3;
    }
]]


local status_pb_state
-- Compile grpc_status_proto once and keep the resulting pb state in
-- status_pb_state. Returns an error string on failure, nil on success.
local function init_status_pb_state()
    if not status_pb_state then
        -- clear current pb state
        local old_pb_state = pb.state(nil)

        -- initialize protoc compiler
        protoc.reload()
        local status_protoc = protoc.new()
        -- do not use loadfile here, it can not load the proto file when using a relative address
        -- after luarocks install apisix
        local ok, err = status_protoc:load(grpc_status_proto, "grpc_status.proto")
        if not ok then
            status_protoc:reset()
            pb.state(old_pb_state)
            return "failed to load grpc status protocol: " .. err
        end

        status_pb_state = pb.state(old_pb_state)
    end
end


function _M.fetch_status_pb_state()
    return status_pb_state
end


-- Start watching /protos in etcd and prepare the grpc status pb state.
function _M.init()
    local err
    protos, err = core.config.new("/protos", {
        automatic = true,
        item_schema = core.schema.proto
    })
    if not protos then
        core.log.error("failed to create etcd instance for fetching protos: ",
                       err)
        return
    end

    if not status_pb_state then
        err = init_status_pb_state()
        if err then
            core.log.error("failed to init grpc status proto: ",
                           err)
            return
        end
    end
end

function _M.destroy()
    if protos then
        protos:close()
    end
end

return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/request.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/request.lua
new file mode 100644
index 0000000..934a1c9
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/request.lua
@@ -0,0 +1,72 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local util = require("apisix.plugins.grpc-transcode.util")
local core = require("apisix.core")
local pb = require("pb")
local bit = require("bit")
local ngx = ngx
local string = string
local table = table
local pcall = pcall
local tonumber = tonumber
local req_read_body = ngx.req.read_body

-- Transcode the current HTTP request into a framed gRPC request:
-- encode the mapped message with protobuf, prepend the 5-byte
-- length-prefixed framing, and rewrite method/uri/body in place.
-- Returns true on success, or false, err, http_status on failure.
return function (proto, service, method, pb_option, deadline, default_values)
    core.log.info("proto: ", core.json.delay_encode(proto, true))
    local m = util.find_method(proto, service, method)
    if not m then
        return false, "Undefined service method: " .. service .. "/" .. method
                      .. " end", 503
    end

    req_read_body()

    -- swap in this proto's pb state for the encode, restore afterwards
    local pb_old_state = pb.state(proto.pb_state)
    util.set_options(proto, pb_option)

    local map_message = util.map_message(m.input_type, default_values or {})
    local ok, encoded = pcall(pb.encode, m.input_type, map_message)
    pb.state(pb_old_state)

    if not ok or not encoded then
        return false, "failed to encode request data to protobuf", 400
    end

    -- gRPC message framing: 1-byte compressed flag (0) + 4-byte
    -- big-endian message length
    local size = #encoded
    local prefix = {
        string.char(0),
        string.char(bit.band(bit.rshift(size, 24), 0xFF)),
        string.char(bit.band(bit.rshift(size, 16), 0xFF)),
        string.char(bit.band(bit.rshift(size, 8), 0xFF)),
        string.char(bit.band(size, 0xFF))
    }

    local message = table.concat(prefix, "") .. encoded

    ngx.req.set_method(ngx.HTTP_POST)
    ngx.req.set_uri("/" .. service .. "/" .. method, false)
    ngx.req.set_uri_args({})
    ngx.req.set_body_data(message)

    -- deadline is configured in milliseconds ("m" suffix per the
    -- grpc-timeout header format)
    local dl = tonumber(deadline)
    if dl~= nil and dl > 0 then
        ngx.req.set_header("grpc-timeout", dl .. "m")
    end

    return true
end
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/response.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/response.lua
new file mode 100644
index 0000000..9dd6780
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/response.lua
@@ -0,0 +1,144 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local util = require("apisix.plugins.grpc-transcode.util")
local grpc_proto = require("apisix.plugins.grpc-transcode.proto")
local core = require("apisix.core")
local pb = require("pb")
local ngx = ngx
local string = string
local ngx_decode_base64 = ngx.decode_base64
local ipairs = ipairs
local pcall = pcall


-- Decode the base64-encoded "grpc-status-details-bin" response trailer into
-- a JSON body of the form {"error": <grpc_status.ErrorStatus>}.
-- status_detail_type: optional protobuf type name used to decode each entry
--                     of ErrorStatus.details (decoded with proto.pb_state).
-- proto: compiled proto object supplying pb_state for detail decoding.
-- On any failure the short error message is written to ngx.arg[1] (the
-- client-visible body) and a fuller message is returned for the caller to
-- log; on success nothing is returned and ngx.arg[1] holds the JSON body.
local function handle_error_response(status_detail_type, proto)
    local err_msg

    local grpc_status = ngx.header["grpc-status-details-bin"]
    if grpc_status then
        grpc_status = ngx_decode_base64(grpc_status)
        if grpc_status == nil then
            err_msg = "grpc-status-details-bin is not base64 format"
            ngx.arg[1] = err_msg
            return err_msg
        end

        -- decode ErrorStatus under the dedicated status pb state, then
        -- restore whatever pb state was active before
        local status_pb_state = grpc_proto.fetch_status_pb_state()
        local old_pb_state = pb.state(status_pb_state)

        local ok, decoded_grpc_status = pcall(pb.decode, "grpc_status.ErrorStatus", grpc_status)
        pb.state(old_pb_state)
        if not ok then
            err_msg = "failed to call pb.decode to decode grpc-status-details-bin"
            ngx.arg[1] = err_msg
            return err_msg .. ", err: " .. decoded_grpc_status
        end

        if not decoded_grpc_status then
            err_msg = "failed to decode grpc-status-details-bin"
            ngx.arg[1] = err_msg
            return err_msg
        end

        local details = decoded_grpc_status.details
        if status_detail_type and details then
            local decoded_details = {}
            for _, detail in ipairs(details) do
                -- each detail value is decoded with the route's own proto
                -- definitions, swapping pb state in and out per entry
                local pb_old_state = pb.state(proto.pb_state)
                local ok, err_or_value = pcall(pb.decode, status_detail_type, detail.value)
                pb.state(pb_old_state)
                if not ok then
                    err_msg = "failed to call pb.decode to decode details in "
                              .. "grpc-status-details-bin"
                    ngx.arg[1] = err_msg
                    return err_msg .. ", err: " .. err_or_value
                end

                if not err_or_value then
                    err_msg = "failed to decode details in grpc-status-details-bin"
                    ngx.arg[1] = err_msg
                    return err_msg
                end

                core.table.insert(decoded_details, err_or_value)
            end

            decoded_grpc_status.details = decoded_details
        end

        local resp_body = {error = decoded_grpc_status}
        local response, err = core.json.encode(resp_body)
        if not response then
            err_msg = "failed to json_encode response body"
            ngx.arg[1] = err_msg
            return err_msg .. ", error: " .. err
        end

        ngx.arg[1] = response
    end
end


-- Body-filter entry point: buffer the upstream gRPC response, decode the
-- method's output message with lua-protobuf and replace the body with JSON.
-- Returns nil on success (body written via ngx.arg[1]), an error string on
-- decode/encode failure, or false plus a message when the service method is
-- not found in the compiled proto.
return function(ctx, proto, service, method, pb_option, show_status_in_body, status_detail_type)
    local buffer = core.response.hold_body_chunk(ctx)
    if not buffer then
        -- not the last chunk yet; keep buffering
        return nil
    end

    -- handle error response after the last response chunk
    if ngx.status >= 300 and show_status_in_body then
        return handle_error_response(status_detail_type, proto)
    end

    -- when body has already been read by other plugin
    -- the buffer is an empty string
    if buffer == "" and ctx.resp_body then
        buffer = ctx.resp_body
    end

    local m = util.find_method(proto, service, method)
    if not m then
        return false, "2.Undefined service method: " .. service .. "/" .. method
                      .. " end."
    end

    if not ngx.req.get_headers()["X-Grpc-Web"] then
        -- strip the 5-byte gRPC message frame prefix (1 flag byte +
        -- 4 length bytes) before protobuf decoding
        buffer = string.sub(buffer, 6)
    end

    local pb_old_state = pb.state(proto.pb_state)
    util.set_options(proto, pb_option)

    local err_msg
    local decoded = pb.decode(m.output_type, buffer)
    pb.state(pb_old_state)
    if not decoded then
        err_msg = "failed to decode response data by protobuf"
        ngx.arg[1] = err_msg
        return err_msg
    end

    local response, err = core.json.encode(decoded)
    if not response then
        err_msg = "failed to json_encode response body"
        ngx.arg[1] = err_msg
        return err_msg .. ", err: " .. err
    end

    ngx.arg[1] = response
    return nil
end
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/util.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/util.lua
new file mode 100644
index 0000000..a95cb82
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-transcode/util.lua
@@ -0,0 +1,202 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
--
local core = require("apisix.core")
local proto_fake_file = require("apisix.plugins.grpc-transcode.proto").proto_fake_file
local json = core.json
local pb = require("pb")
local ngx = ngx
local string = string
local table = table
local ipairs = ipairs
local pairs = pairs
local tonumber = tonumber
local type = type


local _M = {version = 0.1}


-- Look up a method descriptor in the compiled proto index.
-- proto: compiled proto table keyed by proto_fake_file.
-- Returns the method descriptor table, or nil (with an error log) when the
-- proto, the service, or the method is missing.
function _M.find_method(proto, service, method)
    local loaded = proto[proto_fake_file]
    if type(loaded) ~= "table" then
        core.log.error("compiled proto not found")
        return nil
    end

    if type(loaded.index[service]) ~= "table" then
        core.log.error("compiled proto service not found")
        return nil
    end

    local res = loaded.index[service][method]
    if not res then
        core.log.error("compiled proto method not found")
        return nil
    end

    return res
end


-- Apply pb options for this proto, caching them on proto.options so that
-- repeated calls with an identical (sorted) option list become no-ops.
-- NOTE: `options` is sorted in place before comparison/application.
function _M.set_options(proto, options)
    local cur_opts = proto.options
    if cur_opts then
        if cur_opts == options then
            -- same route
            return
        end

        local same = true
        table.sort(options)
        for i, v in ipairs(options) do
            if cur_opts[i] ~= v then
                same = false
                break
            end
        end

        if same then
            -- Routes have the same configuration, usually the default one.
            -- As this is a small optimization, we don't care about routes have different
            -- configuration but have the same effect eventually.
            return
        end
    else
        table.sort(options)
    end

    for _, opt in ipairs(options) do
        pb.option(opt)
    end

    proto.options = options
end


-- Build a Lua table from the incoming request:
-- JSON body for POST/PUT/PATCH with an application/json Content-Type,
-- otherwise POST form args for POST, otherwise the URI query args.
local function get_request_table()
    local method = ngx.req.get_method()
    local content_type = ngx.req.get_headers()["Content-Type"] or ""
    if string.find(content_type, "application/json", 1, true) and
        (method == "POST" or method == "PUT" or method == "PATCH")
    then
        local req_body, _ = core.request.get_body()
        if req_body then
            local data, _ = json.decode(req_body)
            if data then
                return data
            end
        end
    end

    if method == "POST" then
        return ngx.req.get_post_args()
    end

    return ngx.req.get_uri_args()
end


-- Fetch a scalar field value from the request table, coercing "int*" kinds
-- (except int64, which is kept as-is to avoid precision loss) via tonumber.
local function get_from_request(request_table, name, kind)
    if not request_table then
        return nil
    end

    local prefix = kind:sub(1, 3)
    if prefix == "int" then
        if request_table[name] then
            if kind == "int64" then
                return request_table[name]
            else
                return tonumber(request_table[name])
            end
        end
    end

    return request_table[name]
end


-- Recursively map the request table onto the protobuf message type `field`,
-- applying per-field defaults from `default_values`.
-- Handles nested messages (field types starting with "."), arrays and maps;
-- `real_key` is used on recursive map calls to rename the emitted key.
-- Returns the mapped table, or nil plus an error message for unknown types.
function _M.map_message(field, default_values, request_table, real_key)
    if not pb.type(field) then
        return nil, "Field " .. field .. " is not defined"
    end

    local request = {}
    local sub, err
    if not request_table then
        request_table = get_request_table()
    end

    for name, _, field_type in pb.fields(field) do
        local _, _, ty = pb.type(field_type)
        if ty ~= "enum" and field_type:sub(1, 1) == "." then
            if request_table[name] == nil then
                sub = default_values and default_values[name]
            elseif core.table.isarray(request_table[name]) then
                local sub_array = core.table.new(#request_table[name], 0)
                for i, value in ipairs(request_table[name]) do
                    local sub_array_obj
                    if type(value) == "table" then
                        sub_array_obj, err = _M.map_message(field_type,
                            default_values and default_values[name], value)
                        if err then
                            return nil, err
                        end
                    else
                        sub_array_obj = value
                    end
                    sub_array[i] = sub_array_obj
                end
                sub = sub_array
            else
                if ty == "map" then
                    for k, v in pairs(request_table[name]) do
                        local tbl, err = _M.map_message(field_type,
                            default_values and default_values[name],
                            request_table[name], k)
                        if err then
                            return nil, err
                        end
                        if not sub then
                            sub = {}
                        end
                        sub[k] = tbl[k]
                    end
                else
                    sub, err = _M.map_message(field_type,
                        default_values and default_values[name],
                        request_table[name])
                    if err then
                        return nil, err
                    end
                end
            end

            request[name] = sub
        else
            if real_key then
                name = real_key
            end
            request[name] = get_from_request(request_table, name, field_type)
                            or (default_values and default_values[name])
        end
    end
    return request
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-web.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-web.lua
new file mode 100644
index 0000000..43a075c
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/grpc-web.lua
@@ -0,0 +1,228 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.
-- You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.

local ngx = ngx
local ngx_arg = ngx.arg
local core = require("apisix.core")
local req_set_uri = ngx.req.set_uri
local req_set_body_data = ngx.req.set_body_data
local decode_base64 = ngx.decode_base64
local encode_base64 = ngx.encode_base64
local bit = require("bit")
local string = string


local ALLOW_METHOD_OPTIONS = "OPTIONS"
local ALLOW_METHOD_POST = "POST"
local CONTENT_ENCODING_BASE64 = "base64"
local CONTENT_ENCODING_BINARY = "binary"
local DEFAULT_CORS_ALLOW_ORIGIN = "*"
local DEFAULT_CORS_ALLOW_METHODS = ALLOW_METHOD_POST
local DEFAULT_CORS_ALLOW_HEADERS = "content-type,x-grpc-web,x-user-agent"
local DEFAULT_CORS_EXPOSE_HEADERS = "grpc-message,grpc-status"
local DEFAULT_PROXY_CONTENT_TYPE = "application/grpc"


local plugin_name = "grpc-web"

local schema = {
    type = "object",
    properties = {
        cors_allow_headers = {
            description =
                "multiple header use ',' to split. default: content-type,x-grpc-web,x-user-agent.",
            type = "string",
            default = DEFAULT_CORS_ALLOW_HEADERS
        }
    }
}

-- Map request Content-Type to the wire encoding of the gRPC-Web payload;
-- *-text variants are base64-encoded, the rest are raw binary frames.
local grpc_web_content_encoding = {
    ["application/grpc-web"] = CONTENT_ENCODING_BINARY,
    ["application/grpc-web-text"] = CONTENT_ENCODING_BASE64,
    ["application/grpc-web+proto"] = CONTENT_ENCODING_BINARY,
    ["application/grpc-web-text+proto"] = CONTENT_ENCODING_BASE64,
}

local _M = {
    version = 0.1,
    priority = 505,
    name = plugin_name,
    schema = schema,
}

-- Validate the plugin configuration against the JSON schema above.
function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end

-- Short-circuit the request with `status`, flagging the body filter to skip
-- gRPC-Web re-framing for this (non-proxied) response.
local function exit(ctx, status)
    ctx.grpc_web_skip_body_filter = true
    return status
end

--- Build gRPC-Web trailer chunk
-- grpc-web trailer format reference:
-- envoyproxy/envoy/source/extensions/filters/http/grpc_web/grpc_web_filter.cc
--
-- Format for grpc-web trailer
-- 1 byte: 0x80
-- 4 bytes: length of the trailer
-- n bytes: trailer
-- It using upstream_trailer_* variables from nginx, it is available since NGINX version 1.13.10
-- https://nginx.org/en/docs/http/ngx_http_upstream_module.html#var_upstream_trailer_
--
-- @param grpc_status number grpc status code
-- @param grpc_message string grpc message
-- @return string grpc-web trailer chunk in raw string
local build_trailer = function (grpc_status, grpc_message)
    local status_str = "grpc-status:" .. grpc_status
    local status_msg = "grpc-message:" .. ( grpc_message or "")
    local grpc_web_trailer = status_str .. "\r\n" .. status_msg .. "\r\n"
    local len = #grpc_web_trailer

    -- 1 byte: 0x80
    local trailer_buf = string.char(0x80)
    -- 4 bytes: length of the trailer (big-endian 32-bit)
    trailer_buf = trailer_buf .. string.char(
        bit.band(bit.rshift(len, 24), 0xff),
        bit.band(bit.rshift(len, 16), 0xff),
        bit.band(bit.rshift(len, 8), 0xff),
        bit.band(len, 0xff)
    )
    -- n bytes: trailer
    trailer_buf = trailer_buf .. grpc_web_trailer

    return trailer_buf
end

-- Access phase: validate the gRPC-Web request (method, Content-Type, route
-- pattern), rewrite the URI to the gRPC path captured by the `:ext` prefix
-- match, decode a base64 body if needed, and forward as application/grpc.
function _M.access(conf, ctx)
    -- set context variable mime
    -- When processing non gRPC Web requests, `mime` can be obtained in the context
    -- and set to the `Content-Type` of the response
    ctx.grpc_web_mime = core.request.header(ctx, "Content-Type")

    local method = core.request.get_method()
    if method == ALLOW_METHOD_OPTIONS then
        -- CORS preflight: answer directly with 204
        return exit(ctx, 204)
    end

    if method ~= ALLOW_METHOD_POST then
        -- https://github.com/grpc/grpc-web/blob/master/doc/browser-features.md#cors-support
        core.log.error("request method: `", method, "` invalid")
        return exit(ctx, 405)
    end

    local encoding = grpc_web_content_encoding[ctx.grpc_web_mime]
    if not encoding then
        core.log.error("request Content-Type: `", ctx.grpc_web_mime, "` invalid")
        return exit(ctx, 400)
    end

    -- set context variable encoding method
    ctx.grpc_web_encoding = encoding

    -- set grpc path
    if not (ctx.curr_req_matched and ctx.curr_req_matched[":ext"]) then
        core.log.error("routing configuration error, grpc-web plugin only supports ",
                       "`prefix matching` pattern routing")
        return exit(ctx, 400)
    end

    local path = ctx.curr_req_matched[":ext"]
    if path:byte(1) ~= core.string.byte("/") then
        path = "/" .. path
    end

    req_set_uri(path)

    -- set grpc body
    local body, err = core.request.get_body()
    if err or not body then
        core.log.error("failed to read request body, err: ", err)
        return exit(ctx, 400)
    end

    if encoding == CONTENT_ENCODING_BASE64 then
        body = decode_base64(body)
        if not body then
            core.log.error("failed to decode request body")
            return exit(ctx, 400)
        end
    end

    -- set grpc content-type
    core.request.set_header(ctx, "Content-Type", DEFAULT_PROXY_CONTENT_TYPE)
    -- set grpc body
    req_set_body_data(body)
end

-- Header phase: emit CORS headers and restore the client's gRPC-Web
-- Content-Type; Content-Length is cleared because the body filter appends
-- a trailer frame.
function _M.header_filter(conf, ctx)
    local method = core.request.get_method()
    if method == ALLOW_METHOD_OPTIONS then
        core.response.set_header("Access-Control-Allow-Methods", DEFAULT_CORS_ALLOW_METHODS)
        core.response.set_header("Access-Control-Allow-Headers", conf.cors_allow_headers)
    end

    if not ctx.cors_allow_origins then
        -- only set the wildcard when no other plugin (e.g. cors) decided it
        core.response.set_header("Access-Control-Allow-Origin", DEFAULT_CORS_ALLOW_ORIGIN)
    end
    core.response.set_header("Access-Control-Expose-Headers", DEFAULT_CORS_EXPOSE_HEADERS)

    if not ctx.grpc_web_skip_body_filter then
        core.response.set_header("Content-Type", ctx.grpc_web_mime)
        core.response.set_header("Content-Length", nil)
    end
end

-- Body phase: re-encode chunks to base64 when the client speaks the -text
-- variant, and on EOF append the gRPC-Web trailer built from the upstream
-- grpc-status/grpc-message trailers (status 2 when the upstream sent none).
function _M.body_filter(conf, ctx)
    if ctx.grpc_web_skip_body_filter then
        return
    end

    -- If the MIME extension type description of the gRPC-Web standard is not obtained,
    -- indicating that the request is not based on the gRPC Web specification,
    -- the processing of the request body will be ignored
    -- https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md
    -- https://github.com/grpc/grpc-web/blob/master/doc/browser-features.md#cors-support
    if not ctx.grpc_web_mime then
        return
    end

    if ctx.grpc_web_encoding == CONTENT_ENCODING_BASE64 then
        local chunk = ngx_arg[1]
        chunk = encode_base64(chunk)
        ngx_arg[1] = chunk
    end

    if ngx_arg[2] then -- if eof
        local status = ctx.var.upstream_trailer_grpc_status
        local message = ctx.var.upstream_trailer_grpc_message

        -- When the response body completes and still does not receive the grpc status
        local resp_ok = status ~= nil and status ~= ""
        local trailer_buf = build_trailer(
            resp_ok and status or 2,
            resp_ok and message or "upstream grpc status not received"
        )
        if ctx.grpc_web_encoding == CONTENT_ENCODING_BASE64 then
            trailer_buf = encode_base64(trailer_buf)
        end

        ngx_arg[1] = ngx_arg[1] .. trailer_buf
    end
end

return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/gzip.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/gzip.lua
new file mode 100644
index 0000000..dfd0f10
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/gzip.lua
@@ -0,0 +1,170 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
--
local core = require("apisix.core")
-- is_apisix_or is the pcall success flag: true only when the
-- APISIX-Runtime-specific resty.apisix.response module is available
local is_apisix_or, response = pcall(require, "resty.apisix.response")
local ngx_header = ngx.header
local req_http_version = ngx.req.http_version
local str_sub = string.sub
local ipairs = ipairs
local tonumber = tonumber
local type = type


local schema = {
    type = "object",
    properties = {
        types = {
            anyOf = {
                {
                    type = "array",
                    minItems = 1,
                    items = {
                        type = "string",
                        minLength = 1,
                    },
                },
                {
                    enum = {"*"}
                }
            },
            default = {"text/html"}
        },
        min_length = {
            type = "integer",
            minimum = 1,
            default = 20,
        },
        comp_level = {
            type = "integer",
            minimum = 1,
            maximum = 9,
            default = 1,
        },
        http_version = {
            enum = {1.1, 1.0},
            default = 1.1,
        },
        buffers = {
            type = "object",
            properties = {
                number = {
                    type = "integer",
                    minimum = 1,
                    default = 32,
                },
                size = {
                    type = "integer",
                    minimum = 1,
                    default = 4096,
                }
            },
            default = {
                number = 32,
                size = 4096,
            }
        },
        vary = {
            type = "boolean",
        }
    },
}


local plugin_name = "gzip"


local _M = {
    version = 0.1,
    priority = 995,
    name = plugin_name,
    schema = schema,
}


-- Validate the plugin configuration against the JSON schema above.
function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- Header phase: decide whether to enable gzip for this response and, if so,
-- configure it via resty.apisix.response.set_gzip.
-- Skips compression when the runtime module is missing (501), when the
-- Content-Type is absent or not in conf.types, when Content-Length is below
-- min_length, or when the request HTTP version is older than configured.
function _M.header_filter(conf, ctx)
    if not is_apisix_or then
        core.log.error("need to build APISIX-Runtime to support setting gzip")
        return 501
    end

    local types = conf.types
    local content_type = ngx_header["Content-Type"]
    if not content_type then
        -- Like Nginx, don't gzip if Content-Type is missing
        return
    end

    if type(types) == "table" then
        -- conf.types == "*" is the non-table case and matches everything
        local matched = false
        local from = core.string.find(content_type, ";")
        if from then
            -- compare only the media type, dropping ";charset=..." etc.
            content_type = str_sub(content_type, 1, from - 1)
        end

        for _, ty in ipairs(types) do
            if content_type == ty then
                matched = true
                break
            end
        end

        if not matched then
            return
        end
    end

    local content_length = tonumber(ngx_header["Content-Length"])
    if content_length then
        local min_length = conf.min_length
        if content_length < min_length then
            return
        end
        -- Like Nginx, don't check min_length if Content-Length is missing
    end

    local http_version = req_http_version()
    if http_version < conf.http_version then
        return
    end

    local buffers = conf.buffers

    core.log.info("set gzip with buffers: ", buffers.number, " ", buffers.size,
                  ", level: ", conf.comp_level)

    local ok, err = response.set_gzip({
        buffer_num = buffers.number,
        buffer_size = buffers.size,
        compress_level = conf.comp_level,
    })
    if not ok then
        core.log.error("failed to set gzip: ", err)
        return
    end

    if conf.vary then
        core.response.add_header("Vary", "Accept-Encoding")
    end
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/hmac-auth.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/hmac-auth.lua
new file mode 100644
index 0000000..30e8db0
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/hmac-auth.lua
@@ -0,0 +1,372 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
--
local ngx = ngx
local abs = math.abs
local ngx_time = ngx.time
local ngx_re = require("ngx.re")
local ipairs = ipairs
local hmac_sha1 = ngx.hmac_sha1
local core = require("apisix.core")
local hmac = require("resty.hmac")
local consumer = require("apisix.consumer")
local ngx_decode_base64 = ngx.decode_base64
local ngx_encode_base64 = ngx.encode_base64
local plugin_name = "hmac-auth"
local ALLOWED_ALGORITHMS = {"hmac-sha1", "hmac-sha256", "hmac-sha512"}
local resty_sha256 = require("resty.sha256")
local schema_def = require("apisix.schema_def")
local auth_utils = require("apisix.utils.auth")

local schema = {
    type = "object",
    title = "work with route or service object",
    properties = {
        allowed_algorithms = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
                enum = ALLOWED_ALGORITHMS
            },
            default = ALLOWED_ALGORITHMS,
        },
        clock_skew = {
            type = "integer",
            default = 300,
            minimum = 1
        },
        signed_headers = {
            type = "array",
            items = {
                type = "string",
                minLength = 1,
                maxLength = 50,
            }
        },
        validate_request_body = {
            type = "boolean",
            title = "A boolean value telling the plugin to enable body validation",
            default = false,
        },
        hide_credentials = {type = "boolean", default = false},
        anonymous_consumer = schema_def.anonymous_consumer_schema,
    },
}

local consumer_schema = {
    type = "object",
    title = "work with consumer object",
    properties = {
        key_id = {type = "string", minLength = 1, maxLength = 256},
        secret_key = {type = "string", minLength = 1, maxLength = 256},
    },
    encrypt_fields = {"secret_key"},
    required = {"key_id", "secret_key"},
}

local _M = {
    version = 0.1,
    priority = 2530,
    type = 'auth',
    name = plugin_name,
    schema = schema,
    consumer_schema = consumer_schema
}

-- Dispatch table from algorithm name to an HMAC function over
-- (secret_key, message); sha256/sha512 go through resty.hmac.
local hmac_funcs = {
    ["hmac-sha1"] = function(secret_key, message)
        return hmac_sha1(secret_key, message)
    end,
    ["hmac-sha256"] = function(secret_key, message)
        return hmac:new(secret_key, hmac.ALGOS.SHA256):final(message)
    end,
    ["hmac-sha512"] = function(secret_key, message)
        return hmac:new(secret_key, hmac.ALGOS.SHA512):final(message)
    end,
}


-- Turn an array of strings into a set-like table {value = true} for
-- O(1) membership checks.
local function array_to_map(arr)
    local map = core.table.new(0, #arr)
    for _, v in ipairs(arr) do
        map[v] = true
    end

    return map
end


-- Validate plugin or consumer configuration depending on schema_type.
function _M.check_schema(conf, schema_type)
    core.log.info("input conf: ", core.json.delay_encode(conf))

    if schema_type == core.schema.TYPE_CONSUMER then
        return core.schema.check(consumer_schema, conf)
    else
        return core.schema.check(schema, conf)
    end
end


-- Resolve the consumer whose hmac-auth credential matches key_id.
-- Returns the consumer table, or nil plus an error message.
local function get_consumer(key_id)
    if not key_id then
        return nil, "missing key_id"
    end

    local cur_consumer, _, err = consumer.find_consumer(plugin_name, "key_id", key_id)
    if not cur_consumer then
        return nil, err or "Invalid key_id"
    end
    -- NOTE(review): this logs the `consumer` module table, not the
    -- `cur_consumer` just found — presumably intended to be cur_consumer;
    -- confirm against upstream before changing.
    core.log.info("consumer: ", core.json.delay_encode(consumer, true))

    return cur_consumer
end


-- Recompute the request signature: build the signing string from keyId and
-- each header listed in params.headers (the pseudo-header "@request-target"
-- expands to "<METHOD> <request_uri>"), join with "\n" (plus a trailing
-- "\n"), then HMAC it with the algorithm selected in params.
local function generate_signature(ctx, secret_key, params)
    local uri = ctx.var.request_uri
    local request_method = core.request.get_method()

    if uri == "" then
        uri = "/"
    end

    local signing_string_items = {
        params.keyId,
    }

    if params.headers then
        for _, h in ipairs(params.headers) do
            local canonical_header = core.request.header(ctx, h)
            if not canonical_header then
                if h == "@request-target" then
                    local request_target = request_method .. " " .. uri
                    core.table.insert(signing_string_items, request_target)
                    core.log.info("canonical_header name:", core.json.delay_encode(h))
                    core.log.info("canonical_header value: ",
                                  core.json.delay_encode(request_target))
                end
            else
                core.table.insert(signing_string_items,
                                  h .. ": " .. canonical_header)
                core.log.info("canonical_header name:", core.json.delay_encode(h))
                core.log.info("canonical_header value: ",
                              core.json.delay_encode(canonical_header))
            end
        end
    end

    local signing_string = core.table.concat(signing_string_items, "\n") .. "\n"
    return hmac_funcs[params.algorithm](secret_key, signing_string)
end


-- Raw SHA-256 digest of key (binary string), used for body validation.
local function sha256(key)
    local hash = resty_sha256:new()
    hash:update(key)
    local digest = hash:final()
    return digest
end


-- Full validation of the parsed Signature parameters:
-- consumer lookup, allowed algorithm, clock skew against the Date header,
-- required signed headers, signature comparison, and (optionally) a
-- SHA-256 Digest check of the request body.
-- Returns the consumer on success, or nil plus an error message.
local function validate(ctx, conf, params)
    if not params then
        return nil
    end

    if not params.keyId or not params.signature then
        return nil, "keyId or signature missing"
    end

    if not params.algorithm then
        return nil, "algorithm missing"
    end

    local consumer, err = get_consumer(params.keyId)
    if err then
        return nil, err
    end

    local consumer_conf = consumer.auth_conf
    local found_algorithm = false
    -- check supported algorithm used
    if not conf.allowed_algorithms then
        conf.allowed_algorithms = ALLOWED_ALGORITHMS
    end

    for _, algo in ipairs(conf.allowed_algorithms) do
        if algo == params.algorithm then
            found_algorithm = true
            break
        end
    end

    if not found_algorithm then
        return nil, "Invalid algorithm"
    end

    core.log.info("clock_skew: ", conf.clock_skew)
    if conf.clock_skew and conf.clock_skew > 0 then
        if not params.date then
            return nil, "Date header missing. failed to validate clock skew"
        end

        local time = ngx.parse_http_time(params.date)
        core.log.info("params.date: ", params.date, " time: ", time)
        if not time then
            return nil, "Invalid GMT format time"
        end

        local diff = abs(ngx_time() - time)

        if diff > conf.clock_skew then
            return nil, "Clock skew exceeded"
        end
    end

    -- validate headers
    -- All headers passed in route conf.signed_headers must be used in signing(params.headers)
    if conf.signed_headers and #conf.signed_headers >= 1 then
        if not params.headers then
            return nil, "headers missing"
        end
        local params_headers_map = array_to_map(params.headers)
        if params_headers_map then
            for _, header in ipairs(conf.signed_headers) do
                if not params_headers_map[header] then
                    return nil, [[expected header "]] .. header .. [[" missing in signing]]
                end
            end
        end
    end

    local secret_key = consumer_conf and consumer_conf.secret_key
    local request_signature = ngx_decode_base64(params.signature)
    local generated_signature = generate_signature(ctx, secret_key, params)
    if request_signature ~= generated_signature then
        return nil, "Invalid signature"
    end

    local validate_request_body = conf.validate_request_body
    if validate_request_body then
        local digest_header = params.body_digest
        if not digest_header then
            return nil, "Invalid digest"
        end

        local req_body, err = core.request.get_body()
        if err then
            return nil, err
        end

        req_body = req_body or ""
        local digest_created = "SHA-256" .. "=" ..
                ngx_encode_base64(sha256(req_body))
        if digest_created ~= digest_header then
            return nil, "Invalid digest"
        end
    end

    return consumer
end


-- Parse the Authorization header of the form
--   Signature keyId="...",algorithm="...",headers="a b c",signature="..."
-- into a params table; also captures Date and Digest headers when present.
-- Returns the table, or nil plus an error message on a malformed header.
local function retrieve_hmac_fields(ctx)
    local hmac_params = {}
    local auth_string = core.request.header(ctx, "Authorization")
    if not auth_string then
        return nil, "missing Authorization header"
    end

    if not core.string.has_prefix(auth_string, "Signature") then
        return nil, "Authorization header does not start with 'Signature'"
    end

    -- skip the 9-char "Signature" prefix, then split on commas
    local signature_fields = auth_string:sub(10):gmatch('[^,]+')

    for field in signature_fields do
        local key, value = field:match('%s*(%w+)="(.-)"')
        if key and value then
            if key == "keyId" or key == "algorithm" or key == "signature" then
                hmac_params[key] = value

            elseif key == "headers" then
                hmac_params.headers = ngx_re.split(value, " ")
            end
        end
    end

    -- will be required to check clock skew
    if core.request.header(ctx, "Date") then
        hmac_params.date = core.request.header(ctx, "Date")
    end

    if core.request.header(ctx, "Digest") then
        hmac_params.body_digest = core.request.header(ctx, "Digest")
    end

    return hmac_params
end

-- Parse and validate the request's HMAC fields, returning
-- (consumer, consumers_conf, err). Logging is suppressed/softened when
-- running under the multi-auth plugin.
local function find_consumer(conf, ctx)
    local params,err = retrieve_hmac_fields(ctx)
    if err then
        if not auth_utils.is_running_under_multi_auth(ctx) then
            core.log.warn("client request can't be validated: ", err)
        end
        return nil, nil, "client request can't be validated: " .. err
    end

    local validated_consumer, err = validate(ctx, conf, params)
    if not validated_consumer then
        err = "client request can't be validated: " .. (err or "Invalid signature")
        if auth_utils.is_running_under_multi_auth(ctx) then
            return nil, nil, err
        end
        core.log.warn(err)
        -- detailed reason is logged; the client only sees a generic message
        return nil, nil, "client request can't be validated"
    end

    local consumers_conf = consumer.consumers_conf(plugin_name)
    return validated_consumer, consumers_conf, err
end


-- Rewrite phase: authenticate the request, falling back to the configured
-- anonymous consumer when authentication fails; optionally strips the
-- Authorization header before attaching the consumer to the request context.
function _M.rewrite(conf, ctx)
    local cur_consumer, consumers_conf, err = find_consumer(conf, ctx)
    if not cur_consumer then
        if not conf.anonymous_consumer then
            return 401, { message = err }
        end
        cur_consumer, consumers_conf, err = consumer.get_anonymous_consumer(conf.anonymous_consumer)
        if not cur_consumer then
            if auth_utils.is_running_under_multi_auth(ctx) then
                return 401, err
            end
            core.log.error(err)
            return 401, { message = "Invalid user authorization" }
        end
    end

    if conf.hide_credentials then
        core.request.set_header("Authorization", nil)
    end

    consumer.attach_consumer(ctx, cur_consumer, consumers_conf)
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/http-dubbo.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/http-dubbo.lua
new file mode 100644
index 0000000..f068654
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/http-dubbo.lua
@@ -0,0 +1,262 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+-- http-dubbo: translates an HTTP request into a Dubbo (hessian-free,
+-- fastjson-serialized) TCP call against the picked upstream node.
+local require = require
+local core = require("apisix.core")
+local pairs = pairs
+local str_format = string.format
+local bit = require("bit")
+local rshift = bit.rshift
+local band = bit.band
+local char = string.char
+local tostring = tostring
+local ngx = ngx
+local type = type
+local plugin_name = "http-dubbo"
+
+
+local schema = {
+    type = "object",
+    properties = {
+        service_name = {
+            type = "string",
+            minLength = 1,
+        },
+        service_version = {
+            type = "string",
+            pattern = [[^\d+\.\d+\.\d+]],
+            default = "0.0.0"
+        },
+        method = {
+            type = "string",
+            minLength = 1,
+        },
+        params_type_desc = {
+            type = "string",
+            default = ""
+        },
+        serialization_header_key = {
+            type = "string"
+        },
+        serialized = {
+            type = "boolean",
+            default = false
+        },
+        connect_timeout = {
+            type = "number",
+            default = 6000
+        },
+        read_timeout = {
+            type = "number",
+            default = 6000
+        },
+        send_timeout = {
+            type = "number",
+            default = 6000
+        }
+    },
+    required = { "service_name", "method" },
+}
+
+local _M = {
+    version = 0.1,
+    priority = 504,
+    name = plugin_name,
+    schema = schema,
+}
+
+function _M.check_schema(conf)
+    return core.schema.check(schema, conf)
+end
+
+
+-- Pack an integer into 4 big-endian bytes (Dubbo wire format).
+local function str_int32(int)
+    return char(band(rshift(int, 24), 0xff),
+            band(rshift(int, 16), 0xff),
+            band(rshift(int, 8), 0xff),
+            band(int, 0xff))
+end
+
+
+-- Decode the fixed 16-byte Dubbo response header; returns nil when fewer
+-- than 16 bytes are available.
+local function parse_dubbo_header(header)
+    for i = 1, 16 do
+        local currentByte = header:byte(i)
+        if not currentByte then
+            return nil
+        end
+    end
+
+    local magic_number = str_format("%04x", header:byte(1) * 256 + header:byte(2))
+    local message_flag = header:byte(3)
+    local status = header:byte(4)
+    local request_id = 0
+    for i = 5, 12 do
+        request_id = request_id * 256 + header:byte(i)
+    end
+
+    local byte13Val = header:byte(13) * 256 * 256 * 256
+    local byte14Val = header:byte(14) * 256 * 256
+    local data_length = byte13Val + byte14Val + header:byte(15) * 256 + header:byte(16)
+
+    local is_request = bit.band(bit.rshift(message_flag, 7), 0x01) == 1 and 1 or 0
+    local is_two_way = bit.band(bit.rshift(message_flag, 6), 0x01) == 1 and 1 or 0
+    local is_event = bit.band(bit.rshift(message_flag, 5), 0x01) == 1 and 1 or 0
+
+    return {
+        magic_number = magic_number,
+        message_flag = message_flag,
+        is_request = is_request,
+        is_two_way = is_two_way,
+        is_event = is_event,
+        status = status,
+        request_id = request_id,
+        data_length = data_length
+    }
+end
+
+
+-- Minimal JSON string escaping for fastjson payload fields.
+local function string_to_json_string(str)
+    local result = "\""
+    for i = 1, #str do
+        local byte = core.string.sub(str, i, i)
+        if byte == "\\" then
+            result = result .. "\\\\"
+        elseif byte == "\n" then
+            result = result .. "\\n"
+        elseif byte == "\t" then
+            result = result .. "\\t"
+        elseif byte == "\r" then
+            result = result .. "\\r"
+        elseif byte == "\b" then
+            result = result .. "\\b"
+        elseif byte == "\f" then
+            result = result .. "\\f"
+        elseif byte == "\"" then
+            result = result .. "\\\""
+        else
+            result = result .. byte
+        end
+    end
+    return result .. "\""
+end
+
+
+-- Build the full Dubbo request frame (header + newline-separated
+-- fastjson body) as a table of byte-string chunks for sock:send.
+local function get_dubbo_request(conf, ctx)
+    -- use dubbo and fastjson
+    local first_byte4 = "\xda\xbb\xc6\x00"
+
+    local requestId = "\x00\x00\x00\x00\x00\x00\x00\x01"
+    local version = "\"2.0.2\"\n"
+    local service = "\"" .. conf.service_name .. "\"" .. "\n"
+
+    local service_version = "\"" .. conf.service_version .. "\"" .. "\n"
+    local method_name = "\"" .. conf.method .. "\"" .. "\n"
+
+    local params_desc = "\"" .. conf.params_type_desc .. "\"" .. "\n"
+    local params = ""
+    local serialized = conf.serialized
+    if conf.serialization_header_key then
+        local serialization_header = core.request.header(ctx, conf.serialization_header_key)
+        serialized = serialization_header == "true"
+    end
+    if serialized then
+        -- caller already serialized the body; just ensure a trailing newline
+        params = core.request.get_body()
+        if params then
+            local end_of_params = core.string.sub(params, -1)
+            if end_of_params ~= "\n" then
+                params = params .. "\n"
+            end
+        end
+    else
+        local body_data = core.request.get_body()
+        if body_data then
+            local lua_object = core.json.decode(body_data);
+            for _, v in pairs(lua_object) do
+                local pt = type(v)
+                if pt == "nil" then
+                    params = params .. "null" .. "\n"
+                elseif pt == "string" then
+                    params = params .. string_to_json_string(v) .. "\n"
+                elseif pt == "number" then
+                    params = params .. tostring(v) .. "\n"
+                else
+                    params = params .. core.json.encode(v) .. "\n"
+                end
+            end
+        end
+
+    end
+    local attachments = "{}\n"
+    if params == nil then
+        params = ""
+    end
+    local payload = #version + #service + #service_version
+            + #method_name + #params_desc + #params + #attachments
+    return {
+        first_byte4,
+        requestId,
+        str_int32(payload),
+        version,
+        service,
+        service_version,
+        method_name,
+        params_desc,
+        params,
+        attachments
+    }
+end
+
+
+function _M.before_proxy(conf, ctx)
+    local sock = ngx.socket.tcp()
+
+    sock:settimeouts(conf.connect_timeout, conf.send_timeout, conf.read_timeout)
+    local ok, err = sock:connect(ctx.picked_server.host, ctx.picked_server.port)
+    if not ok then
+        sock:close()
+        core.log.error("failed to connect to upstream ", err)
+        return 502
+    end
+    local request = get_dubbo_request(conf, ctx)
+    -- BUGFIX: sock:send returns nil plus an error message on failure; the
+    -- previous `bytes > 0` comparison raised a Lua error on nil instead of
+    -- reporting the send failure.
+    local bytes, send_err = sock:send(request)
+    if bytes and bytes > 0 then
+        local header, _ = sock:receiveany(16);
+        if header then
+            local header_info = parse_dubbo_header(header)
+            if header_info and header_info.status == 20 then
+                local readline = sock:receiveuntil("\n")
+                local body_status, _, _ = readline()
+                if body_status then
+                    local response_status = core.string.sub(body_status, 1, 1)
+                    if response_status == "2" or response_status == "5" then
+                        sock:close()
+                        return 200
+                    elseif response_status == "1" or response_status == "4" then
+                        local body, _, _ = readline()
+                        sock:close()
+                        return 200, body
+                    end
+                end
+            end
+        end
+    else
+        core.log.error("failed to send dubbo request to upstream ", send_err)
+    end
+    sock:close()
+    return 500
+
+end
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/http-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/http-logger.lua
new file mode 100644
index 0000000..44f84ac
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/http-logger.lua
@@ -0,0 +1,223 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+-- http-logger: batches request log entries and POSTs them to a remote
+-- HTTP/HTTPS endpoint via the shared batch-processor manager.
+local bp_manager_mod = require("apisix.utils.batch-processor-manager")
+local log_util = require("apisix.utils.log-util")
+local core = require("apisix.core")
+local http = require("resty.http")
+local url = require("net.url")
+
+local tostring = tostring
+local ipairs = ipairs
+
+local plugin_name = "http-logger"
+local batch_processor_manager = bp_manager_mod.new("http logger")
+
+local schema = {
+    type = "object",
+    properties = {
+        uri = core.schema.uri_def,
+        auth_header = {type = "string"},
+        timeout = {type = "integer", minimum = 1, default = 3},
+        log_format = {type = "object"},
+        include_req_body = {type = "boolean", default = false},
+        include_req_body_expr = {
+            type = "array",
+            minItems = 1,
+            items = {
+                type = "array"
+            }
+        },
+        include_resp_body = {type = "boolean", default = false},
+        include_resp_body_expr = {
+            type = "array",
+            minItems = 1,
+            items = {
+                type = "array"
+            }
+        },
+        concat_method = {type = "string", default = "json",
+                         enum = {"json", "new_line"}},
+        ssl_verify = {type = "boolean", default = false},
+    },
+    required = {"uri"}
+}
+
+
+local metadata_schema = {
+    type = "object",
+    properties = {
+        log_format = {
+            type = "object"
+        }
+    },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 410,
+    name = plugin_name,
+    schema = batch_processor_manager:wrap_schema(schema),
+    metadata_schema = metadata_schema,
+}
+
+
+function _M.check_schema(conf, schema_type)
+    if schema_type == core.schema.TYPE_METADATA then
+        return core.schema.check(metadata_schema, conf)
+    end
+
+    local check = {"uri"}
+    core.utils.check_https(check, conf, plugin_name)
+    core.utils.check_tls_bool({"ssl_verify"}, conf, plugin_name)
+
+    local ok, err = core.schema.check(schema, conf)
+    if not ok then
+        return nil, err
+    end
+    return log_util.check_log_schema(conf)
+end
+
+
+-- POST one encoded batch to conf.uri; returns ok, err_msg.
+local function send_http_data(conf, log_message)
+    local err_msg
+    local res = true
+    local url_decoded = url.parse(conf.uri)
+    local host = url_decoded.host
+    local port = url_decoded.port
+
+    core.log.info("sending a batch logs to ", conf.uri)
+
+    -- default the port from the scheme when the URI omits it
+    if ((not port) and url_decoded.scheme == "https") then
+        port = 443
+    elseif not port then
+        port = 80
+    end
+
+    local httpc = http.new()
+    httpc:set_timeout(conf.timeout * 1000)
+    local ok, err = httpc:connect(host, port)
+
+    if not ok then
+        return false, "failed to connect to host[" .. host .. "] port["
+            .. tostring(port) .. "] " .. err
+    end
+
+    if url_decoded.scheme == "https" then
+        ok, err = httpc:ssl_handshake(true, host, conf.ssl_verify)
+        if not ok then
+            return false, "failed to perform SSL with host[" .. host .. "] "
+                .. "port[" .. tostring(port) .. "] " .. err
+        end
+    end
+
+    local content_type
+    if conf.concat_method == "json" then
+        content_type = "application/json"
+    else
+        content_type = "text/plain"
+    end
+
+    local httpc_res, httpc_err = httpc:request({
+        method = "POST",
+        path = #url_decoded.path ~= 0 and url_decoded.path or "/",
+        query = url_decoded.query,
+        body = log_message,
+        headers = {
+            ["Host"] = url_decoded.host,
+            ["Content-Type"] = content_type,
+            ["Authorization"] = conf.auth_header
+        }
+    })
+
+    if not httpc_res then
+        return false, "error while sending data to [" .. host .. "] port["
+            .. tostring(port) .. "] " .. httpc_err
+    end
+
+    -- some error occurred in the server
+    if httpc_res.status >= 400 then
+        res = false
+        err_msg = "server returned status code[" .. httpc_res.status .. "] host["
+            .. host .. "] port[" .. tostring(port) .. "] "
+            .. "body[" .. httpc_res:read_body() .. "]"
+    end
+
+    return res, err_msg
+end
+
+
+function _M.body_filter(conf, ctx)
+    log_util.collect_body(conf, ctx)
+end
+
+
+function _M.log(conf, ctx)
+    local entry = log_util.get_log_entry(plugin_name, conf, ctx)
+
+    if not entry.route_id then
+        entry.route_id = "no-matched"
+    end
+
+    if batch_processor_manager:add_entry(conf, entry) then
+        return
+    end
+
+    -- Generate a function to be executed by the batch processor
+    local func = function(entries, batch_max_size)
+        local data, err
+
+        if conf.concat_method == "json" then
+            if batch_max_size == 1 then
+                data, err = core.json.encode(entries[1]) -- encode as single {}
+            else
+                data, err = core.json.encode(entries) -- encode as array [{}]
+            end
+
+        elseif conf.concat_method == "new_line" then
+            if batch_max_size == 1 then
+                data, err = core.json.encode(entries[1]) -- encode as single {}
+            else
+                local t = core.table.new(#entries, 0)
+                for i, entry in ipairs(entries) do
+                    t[i], err = core.json.encode(entry)
+                    if err then
+                        core.log.warn("failed to encode http log: ", err, ", log data: ", entry)
+                        break
+                    end
+                end
+                data = core.table.concat(t, "\n") -- encode as multiple string
+            end
+
+        else
+            -- defensive programming check
+            err = "unknown concat_method " .. (conf.concat_method or "nil")
+        end
+
+        if not data then
+            return false, 'error occurred while encoding the data: ' .. err
+        end
+
+        return send_http_data(conf, data)
+    end
+
+    batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func)
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/inspect.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/inspect.lua
new file mode 100644
index 0000000..19f50c7
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/inspect.lua
@@ -0,0 +1,61 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.
See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local inspect = require("apisix.inspect") + + +local plugin_name = "inspect" + + +local schema = { + type = "object", + properties = {}, +} + + +local _M = { + version = 0.1, + priority = 200, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf, schema_type) + return core.schema.check(schema, conf) +end + + +function _M.init() + local attr = plugin.plugin_attr(plugin_name) + local delay + local hooks_file + if attr then + delay = attr.delay + hooks_file = attr.hooks_file + end + core.log.info("delay=", delay, ", hooks_file=", hooks_file) + return inspect.init(delay, hooks_file) +end + + +function _M.destroy() + return inspect.destroy() +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ip-restriction.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ip-restriction.lua new file mode 100644 index 0000000..b499f2d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ip-restriction.lua @@ -0,0 +1,26 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local base = require("apisix.plugins.ip-restriction.init") + + +-- avoid unexpected data sharing +local ip_restriction = core.table.clone(base) +ip_restriction.access = base.restrict + + +return ip_restriction diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ip-restriction/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ip-restriction/init.lua new file mode 100644 index 0000000..1800024 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ip-restriction/init.lua @@ -0,0 +1,122 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+--
+-- Shared ip-restriction implementation: validates white/black lists at
+-- config time and matches the client address with a cached IP matcher.
+local ipairs = ipairs
+local core = require("apisix.core")
+local lrucache = core.lrucache.new({
+    ttl = 300, count = 512
+})
+
+
+local schema = {
+    type = "object",
+    properties = {
+        message = {
+            type = "string",
+            minLength = 1,
+            maxLength = 1024,
+            default = "Your IP address is not allowed"
+        },
+        response_code = {
+            type = "integer",
+            minimum = 403,
+            maximum = 404,
+            default = 403
+        },
+        whitelist = {
+            type = "array",
+            items = {anyOf = core.schema.ip_def},
+            minItems = 1
+        },
+        blacklist = {
+            type = "array",
+            items = {anyOf = core.schema.ip_def},
+            minItems = 1
+        },
+    },
+    oneOf = {
+        {required = {"whitelist"}},
+        {required = {"blacklist"}},
+    },
+}
+
+
+local plugin_name = "ip-restriction"
+
+
+local _M = {
+    version = 0.1,
+    priority = 3000,
+    name = plugin_name,
+    schema = schema,
+}
+
+
+function _M.check_schema(conf)
+    local ok, err = core.schema.check(schema, conf)
+
+    if not ok then
+        return false, err
+    end
+
+    -- we still need this as it is too complex to filter out all invalid IPv6 via regex
+    if conf.whitelist then
+        for _, cidr in ipairs(conf.whitelist) do
+            if not core.ip.validate_cidr_or_ip(cidr) then
+                return false, "invalid ip address: " .. cidr
+            end
+        end
+    end
+
+    if conf.blacklist then
+        for _, cidr in ipairs(conf.blacklist) do
+            if not core.ip.validate_cidr_or_ip(cidr) then
+                return false, "invalid ip address: " .. cidr
+            end
+        end
+    end
+
+    return true
+end
+
+
+function _M.restrict(conf, ctx)
+    local block = false
+    local remote_addr = ctx.var.remote_addr
+
+    if conf.blacklist then
+        local matcher = lrucache(conf.blacklist, nil,
+                                 core.ip.create_ip_matcher, conf.blacklist)
+        if matcher then
+            block = matcher:match(remote_addr)
+        end
+    end
+
+    if conf.whitelist then
+        local matcher = lrucache(conf.whitelist, nil,
+                                 core.ip.create_ip_matcher, conf.whitelist)
+        if matcher then
+            block = not matcher:match(remote_addr)
+        end
+    end
+
+    if block then
+        return conf.response_code, { message = conf.message }
+    end
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/jwe-decrypt.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/jwe-decrypt.lua
new file mode 100644
index 0000000..b0d1e16
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/jwe-decrypt.lua
@@ -0,0 +1,279 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- jwe-decrypt: decrypts an A256GCM JWE token from a request header using a
+-- per-consumer secret and forwards the plaintext in a configurable header.
+local core = require("apisix.core")
+local consumer_mod = require("apisix.consumer")
+local base64 = require("ngx.base64")
+local aes = require("resty.aes")
+local ngx = ngx
+local sub_str = string.sub
+local cipher = aes.cipher(256, "gcm")
+
+local plugin_name = "jwe-decrypt"
+
+local schema = {
+    type = "object",
+    properties = {
+        header = {
+            type = "string",
+            default = "Authorization"
+        },
+        forward_header = {
+            type = "string",
+            default = "Authorization"
+        },
+        strict = {
+            type = "boolean",
+            default = true
+        }
+    },
+    required = { "header", "forward_header" },
+}
+
+local consumer_schema = {
+    type = "object",
+    properties = {
+        key = { type = "string" },
+        secret = { type = "string" },
+        is_base64_encoded = { type = "boolean" },
+    },
+    required = { "key", "secret" },
+    encrypt_fields = { "key", "secret" },
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 2509,
+    type = 'auth',
+    name = plugin_name,
+    schema = schema,
+    consumer_schema = consumer_schema
+}
+
+
+function _M.check_schema(conf, schema_type)
+    if schema_type == core.schema.TYPE_CONSUMER then
+        local ok, err = core.schema.check(consumer_schema, conf)
+        if not ok then
+            return false, err
+        end
+
+        local local_conf, err = core.config.local_conf(true)
+        if not local_conf then
+            return false, "failed to load the configuration file: " .. err
+        end
+
+        local encrypted = core.table.try_read_attr(local_conf, "apisix", "data_encryption",
+                "enable_encrypt_fields") and (core.config.type == "etcd")
+
+        -- if encrypted, the secret length will exceed 32 so don't check
+        if not encrypted then
+            -- restrict the length of secret, we use A256GCM for encryption,
+            -- so the length should be 32 chars only
+            if conf.is_base64_encoded then
+                if #base64.decode_base64url(conf.secret) ~= 32 then
+                    return false, "the secret length after base64 decode should be 32 chars"
+                end
+            else
+                if #conf.secret ~= 32 then
+                    return false, "the secret length should be 32 chars"
+                end
+            end
+        end
+
+        return true
+    end
+    return core.schema.check(schema, conf)
+end
+
+
+local function get_secret(conf)
+    local secret = conf.secret
+
+    if conf.is_base64_encoded then
+        return base64.decode_base64url(secret)
+    end
+
+    return secret
+end
+
+
+-- Split a five-part JWE compact token; o.valid is false on any parse error.
+local function load_jwe_token(jwe_token)
+    local o = { valid = false }
+    o.header, o.enckey, o.iv, o.ciphertext, o.tag = jwe_token:match("(.-)%.(.-)%.(.-)%.(.-)%.(.*)")
+    if not o.header then
+        return o
+    end
+    local he = base64.decode_base64url(o.header)
+    if not he then
+        return o
+    end
+    o.header_obj = core.json.decode(he)
+    if not o.header_obj then
+        return o
+    end
+    o.valid = true
+    return o
+end
+
+
+local function jwe_decrypt_with_obj(o, consumer)
+    local secret = get_secret(consumer.auth_conf)
+    local dec = base64.decode_base64url
+
+    local aes_default = aes:new(
+        secret,
+        nil,
+        cipher,
+        {iv = dec(o.iv)}
+    )
+
+    local decrypted = aes_default:decrypt(dec(o.ciphertext), dec(o.tag))
+    return decrypted
+end
+
+
+local function jwe_encrypt(o, consumer)
+    local secret = get_secret(consumer.auth_conf)
+    local enc = base64.encode_base64url
+
+    local aes_default = aes:new(
+        secret,
+        nil,
+        cipher,
+        {iv = o.iv})
+
+    local encrypted = aes_default:encrypt(o.plaintext)
+
+    o.ciphertext = encrypted[1]
+    o.tag = encrypted[2]
+    -- "dir" JWE: the encrypted-key segment stays empty, hence ".."
+    return o.header .. ".." .. enc(o.iv) .. "." .. enc(o.ciphertext) .. "." .. enc(o.tag)
+end
+
+
+local function get_consumer(key)
+    local consumer_conf = consumer_mod.plugin(plugin_name)
+    if not consumer_conf then
+        return nil
+    end
+    local consumers = consumer_mod.consumers_kv(plugin_name, consumer_conf, "key")
+    if not consumers then
+        return nil
+    end
+    core.log.info("consumers: ", core.json.delay_encode(consumers))
+    return consumers[key]
+end
+
+
+local function fetch_jwe_token(conf, ctx)
+    local token = core.request.header(ctx, conf.header)
+    if token then
+        local prefix = sub_str(token, 1, 7)
+        if prefix == 'Bearer ' or prefix == 'bearer ' then
+            return sub_str(token, 8)
+        end
+
+        return token
+    end
+end
+
+
+function _M.rewrite(conf, ctx)
+    -- fetch token and hide credentials if necessary
+    local jwe_token, err = fetch_jwe_token(conf, ctx)
+    if not jwe_token then
+        -- BUGFIX: previously, with strict disabled, a missing token fell
+        -- through to load_jwe_token(nil) and crashed with a Lua error
+        -- (500). Non-strict mode now lets the request pass through.
+        if not conf.strict then
+            return
+        end
+        core.log.info("failed to fetch JWE token: ", err)
+        return 403, { message = "missing JWE token in request" }
+    end
+
+    local jwe_obj = load_jwe_token(jwe_token)
+    if not jwe_obj.valid then
+        return 400, { message = "JWE token invalid" }
+    end
+
+    if not jwe_obj.header_obj.kid then
+        return 400, { message = "missing kid in JWE token" }
+    end
+
+    local consumer = get_consumer(jwe_obj.header_obj.kid)
+    if not consumer then
+        return 400, { message = "invalid kid in JWE token" }
+    end
+
+    local plaintext, err = jwe_decrypt_with_obj(jwe_obj, consumer)
+    if err ~= nil then
+        return 400, { message = "failed to decrypt JWE token" }
+    end
+    core.request.set_header(ctx, conf.forward_header, plaintext)
+end
+
+
+local function gen_token()
+    local args = core.request.get_uri_args()
+    if not args or not args.key then
+        return core.response.exit(400)
+    end
+
+    local key = args.key
+    local payload = args.payload
+    if payload then
+        payload = ngx.unescape_uri(payload)
+    end
+
+    local consumer = get_consumer(key)
+    if not consumer then
+        return core.response.exit(404)
+    end
+
+    core.log.info("consumer: ", core.json.delay_encode(consumer))
+
+    local iv = args.iv
+    if not iv then
+        -- TODO: random bytes
+        iv = "123456789012"
+    end
+
+    local obj = {
+        iv = iv,
+        plaintext = payload,
+        header_obj = {
+            kid = key,
+            alg = "dir",
+            enc = "A256GCM",
+        },
+    }
+    obj.header = base64.encode_base64url(core.json.encode(obj.header_obj))
+    local jwe_token = jwe_encrypt(obj, consumer)
+    if jwe_token then
+        return core.response.exit(200, jwe_token)
+    end
+
+    return core.response.exit(404)
+end
+
+
+function _M.api()
+    return {
+        {
+            methods = { "GET" },
+            uri = "/apisix/plugin/jwe/encrypt",
+            handler = gen_token,
+        }
+    }
+end
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/jwt-auth.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/jwt-auth.lua
new file mode 100644
index 0000000..b61d82d
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/jwt-auth.lua
@@ -0,0 +1,331 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- jwt-auth: authenticates requests by verifying a JWT carried in a header,
+-- query argument or cookie against a per-consumer secret/public key.
+local core = require("apisix.core")
+local jwt = require("resty.jwt")
+local consumer_mod = require("apisix.consumer")
+local resty_random = require("resty.random")
+local new_tab = require ("table.new")
+local auth_utils = require("apisix.utils.auth")
+
+local ngx_encode_base64 = ngx.encode_base64
+local ngx_decode_base64 = ngx.decode_base64
+local ngx = ngx
+local sub_str = string.sub
+local table_insert = table.insert
+local table_concat = table.concat
+local ngx_re_gmatch = ngx.re.gmatch
+local plugin_name = "jwt-auth"
+local schema_def = require("apisix.schema_def")
+
+
+local schema = {
+    type = "object",
+    properties = {
+        header = {
+            type = "string",
+            default = "authorization"
+        },
+        query = {
+            type = "string",
+            default = "jwt"
+        },
+        cookie = {
+            type = "string",
+            default = "jwt"
+        },
+        hide_credentials = {
+            type = "boolean",
+            default = false
+        },
+        key_claim_name = {
+            type = "string",
+            default = "key",
+            minLength = 1,
+        },
+        store_in_ctx = {
+            type = "boolean",
+            default = false
+        },
+        anonymous_consumer = schema_def.anonymous_consumer_schema,
+    },
+}
+
+local consumer_schema = {
+    type = "object",
+    -- can't use additionalProperties with dependencies
+    properties = {
+        key = {
+            type = "string",
+            minLength = 1,
+        },
+        secret = {
+            type = "string",
+            minLength = 1,
+        },
+        algorithm = {
+            type = "string",
+            enum = {"HS256", "HS512", "RS256", "ES256"},
+            default = "HS256"
+        },
+        exp = {type = "integer", minimum = 1, default = 86400},
+        base64_secret = {
+            type = "boolean",
+            default = false
+        },
+        lifetime_grace_period = {
+            type = "integer",
+            minimum = 0,
+            default = 0
+        }
+    },
+    dependencies = {
+        algorithm = {
+            oneOf = {
+                {
+                    properties = {
+                        algorithm = {
+                            enum = {"HS256", "HS512"},
+                            default = "HS256"
+                        },
+                    },
+                },
+                {
+                    properties = {
+                        public_key = {type = "string"},
+                        algorithm = {
+                            enum = {"RS256", "ES256"},
+                        },
+                    },
+                    required = {"public_key"},
+                },
+            }
+        }
+    },
+    encrypt_fields = {"secret"},
+    required = {"key"},
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 2510,
+    type = 'auth',
+    name = plugin_name,
+    schema = schema,
+    consumer_schema = consumer_schema
+}
+
+
+function _M.check_schema(conf, schema_type)
+    core.log.info("input conf: ", core.json.delay_encode(conf))
+
+    local ok, err
+    if schema_type == core.schema.TYPE_CONSUMER then
+        ok, err = core.schema.check(consumer_schema, conf)
+    else
+        return core.schema.check(schema, conf)
+    end
+
+    if not ok then
+        return false, err
+    end
+
+    -- HMAC consumers without a secret get a random one generated for them
+    if conf.algorithm ~= "RS256" and conf.algorithm ~= "ES256" and not conf.secret then
+        conf.secret = ngx_encode_base64(resty_random.bytes(32, true))
+    elseif conf.base64_secret then
+        if ngx_decode_base64(conf.secret) == nil then
+            return false, "base64_secret required but the secret is not in base64 format"
+        end
+    end
+
+    return true
+end
+
+-- Rebuild the Cookie header with the named cookie removed.
+local function remove_specified_cookie(src, key)
+    local cookie_key_pattern = "([a-zA-Z0-9-_]*)"
+    local cookie_val_pattern = "([a-zA-Z0-9-._]*)"
+    local t = new_tab(1, 0)
+
+    local it, err = ngx_re_gmatch(src, cookie_key_pattern .. "=" .. cookie_val_pattern, "jo")
+    if not it then
+        core.log.error("match origins failed: ", err)
+        return src
+    end
+    while true do
+        local m, err = it()
+        if err then
+            core.log.error("iterate origins failed: ", err)
+            return src
+        end
+        if not m then
+            break
+        end
+        if m[1] ~= key then
+            table_insert(t, m[0])
+        end
+    end
+
+    return table_concat(t, "; ")
+end
+
+-- Locate the JWT in header, query arg or cookie (in that order),
+-- optionally scrubbing it from the forwarded request.
+local function fetch_jwt_token(conf, ctx)
+    local token = core.request.header(ctx, conf.header)
+    if token then
+        if conf.hide_credentials then
+            -- hide for header
+            core.request.set_header(ctx, conf.header, nil)
+        end
+
+        local prefix = sub_str(token, 1, 7)
+        if prefix == 'Bearer ' or prefix == 'bearer ' then
+            return sub_str(token, 8)
+        end
+
+        return token
+    end
+
+    local uri_args = core.request.get_uri_args(ctx) or {}
+    token = uri_args[conf.query]
+    if token then
+        if conf.hide_credentials then
+            -- hide for query
+            uri_args[conf.query] = nil
+            core.request.set_uri_args(ctx, uri_args)
+        end
+        return token
+    end
+
+    local val = ctx.var["cookie_" .. conf.cookie]
+    if not val then
+        return nil, "JWT not found in cookie"
+    end
+
+    if conf.hide_credentials then
+        -- hide for cookie
+        local src = core.request.header(ctx, "Cookie")
+        local reset_val = remove_specified_cookie(src, conf.cookie)
+        core.request.set_header(ctx, "Cookie", reset_val)
+    end
+
+    return val
+end
+
+local function get_secret(conf)
+    local secret = conf.secret
+
+    if conf.base64_secret then
+        return ngx_decode_base64(secret)
+    end
+
+    return secret
+end
+
+local function get_auth_secret(auth_conf)
+    if not auth_conf.algorithm or auth_conf.algorithm == "HS256"
+            or auth_conf.algorithm == "HS512" then
+        return get_secret(auth_conf)
+    elseif auth_conf.algorithm == "RS256" or auth_conf.algorithm == "ES256" then
+        return auth_conf.public_key
+    end
+end
+
+local function find_consumer(conf, ctx)
+    -- fetch token and hide credentials if necessary
+    local jwt_token, err = fetch_jwt_token(conf, ctx)
+    if not jwt_token then
+        core.log.info("failed to fetch JWT token: ", err)
+        return nil, nil, "Missing JWT token in request"
+    end
+
+    local jwt_obj = jwt:load_jwt(jwt_token)
+    core.log.info("jwt object: ", core.json.delay_encode(jwt_obj))
+    if not jwt_obj.valid then
+        err = "JWT token invalid: " .. jwt_obj.reason
+        if auth_utils.is_running_under_multi_auth(ctx) then
+            return nil, nil, err
+        end
+        core.log.warn(err)
+        return nil, nil, "JWT token invalid"
+    end
+
+    local key_claim_name = conf.key_claim_name
+    local user_key = jwt_obj.payload and jwt_obj.payload[key_claim_name]
+    if not user_key then
+        return nil, nil, "missing user key in JWT token"
+    end
+
+    local consumer, consumer_conf, err = consumer_mod.find_consumer(plugin_name, "key", user_key)
+    if not consumer then
+        core.log.warn("failed to find consumer: ", err or "invalid user key")
+        return nil, nil, "Invalid user key in JWT token"
+    end
+    core.log.info("consumer: ", core.json.delay_encode(consumer))
+
+    local auth_secret, err = get_auth_secret(consumer.auth_conf)
+    if not auth_secret then
+        err = "failed to retrieve secrets, err: " .. err
+        if auth_utils.is_running_under_multi_auth(ctx) then
+            return nil, nil, err
+        end
+        core.log.error(err)
+        return nil, nil, "failed to verify jwt"
+    end
+    local claim_specs = jwt:get_default_validation_options(jwt_obj)
+    claim_specs.lifetime_grace_period = consumer.auth_conf.lifetime_grace_period
+
+    jwt_obj = jwt:verify_jwt_obj(auth_secret, jwt_obj, claim_specs)
+    core.log.info("jwt object: ", core.json.delay_encode(jwt_obj))
+
+    if not jwt_obj.verified then
+        err = "failed to verify jwt: " .. jwt_obj.reason
+        if auth_utils.is_running_under_multi_auth(ctx) then
+            return nil, nil, err
+        end
+        core.log.warn(err)
+        return nil, nil, "failed to verify jwt"
+    end
+
+    if conf.store_in_ctx then
+        ctx.jwt_auth_payload = jwt_obj.payload
+    end
+
+    return consumer, consumer_conf
+end
+
+
+function _M.rewrite(conf, ctx)
+    local consumer, consumer_conf, err = find_consumer(conf, ctx)
+    if not consumer then
+        if not conf.anonymous_consumer then
+            return 401, { message = err }
+        end
+        consumer, consumer_conf, err = consumer_mod.get_anonymous_consumer(conf.anonymous_consumer)
+        if not consumer then
+            err = "jwt-auth failed to authenticate the request, code: 401. error: " .. err
+            core.log.error(err)
+            return 401, { message = "Invalid user authorization"}
+        end
+    end
+
+    core.log.info("consumer: ", core.json.delay_encode(consumer))
+
+    consumer_mod.attach_consumer(ctx, consumer, consumer_conf)
+    core.log.info("hit jwt-auth rewrite")
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/kafka-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/kafka-logger.lua
new file mode 100644
index 0000000..75510f5
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/kafka-logger.lua
@@ -0,0 +1,327 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
--
local expr = require("resty.expr.v1")
local core = require("apisix.core")
local log_util = require("apisix.utils.log-util")
local producer = require("resty.kafka.producer")
local bp_manager_mod = require("apisix.utils.batch-processor-manager")
local plugin = require("apisix.plugin")

local math = math
local pairs = pairs
local type = type
local req_read_body = ngx.req.read_body
local plugin_name = "kafka-logger"
local batch_processor_manager = bp_manager_mod.new("kafka logger")

-- one kafka producer is cached per plugin instance (see _M.log), so the
-- messages of a route keep flowing through the same client instead of
-- re-creating it on every request
local lrucache = core.lrucache.new({
    type = "plugin",
})

local schema = {
    type = "object",
    properties = {
        -- "origin" ships the raw request instead of the structured log entry
        meta_format = {
            type = "string",
            default = "default",
            enum = {"default", "origin"},
        },
        log_format = {type = "object"},
        -- deprecated, use "brokers" instead
        broker_list = {
            type = "object",
            minProperties = 1,
            patternProperties = {
                [".*"] = {
                    description = "the port of kafka broker",
                    type = "integer",
                    minimum = 1,
                    maximum = 65535,
                },
            },
        },
        brokers = {
            type = "array",
            minItems = 1,
            items = {
                type = "object",
                properties = {
                    host = {
                        type = "string",
                        description = "the host of kafka broker",
                    },
                    port = {
                        type = "integer",
                        minimum = 1,
                        maximum = 65535,
                        description = "the port of kafka broker",
                    },
                    sasl_config = {
                        type = "object",
                        description = "sasl config",
                        properties = {
                            mechanism = {
                                type = "string",
                                default = "PLAIN",
                                enum = {"PLAIN"},
                            },
                            user = { type = "string", description = "user" },
                            password = { type = "string", description = "password" },
                        },
                        required = {"user", "password"},
                    },
                },
                required = {"host", "port"},
            },
            uniqueItems = true,
        },
        kafka_topic = {type = "string"},
        producer_type = {
            type = "string",
            default = "async",
            enum = {"async", "sync"},
        },
        required_acks = {
            type = "integer",
            default = 1,
            enum = { 1, -1 },
        },
        key = {type = "string"},
        timeout = {type = "integer", minimum = 1, default = 3},
        include_req_body = {type = "boolean", default = false},
        include_req_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
        include_resp_body = {type = "boolean", default = false},
        include_resp_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
        max_req_body_bytes = {type = "integer", minimum = 1, default = 524288},
        max_resp_body_bytes = {type = "integer", minimum = 1, default = 524288},
        -- in lua-resty-kafka, cluster_name is defined as number
        -- see https://github.com/doujiang24/lua-resty-kafka#new-1
        cluster_name = {type = "integer", minimum = 1, default = 1},
        -- config for lua-resty-kafka, default value is same as lua-resty-kafka
        producer_batch_num = {type = "integer", minimum = 1, default = 200},
        producer_batch_size = {type = "integer", minimum = 0, default = 1048576},
        producer_max_buffering = {type = "integer", minimum = 1, default = 50000},
        producer_time_linger = {type = "integer", minimum = 1, default = 1},
        meta_refresh_interval = {type = "integer", minimum = 1, default = 30},
    },
    oneOf = {
        { required = {"broker_list", "kafka_topic"},},
        { required = {"brokers", "kafka_topic"},},
    }
}

local metadata_schema = {
    type = "object",
    properties = {
        log_format = {
            type = "object"
        },
        max_pending_entries = {
            type = "integer",
            description = "maximum number of pending entries in the batch processor",
            minimum = 1,
        },
    },
}

local _M = {
    version = 0.1,
    priority = 403,
    name = plugin_name,
    schema = batch_processor_manager:wrap_schema(schema),
    metadata_schema = metadata_schema,
}


-- Validate the plugin configuration, or the plugin metadata when schema_type
-- is TYPE_METADATA. Also validates the custom log_format, if present.
function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_METADATA then
        return core.schema.check(metadata_schema, conf)
    end

    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return nil, err
    end
    return log_util.check_log_schema(conf)
end


-- Best-effort lookup of the partition a message was queued on; only used for
-- debug logging in send_kafka_data, so returning nil is harmless.
local function get_partition_id(prod, topic, log_message)
    if prod.async then
        -- async mode: the ringbuffer queue stores flat triples of
        -- (topic, key, message), hence the stride of 3 and the i+2 index
        local ringbuffer = prod.ringbuffer
        for i = 1, ringbuffer.size, 3 do
            if ringbuffer.queue[i] == topic and
                ringbuffer.queue[i+2] == log_message then
                return math.floor(i / 3)
            end
        end
        core.log.info("current topic in ringbuffer has no message")
        return nil
    end

    -- sync mode: scan the per-partition send buffers of the topic
    local sendbuffer = prod.sendbuffer
    if not sendbuffer.topics[topic] then
        core.log.info("current topic in sendbuffer has no message")
        return nil
    end
    for i, message in pairs(sendbuffer.topics[topic]) do
        if log_message == message.queue[2] then
            return i
        end
    end
end


local function create_producer(broker_list, broker_config, cluster_name)
    core.log.info("create new kafka producer instance")
    return producer:new(broker_list, broker_config, cluster_name)
end


-- Push one encoded payload to the configured topic.
-- Returns true on success, or false plus an error description.
local function send_kafka_data(conf, log_message, prod)
    local ok, err = prod:send(conf.kafka_topic, conf.key, log_message)
    core.log.info("partition_id: ",
                  core.log.delay_exec(get_partition_id,
                                      prod, conf.kafka_topic, log_message))

    if not ok then
        -- fix: report whichever broker option is actually configured;
        -- the original always encoded the deprecated broker_list, which is
        -- nil when the "brokers" array form is used
        return false, "failed to send data to Kafka topic: " .. err ..
                      ", brokers: " .. core.json.encode(conf.broker_list or conf.brokers)
    end

    return true
end


-- Access phase: read the request body early when include_req_body is set and
-- the optional include_req_body_expr condition matches this request.
function _M.access(conf, ctx)
    if conf.include_req_body then
        local should_read_body = true
        if conf.include_req_body_expr then
            -- lazily compile the expression once and cache it on the conf table
            if not conf.request_expr then
                local request_expr, err = expr.new(conf.include_req_body_expr)
                if not request_expr then
                    core.log.error('generate request expr err ', err)
                    return
                end
                conf.request_expr = request_expr
            end

            local result = conf.request_expr:eval(ctx.var)

            if not result then
                should_read_body = false
            end
        end
        if should_read_body then
            req_read_body()
        end
    end
end


function _M.body_filter(conf, ctx)
    log_util.collect_body(conf, ctx)
end


-- Log phase: build the log entry and hand it to the batch processor; on the
-- first entry of a new processor, create the kafka producer plus the flush
-- callback that ships batches to kafka.
function _M.log(conf, ctx)
    local metadata = plugin.plugin_metadata(plugin_name)
    local max_pending_entries = metadata and metadata.value and
                                metadata.value.max_pending_entries or nil
    local entry
    if conf.meta_format == "origin" then
        entry = log_util.get_req_original(ctx, conf)
    else
        entry = log_util.get_log_entry(plugin_name, conf, ctx)
    end

    if batch_processor_manager:add_entry(conf, entry, max_pending_entries) then
        return
    end

    -- reuse producer via lrucache to avoid unbalanced partitions of messages in kafka
    local broker_list = core.table.clone(conf.brokers or {})
    local broker_config = {}

    -- merge the deprecated broker_list map ({host = port, ...}) into the
    -- array form expected by lua-resty-kafka
    if conf.broker_list then
        for host, port in pairs(conf.broker_list) do
            local broker = {
                host = host,
                port = port
            }
            core.table.insert(broker_list, broker)
        end
    end

    broker_config["request_timeout"] = conf.timeout * 1000
    broker_config["producer_type"] = conf.producer_type
    broker_config["required_acks"] = conf.required_acks
    broker_config["batch_num"] = conf.producer_batch_num
    broker_config["batch_size"] = conf.producer_batch_size
    broker_config["max_buffering"] = conf.producer_max_buffering
    broker_config["flush_time"] = conf.producer_time_linger * 1000
    broker_config["refresh_interval"] = conf.meta_refresh_interval * 1000

    local prod, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, create_producer,
                                               broker_list, broker_config, conf.cluster_name)
    -- fix: check the producer BEFORE dereferencing it; the original logged
    -- prod.client.broker_list[1].port first, which raises a nil-index error
    -- whenever producer creation failed, masking the real error below
    if not prod then
        return nil, "failed to identify the broker specified: " .. err
    end
    core.log.info("kafka cluster name ", conf.cluster_name, ", broker_list[1] port ",
                  prod.client.broker_list[1].port)

    -- flush callback executed by the batch processor: a single-entry batch is
    -- encoded as one object, larger batches as a JSON array
    local func = function(entries, batch_max_size)
        local data, err
        if batch_max_size == 1 then
            data = entries[1]
            if type(data) ~= "string" then
                data, err = core.json.encode(data) -- encode as single {}
            end
        else
            data, err = core.json.encode(entries) -- encode as array [{}]
        end

        if not data then
            return false, 'error occurred while encoding the data: ' .. err
        end

        core.log.info("send data to kafka: ", data)

        return send_kafka_data(conf, data, prod)
    end

    batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func, max_pending_entries)
end


return _M
--   You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")


-- Optional SASL/PLAIN credentials for the kafka consumer; the password field
-- is stored encrypted (encrypt_fields).
local schema = {
    type = "object",
    properties = {
        sasl = {
            type = "object",
            properties = {
                username = {
                    type = "string",
                },
                password = {
                    type = "string",
                },
            },
            required = {"username", "password"},
        },
    },
    encrypt_fields = {"sasl.password"},
}


local _M = {
    version = 0.1,
    priority = 508,
    name = "kafka-proxy",
    schema = schema,
}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- Expose the configured credentials on the request context; presumably the
-- kafka consumer/pubsub code path reads these flags when it connects to the
-- brokers -- confirm against the consumer module.
function _M.access(conf, ctx)
    local sasl = conf.sasl
    if not sasl then
        return
    end

    ctx.kafka_consumer_enable_sasl = true
    ctx.kafka_consumer_sasl_username = sasl.username
    ctx.kafka_consumer_sasl_password = sasl.password
end


return _M
--   You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local consumer_mod = require("apisix.consumer")
local plugin_name = "key-auth"
local schema_def = require("apisix.schema_def")

-- route/service side configuration: where to look for the api key, whether to
-- strip it before proxying, and an optional anonymous fallback consumer
local schema = {
    type = "object",
    properties = {
        header = {
            type = "string",
            default = "apikey",
        },
        query = {
            type = "string",
            default = "apikey",
        },
        hide_credentials = {
            type = "boolean",
            default = false,
        },
        anonymous_consumer = schema_def.anonymous_consumer_schema,
    },
}

-- consumer side configuration: the key that identifies the consumer; stored
-- encrypted via encrypt_fields
local consumer_schema = {
    type = "object",
    properties = {
        key = { type = "string" },
    },
    encrypt_fields = {"key"},
    required = {"key"},
}


local _M = {
    version = 0.1,
    priority = 2500,
    type = 'auth',
    name = plugin_name,
    schema = schema,
    consumer_schema = consumer_schema,
}


-- Validate either the consumer-side or the route-side configuration,
-- depending on schema_type.
function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_CONSUMER then
        return core.schema.check(consumer_schema, conf)
    else
        return core.schema.check(schema, conf)
    end
end

-- Resolve the consumer for the api key carried by this request. The header
-- takes precedence over the query string. When hide_credentials is on, the
-- credential is removed from whichever location it was found in.
-- Returns (consumer, consumer_conf) on success or (nil, nil, err-message).
local function find_consumer(ctx, conf)
    local from_header = true
    local key = core.request.header(ctx, conf.header)

    if not key then
        local uri_args = core.request.get_uri_args(ctx) or {}
        key = uri_args[conf.query]
        from_header = false
    end

    if not key then
        return nil, nil, "Missing API key in request"
    end

    local consumer, consumer_conf, err = consumer_mod.find_consumer(plugin_name, "key", key)
    if not consumer then
        -- log the detailed reason, but return a generic message to the client
        core.log.warn("failed to find consumer: ", err or "invalid api key")
        return nil, nil, "Invalid API key in request"
    end
    core.log.info("consumer: ", core.json.delay_encode(consumer))

    if conf.hide_credentials then
        if from_header then
            core.request.set_header(ctx, conf.header, nil)
        else
            local args = core.request.get_uri_args(ctx)
            args[conf.query] = nil
            core.request.set_uri_args(ctx, args)
        end
    end

    return consumer, consumer_conf
end


-- Rewrite phase: authenticate the request. If no valid key is presented and
-- an anonymous_consumer is configured, attach that consumer instead of
-- rejecting; otherwise respond 401.
function _M.rewrite(conf, ctx)
    local consumer, consumer_conf, err = find_consumer(ctx, conf)
    if not consumer then
        if not conf.anonymous_consumer then
            return 401, { message = err}
        end
        consumer, consumer_conf, err = consumer_mod.get_anonymous_consumer(conf.anonymous_consumer)
        if not consumer then
            err = "key-auth failed to authenticate the request, code: 401. error: " .. err
            core.log.error(err)
            return 401, { message = "Invalid user authorization"}
        end
    end

    core.log.info("consumer: ", core.json.delay_encode(consumer))
    consumer_mod.attach_consumer(ctx, consumer, consumer_conf)
    core.log.info("hit key-auth rewrite")
end


return _M
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local type = type
local pairs = pairs
local math_random = math.random
local ngx = ngx

local http = require("resty.http")
local bp_manager_mod = require("apisix.utils.batch-processor-manager")
local core = require("apisix.core")
local str_format = core.string.format

local plugin_name = "lago"
local batch_processor_manager = bp_manager_mod.new("lago logger")

local schema = {
    type = "object",
    properties = {
        -- core configurations
        endpoint_addrs = {
            type = "array",
            minItems = 1,
            items = core.schema.uri_def,
            description = "Lago API address, like http://127.0.0.1:3000, "
                .. "it supports both self-hosted and cloud. If multiple endpoints are"
                .. " configured, the log will be pushed to a randomly determined"
                .. " endpoint from the list.",
        },
        endpoint_uri = {
            type = "string",
            minLength = 1,
            default = "/api/v1/events/batch",
            description = "Lago API endpoint, it needs to be set to the batch send endpoint.",
        },
        token = {
            type = "string",
            description = "Lago API key, create one for your organization on dashboard."
        },
        event_transaction_id = {
            type = "string",
            description = "Event's transaction ID, it is used to identify and de-duplicate"
                .. " the event, it supports string templates containing APISIX and"
                .. " NGINX variables, like \"req_${request_id}\", which allows you"
                .. " to use values returned by upstream services or request-id"
                .. " plugin integration",
        },
        event_subscription_id = {
            type = "string",
            description = "Event's subscription ID, which is automatically generated or"
                .. " specified by you when you assign the plan to the customer on"
                .. " Lago, used to associate API consumption to a customer subscription,"
                .. " it supports string templates containing APISIX and NGINX variables,"
                .. " like \"cus_${consumer_name}\", which allows you to use values"
                .. " returned by upstream services or APISIX consumer",
        },
        event_code = {
            type = "string",
            description = "Lago billable metric's code for associating an event to a specified"
                .. "billable item",
        },
        event_properties = {
            type = "object",
            patternProperties = {
                [".*"] = {
                    type = "string",
                    minLength = 1,
                },
            },
            description = "Event's properties, used to attach information to an event, this"
                .. " allows you to send certain information on a event to Lago, such"
                .. " as sending HTTP status to take a failed request off the bill, or"
                .. " sending the AI token consumption in the response body for accurate"
                .. " billing, its keys are fixed strings and its values can be string"
                .. " templates containing APISIX and NGINX variables, like \"${status}\""
        },

        -- connection layer configurations
        ssl_verify = {type = "boolean", default = true},
        timeout = {
            type = "integer",
            minimum = 1,
            maximum = 60000,
            default = 3000,
            description = "timeout in milliseconds",
        },
        keepalive = {type = "boolean", default = true},
        keepalive_timeout = {
            type = "integer",
            minimum = 1000,
            default = 60000,
            description = "keepalive timeout in milliseconds",
        },
        keepalive_pool = {type = "integer", minimum = 1, default = 5},
    },
    required = {"endpoint_addrs", "token", "event_transaction_id", "event_subscription_id",
                "event_code"},
    encrypt_fields = {"token"},
}
schema = batch_processor_manager:wrap_schema(schema)

-- According to https://getlago.com/docs/api-reference/events/batch, the maximum batch size is 100,
-- so we have to override the default batch size to make it work out of the box,the plugin does
-- not set a maximum limit, so if Lago relaxes the limit, then user can modify it
-- to a larger batch size
-- This does not affect other plugins, schema is appended after deep copy
schema.properties.batch_max_size.default = 100


local _M = {
    version = 0.1,
    priority = 415,
    name = plugin_name,
    schema = schema,
}


-- Validate the configuration; also warns about plain-http endpoints and TLS
-- verification being disabled via the core helpers before schema checking.
function _M.check_schema(conf, schema_type)
    local check = {"endpoint_addrs"}
    core.utils.check_https(check, conf, plugin_name)
    core.utils.check_tls_bool({"ssl_verify"}, conf, plugin_name)

    return core.schema.check(schema, conf)
end


-- POST one batch of usage events to a randomly selected Lago endpoint.
-- Returns true on 2xx, or false plus an error description.
local function send_http_data(conf, data)
    local body, err = core.json.encode(data)
    if not body then
        return false, str_format("failed to encode json: %s", err)
    end
    local params = {
        headers = {
            ["Content-Type"] = "application/json",
            ["Authorization"] = "Bearer " .. conf.token,
        },
        keepalive = conf.keepalive,
        ssl_verify = conf.ssl_verify,
        method = "POST",
        body = body,
    }

    if conf.keepalive then
        params.keepalive_timeout = conf.keepalive_timeout
        params.keepalive_pool = conf.keepalive_pool
    end

    local httpc, err = http.new()
    if not httpc then
        return false, str_format("create http client error: %s", err)
    end
    httpc:set_timeout(conf.timeout)

    -- select an random endpoint and build URL
    local endpoint_url = conf.endpoint_addrs[math_random(#conf.endpoint_addrs)]..conf.endpoint_uri
    local res, err = httpc:request_uri(endpoint_url, params)
    if not res then
        return false, err
    end

    if res.status >= 300 then
        return false, str_format("lago api returned status: %d, body: %s",
                                 res.status, res.body or "")
    end

    return true
end


-- Log phase: resolve the templated identifiers into a Lago usage event and
-- enqueue it; the batch processor flushes events via send_http_data.
function _M.log(conf, ctx)
    -- build usage event
    local event_transaction_id, err = core.utils.resolve_var(conf.event_transaction_id, ctx.var)
    if err then
        core.log.error("failed to resolve event_transaction_id, event dropped: ", err)
        return
    end

    local event_subscription_id, err = core.utils.resolve_var(conf.event_subscription_id, ctx.var)
    if err then
        core.log.error("failed to resolve event_subscription_id, event dropped: ", err)
        return
    end

    local entry = {
        transaction_id = event_transaction_id,
        external_subscription_id = event_subscription_id,
        code = conf.event_code,
        timestamp = ngx.req.start_time(),
    }

    -- property values may themselves be templates; resolve them on a deep
    -- copy so the shared conf table is never mutated
    if conf.event_properties and type(conf.event_properties) == "table" then
        entry.properties = core.table.deepcopy(conf.event_properties)
        for key, value in pairs(entry.properties) do
            local new_val, err, n_resolved = core.utils.resolve_var(value, ctx.var)
            if not err and n_resolved > 0 then
                entry.properties[key] = new_val
            end
        end
    end

    if batch_processor_manager:add_entry(conf, entry) then
        return
    end

    -- generate a function to be executed by the batch processor
    local func = function(entries)
        return send_http_data(conf, {
            events = entries,
        })
    end

    batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func)
end


return _M
--
local core = require("apisix.core")
local ngx = ngx
local ngx_re = require("ngx.re")
local consumer_mod = require("apisix.consumer")
local ldap = require("resty.ldap")

-- route/service side configuration
local schema = {
    type = "object",
    title = "work with route or service object",
    properties = {
        base_dn = { type = "string" },
        ldap_uri = { type = "string" },
        use_tls = { type = "boolean", default = false },
        tls_verify = { type = "boolean", default = false },
        uid = { type = "string", default = "cn" }
    },
    required = {"base_dn","ldap_uri"},
}

-- consumer side configuration: the DN this consumer is bound to
local consumer_schema = {
    type = "object",
    title = "work with consumer object",
    properties = {
        user_dn = { type = "string" },
    },
    required = {"user_dn"},
}

local plugin_name = "ldap-auth"


local _M = {
    version = 0.1,
    priority = 2540,
    type = 'auth',
    name = plugin_name,
    schema = schema,
    consumer_schema = consumer_schema
}

-- Validate either the consumer-side or the route-side configuration,
-- depending on schema_type; warns about insecure TLS settings.
function _M.check_schema(conf, schema_type)
    local ok, err
    if schema_type == core.schema.TYPE_CONSUMER then
        ok, err = core.schema.check(consumer_schema, conf)
    else
        core.utils.check_tls_bool({"use_tls", "tls_verify"}, conf, plugin_name)
        ok, err = core.schema.check(schema, conf)
    end

    return ok, err
end

-- Parse a "Basic <base64>" Authorization header into username/password.
-- Returns {username, password} or nil plus an error message.
-- NOTE(review): the decoded credential is split on every ":", and only the
-- second field is kept -- passwords containing ":" are truncated. TODO
-- confirm whether this matches the intended upstream behavior.
local function extract_auth_header(authorization)
    local obj = { username = "", password = "" }

    local m, err = ngx.re.match(authorization, "Basic\\s(.+)", "jo")
    if err then
        -- error authorization
        return nil, err
    end

    if not m then
        return nil, "Invalid authorization header format"
    end

    local decoded = ngx.decode_base64(m[1])

    if not decoded then
        return nil, "Failed to decode authentication header: " .. m[1]
    end

    local res
    res, err = ngx_re.split(decoded, ":")
    if err then
        return nil, "Split authorization err:" .. err
    end
    if #res < 2 then
        return nil, "Split authorization err: invalid decoded data: " .. decoded
    end

    -- strip any whitespace from the credential fields
    obj.username = ngx.re.gsub(res[1], "\\s+", "", "jo")
    obj.password = ngx.re.gsub(res[2], "\\s+", "", "jo")

    return obj, nil
end

-- Rewrite phase: extract Basic credentials, bind against the LDAP server,
-- then attach the APISIX consumer whose user_dn matches the authenticated DN.
function _M.rewrite(conf, ctx)
    core.log.info("plugin rewrite phase, conf: ", core.json.delay_encode(conf))

    -- 1. extract authorization from header
    local auth_header = core.request.header(ctx, "Authorization")
    if not auth_header then
        core.response.set_header("WWW-Authenticate", "Basic realm='.'")
        return 401, { message = "Missing authorization in request" }
    end

    local user, err = extract_auth_header(auth_header)
    if err or not user then
        if err then
            core.log.warn(err)
        else
            core.log.warn("nil user")
        end
        return 401, { message = "Invalid authorization in request" }
    end

    -- 2. try authenticate the user against the ldap server
    local ldap_host, ldap_port = core.utils.parse_addr(conf.ldap_uri)
    local ldapconf = {
        timeout = 10000,
        start_tls = false,
        ldap_host = ldap_host,
        ldap_port = ldap_port or 389,
        ldaps = conf.use_tls,
        tls_verify = conf.tls_verify,
        base_dn = conf.base_dn,
        attribute = conf.uid,
        keepalive = 60000,
    }
    local res, err = ldap.ldap_authenticate(user.username, user.password, ldapconf)
    if not res then
        core.log.warn("ldap-auth failed: ", err)
        return 401, { message = "Invalid user authorization" }
    end

    local user_dn = conf.uid .. "=" .. user.username .. "," .. conf.base_dn

    -- 3. Retrieve consumer for authorization plugin
    local consumer_conf = consumer_mod.plugin(plugin_name)
    if not consumer_conf then
        return 401, { message = "Missing related consumer" }
    end

    local consumers = consumer_mod.consumers_kv(plugin_name, consumer_conf, "user_dn")
    local consumer = consumers[user_dn]
    if not consumer then
        return 401, {message = "Invalid user authorization"}
    end
    consumer_mod.attach_consumer(ctx, consumer, consumer_conf)

    -- fix: the original logged "hit basic-auth access" (copy-paste from the
    -- basic-auth plugin), which misattributes the hit in logs; key-auth uses
    -- the "hit <plugin> rewrite" convention
    core.log.info("hit ldap-auth rewrite")
end

return _M
--
local core = require("apisix.core")
local limit_conn = require("apisix.plugins.limit-conn.init")
local redis_schema = require("apisix.utils.redis-schema")
local policy_to_additional_properties = redis_schema.schema
local plugin_name = "limit-conn"



local schema = {
    type = "object",
    properties = {
        conn = {type = "integer", exclusiveMinimum = 0}, -- limit.conn max
        burst = {type = "integer", minimum = 0},
        default_conn_delay = {type = "number", exclusiveMinimum = 0},
        only_use_default_delay = {type = "boolean", default = false},
        key = {type = "string"},
        key_type = {type = "string",
                    enum = {"var", "var_combination"},
                    default = "var",
        },
        policy = {
            type = "string",
            enum = {"redis", "redis-cluster", "local"},
            default = "local",
        },
        rejected_code = {
            type = "integer", minimum = 200, maximum = 599, default = 503
        },
        rejected_msg = {
            type = "string", minLength = 1
        },
        allow_degradation = {type = "boolean", default = false}
    },
    required = {"conn", "burst", "default_conn_delay", "key"},
    -- JSON Schema conditional: when policy == "redis" the redis connection
    -- properties from redis_schema are additionally required; otherwise a
    -- nested if/then does the same for policy == "redis-cluster"
    ["if"] = {
        properties = {
            policy = {
                enum = {"redis"},
            },
        },
    },
    ["then"] = policy_to_additional_properties.redis,
    ["else"] = {
        ["if"] = {
            properties = {
                policy = {
                    enum = {"redis-cluster"},
                },
            },
        },
        ["then"] = policy_to_additional_properties["redis-cluster"],
    }
}

local _M = {
    version = 0.1,
    priority = 1003,
    name = plugin_name,
    schema = schema,
}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- Access phase: take one concurrency slot (may delay or reject the request);
-- the actual counting lives in apisix.plugins.limit-conn.init.
function _M.access(conf, ctx)
    return limit_conn.increase(conf, ctx)
end


-- Log phase: release the slot taken in access.
function _M.log(conf, ctx)
    return limit_conn.decrease(conf, ctx)
end


return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Core implementation of the limit-conn plugin: increase() reserves a
-- concurrent-connection slot in the access phase, decrease() releases it
-- in the log phase.
local limit_conn_new = require("resty.limit.conn").new
local core = require("apisix.core")
local is_http = ngx.config.subsystem == "http"
local sleep = core.sleep
-- The shared dict backing the "local" policy; the stream subsystem uses a
-- separate dict so http and stream counters never collide.
local shdict_name = "plugin-limit-conn"
if ngx.config.subsystem == "stream" then
    shdict_name = shdict_name .. "-stream"
end

local redis_single_new
local redis_cluster_new
do
    local redis_src = "apisix.plugins.limit-conn.limit-conn-redis"
    redis_single_new = require(redis_src).new

    local cluster_src = "apisix.plugins.limit-conn.limit-conn-redis-cluster"
    redis_cluster_new = require(cluster_src).new
end


-- One limiter object is cached per conf table (keyed by the conf itself).
local lrucache = core.lrucache.new({
    type = "plugin",
})
local _M = {}


-- Build the limiter backend matching conf.policy.
-- Returns the limiter object, or nil plus an error message.
local function create_limit_obj(conf)
    if conf.policy == "local" then
        core.log.info("create new limit-conn plugin instance")
        return limit_conn_new(shdict_name, conf.conn, conf.burst,
                              conf.default_conn_delay)
    elseif conf.policy == "redis" then

        core.log.info("create new limit-conn redis plugin instance")

        return redis_single_new("plugin-limit-conn", conf, conf.conn, conf.burst,
                                conf.default_conn_delay)

    elseif conf.policy == "redis-cluster" then

        core.log.info("create new limit-conn redis-cluster plugin instance")

        return redis_cluster_new("plugin-limit-conn", conf, conf.conn, conf.burst,
                                 conf.default_conn_delay)
    else
        return nil, "policy enum not match"
    end
end


-- access-phase handler.  Resolves the limiting key from conf.key /
-- conf.key_type, charges one connection against it, and sleeps when the
-- limiter asks for a delay.  Returns nothing on success, or an HTTP status
-- code (plus optional body) on rejection/error.
function _M.increase(conf, ctx)
    core.log.info("ver: ", ctx.conf_version)
    local lim, err = lrucache(conf, nil, create_limit_obj, conf)
    if not lim then
        core.log.error("failed to instantiate a resty.limit.conn object: ", err)
        if conf.allow_degradation then
            -- degradation enabled: let the request through instead of 500
            return
        end
        return 500
    end

    local conf_key = conf.key
    local key
    if conf.key_type == "var_combination" then
        local err, n_resolved
        key, err, n_resolved = core.utils.resolve_var(conf_key, ctx.var)
        if err then
            core.log.error("could not resolve vars in ", conf_key, " error: ", err)
        end

        -- no variable resolved at all -> treat as missing key
        if n_resolved == 0 then
            key = nil
        end
    else
        key = ctx.var[conf_key]
    end

    if key == nil then
        core.log.info("The value of the configured key is empty, use client IP instead")
        -- When the value of key is empty, use client IP instead
        key = ctx.var["remote_addr"]
    end

    -- Scope the counter to this route/service conf version so a conf change
    -- starts from a fresh counter.
    key = key .. ctx.conf_type .. ctx.conf_version
    core.log.info("limit key: ", key)

    local delay, err = lim:incoming(key, true)
    if not delay then
        if err == "rejected" then
            if conf.rejected_msg then
                return conf.rejected_code, { error_msg = conf.rejected_msg }
            end
            return conf.rejected_code or 503
        end

        core.log.error("failed to limit conn: ", err)
        if conf.allow_degradation then
            return
        end
        return 500
    end

    if lim:is_committed() then
        if not ctx.limit_conn then
            ctx.limit_conn = core.tablepool.fetch("plugin#limit-conn", 0, 6)
        end

        -- Record (limiter, key, delay, only_use_default_delay) as a flat
        -- 4-tuple; decrease() below iterates with the same stride of 4.
        core.table.insert_tail(ctx.limit_conn, lim, key, delay, conf.only_use_default_delay)
    end

    if delay >= 0.001 then
        sleep(delay)
    end
end


-- log-phase handler.  Releases every slot recorded by increase(), feeding
-- the observed request latency back into the limiter's delay estimate.
function _M.decrease(conf, ctx)
    local limit_conn = ctx.limit_conn
    if not limit_conn then
        return
    end

    -- stride of 4 matches the 4-tuple layout written in increase()
    for i = 1, #limit_conn, 4 do
        local lim = limit_conn[i]
        local key = limit_conn[i + 1]
        local delay = limit_conn[i + 2]
        local use_delay = limit_conn[i + 3]

        local latency
        if is_http then
            if not use_delay then
                if ctx.proxy_passed then
                    latency = ctx.var.upstream_response_time
                else
                    -- subtract the artificial delay we injected ourselves
                    latency = ctx.var.request_time - delay
                end
            end
        end
        core.log.debug("request latency is ", latency) -- for test

        local conn, err = lim:leaving(key, latency)
        if not conn then
            core.log.error("failed to record the connection leaving request: ",
                           err)
            break
        end
    end

    core.tablepool.release("plugin#limit-conn", limit_conn)
    ctx.limit_conn = nil
    return
end


return _M
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.
-- See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Redis-cluster backend for limit-conn.  Holds a single cluster client and
-- delegates the counting arithmetic to limit-conn.util.
local redis_cluster = require("apisix.utils.rediscluster")
local core = require("apisix.core")
local util = require("apisix.plugins.limit-conn.util")
local setmetatable = setmetatable
local ngx_timer_at = ngx.timer.at

local _M = {version = 0.1}


local mt = {
    __index = _M
}


-- Create a limiter bound to a redis-cluster client.
-- Returns the limiter, or nil plus an error when the client can't be built.
function _M.new(plugin_name, conf, max, burst, default_conn_delay)
    local cluster_client, client_err =
        redis_cluster.new(conf, "plugin-limit-conn-redis-cluster-slot-lock")
    if not cluster_client then
        return nil, client_err
    end

    local obj = {
        conf = conf,
        plugin_name = plugin_name,
        burst = burst,
        max = max + 0,               -- just to ensure the param is good
        unit_delay = default_conn_delay,
        red_cli = cluster_client,
    }
    return setmetatable(obj, mt)
end


-- Charge one connection against `key` (commit=true mutates the counter).
function _M.incoming(self, key, commit)
    return util.incoming(self, self.red_cli, key, commit)
end


-- True when the last incoming() actually committed a slot.
function _M.is_committed(self)
    return self.committed
end


local function leaving_thread(premature, self, key, req_latency)
    return util.leaving(self, self.red_cli, key, req_latency)
end


-- Release a slot.  The decrement runs in a 0-delay timer because the log
-- phase (log_by_lua) can't use cosockets; the return value therefore only
-- reflects whether the timer was scheduled.
function _M.leaving(self, key, req_latency)
    local scheduled, timer_err = ngx_timer_at(0, leaving_thread, self, key, req_latency)
    if not scheduled then
        core.log.error("failed to create timer: ", timer_err)
        return nil, timer_err
    end

    return scheduled
end


return _M
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Single-node Redis backend for limit-conn.  Unlike the cluster variant it
-- creates a fresh Redis connection per call instead of holding one client.
local redis = require("apisix.utils.redis")
local core = require("apisix.core")
local util = require("apisix.plugins.limit-conn.util")
local ngx_timer_at = ngx.timer.at

local setmetatable = setmetatable


local _M = {version = 0.1}


local mt = {
    __index = _M
}

-- Create a limiter; connection setup is deferred to incoming()/leaving().
function _M.new(plugin_name, conf, max, burst, default_conn_delay)

    local self = {
        conf = conf,
        plugin_name = plugin_name,
        burst = burst,
        max = max + 0,    -- just to ensure the param is good
        unit_delay = default_conn_delay,
    }
    return setmetatable(self, mt)
end


-- Charge one connection against `key` using a per-request Redis connection.
-- Returns nil plus an error if the connection cannot be established.
function _M.incoming(self, key, commit)
    local conf = self.conf
    local red, err = redis.new(conf)
    if not red then
        return red, err
    end
    return util.incoming(self, red, key, commit)
end


-- True when the last incoming() actually committed a slot.
function _M.is_committed(self)
    return self.committed
end


-- Timer body: opens its own Redis connection (timers may use cosockets)
-- and performs the decrement.
local function leaving_thread(premature, self, key, req_latency)

    local conf = self.conf
    local red, err = redis.new(conf)
    if not red then
        return red, err
    end
    return util.leaving(self, red, key, req_latency)
end


-- Release a slot.  Runs in a 0-delay timer because log_by_lua can't use
-- cosockets; the return value only reflects whether the timer was scheduled.
function _M.leaving(self, key, req_latency)
    local ok, err = ngx_timer_at(0, leaving_thread, self, key, req_latency)
    if not ok then
        core.log.error("failed to create timer: ", err)
        return nil, err
    end

    return ok

end



return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Shared counter arithmetic for the Redis-backed limit-conn backends.
-- `self` supplies max / burst / unit_delay and receives `committed`;
-- `red` is any client exposing incrby() and get().

local assert = assert
local math = require "math"
local floor = math.floor
local _M = {version = 0.3}


-- Reserve one connection slot under `key`.
-- commit=true: atomically increment; over max+burst the increment is undone
-- and (nil, "rejected") is returned.  commit=false: read-only estimate.
-- On success returns (delay, active_count); excess connections get a delay
-- proportional to how far past `max` the counter is.
function _M.incoming(self, red, key, commit)
    self.committed = false
    local counter_key = "limit_conn" .. ":" .. key
    local limit = self.max

    local active, err
    if not commit then
        -- dry-run path: peek at the stored counter without mutating it.
        -- NOTE(review): lua-resty-redis returns ngx.null (not nil) for a
        -- missing key, which the `or 0` fallback would not catch — confirm
        -- this path is only exercised where the key is known to exist.
        local stored
        stored, err = red:get(counter_key)
        if err then
            return nil, err
        end
        active = (stored or 0) + 1
    else
        active, err = red:incrby(counter_key, 1)
        if not active then
            return nil, err
        end

        if active > limit + self.burst then
            -- over the hard cap: roll the increment back and reject
            active, err = red:incrby(counter_key, -1)
            if not active then
                return nil, err
            end
            return nil, "rejected"
        end
        self.committed = true
    end

    if active > limit then
        -- make the excessive connections wait
        return self.unit_delay * floor((active - 1) / limit), active
    end

    -- within the soft limit: no delay
    return 0, active
end


-- Release one slot under `key`; when a request latency is supplied, fold it
-- into the running unit_delay estimate (simple moving average).
function _M.leaving(self, red, key, req_latency)
    assert(key)
    local counter_key = "limit_conn" .. ":" .. key

    local active, err = red:incrby(counter_key, -1)
    if not active then
        return nil, err
    end

    if req_latency then
        self.unit_delay = (req_latency + self.unit_delay) / 2
    end

    return active
end


return _M
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Plugin entry point for "limit-count": fixed-window request counting.
-- All real logic lives in apisix.plugins.limit-count.init; this module wires
-- the access phase, secret resolution and the workflow-plugin integration.
local fetch_secrets = require("apisix.secret").fetch_secrets
local limit_count = require("apisix.plugins.limit-count.init")
local workflow = require("apisix.plugins.workflow")

local plugin_name = "limit-count"

local _M = {
    name = plugin_name,
    version = 0.5,
    priority = 1002,
    schema = limit_count.schema,
    metadata_schema = limit_count.metadata_schema,
}


-- Delegate config/metadata validation to the shared implementation.
function _M.check_schema(conf, schema_type)
    return limit_count.check_schema(conf, schema_type)
end


-- access phase: resolve any secret references in conf, then charge one
-- request against the counter.
function _M.access(conf, ctx)
    conf = fetch_secrets(conf, true, conf, "")
    return limit_count.rate_limit(conf, ctx, plugin_name, 1)
end


-- Register limit-count as a workflow action so the workflow plugin can
-- invoke the same rate limiting with its own conf.
function _M.workflow_handler()
    local function run(conf, ctx)
        return limit_count.rate_limit(conf, ctx, plugin_name, 1)
    end

    local function validate(conf)
        return limit_count.check_schema(conf)
    end

    workflow.register(plugin_name, run, validate)
end

return _M
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Core implementation of limit-count: schema, group handling, limiter
-- construction (local / redis / redis-cluster) and the rate_limit()
-- entry point used by limit-count.lua and the workflow plugin.
local core = require("apisix.core")
local apisix_plugin = require("apisix.plugin")
local tab_insert = table.insert
local ipairs = ipairs
local pairs = pairs
local redis_schema = require("apisix.utils.redis-schema")
local policy_to_additional_properties = redis_schema.schema
local get_phase = ngx.get_phase

local limit_redis_cluster_new
local limit_redis_new
local limit_local_new
do
    local local_src = "apisix.plugins.limit-count.limit-count-local"
    limit_local_new = require(local_src).new

    local redis_src = "apisix.plugins.limit-count.limit-count-redis"
    limit_redis_new = require(redis_src).new

    local cluster_src = "apisix.plugins.limit-count.limit-count-redis-cluster"
    limit_redis_cluster_new = require(cluster_src).new
end
-- limiter cache; serial_creating avoids concurrent creation of the same obj
local lrucache = core.lrucache.new({
    type = 'plugin', serial_creating = true,
})
-- caches the first conf seen for each group (used for mismatch detection)
local group_conf_lru = core.lrucache.new({
    type = 'plugin',
})

-- Header names used when no plugin metadata overrides them.
local metadata_defaults = {
    limit_header = "X-RateLimit-Limit",
    remaining_header = "X-RateLimit-Remaining",
    reset_header = "X-RateLimit-Reset",
}

local metadata_schema = {
    type = "object",
    properties = {
        limit_header = {
            type = "string",
            default = metadata_defaults.limit_header,
        },
        remaining_header = {
            type = "string",
            default = metadata_defaults.remaining_header,
        },
        reset_header = {
            type = "string",
            default = metadata_defaults.reset_header,
        },
    },
}

-- Per-route configuration schema; the if/then/else chain mixes in the
-- redis / redis-cluster connection properties per policy.
local schema = {
    type = "object",
    properties = {
        count = {type = "integer", exclusiveMinimum = 0},
        time_window = {type = "integer", exclusiveMinimum = 0},
        group = {type = "string"},
        key = {type = "string", default = "remote_addr"},
        key_type = {type = "string",
            enum = {"var", "var_combination", "constant"},
            default = "var",
        },
        rejected_code = {
            type = "integer", minimum = 200, maximum = 599, default = 503
        },
        rejected_msg = {
            type = "string", minLength = 1
        },
        policy = {
            type = "string",
            enum = {"local", "redis", "redis-cluster"},
            default = "local",
        },
        allow_degradation = {type = "boolean", default = false},
        show_limit_quota_header = {type = "boolean", default = true}
    },
    required = {"count", "time_window"},
    ["if"] = {
        properties = {
            policy = {
                enum = {"redis"},
            },
        },
    },
    ["then"] = policy_to_additional_properties.redis,
    ["else"] = {
        ["if"] = {
            properties = {
                policy = {
                    enum = {"redis-cluster"},
                },
            },
        },
        ["then"] = policy_to_additional_properties["redis-cluster"],
    }
}

-- pristine copy taken before any caller mutates the schema tables; its
-- property names form the whitelist used in the group-consistency check
local schema_copy = core.table.deepcopy(schema)

local _M = {
    schema = schema,
    metadata_schema = metadata_schema,
}


-- identity factory for group_conf_lru: cache the first conf seen per group
local function group_conf(conf)
    return conf
end



-- Validate conf (or plugin metadata).  When conf.group is set, also verify
-- that every route sharing the group uses an identical configuration.
function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_METADATA then
        return core.schema.check(metadata_schema, conf)
    end

    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    if conf.group then
        -- conf._vid is set when invoked through another plugin (e.g. the
        -- workflow plugin, see gen_limit_key); group is not supported there
        if conf._vid then
            return false, "group is not supported"
        end

        local fields = {}
        -- When the group field is configured,
        -- we will use schema_copy to get the whitelist of properties,
        -- so that we can avoid getting injected properties.
        for k in pairs(schema_copy.properties) do
            tab_insert(fields, k)
        end
        local extra = policy_to_additional_properties[conf.policy]
        if extra then
            for k in pairs(extra.properties) do
                tab_insert(fields, k)
            end
        end

        local prev_conf = group_conf_lru(conf.group, "", group_conf, conf)

        for _, field in ipairs(fields) do
            if not core.table.deep_eq(prev_conf[field], conf[field]) then
                core.log.error("previous limit-conn group ", prev_conf.group,
                               " conf: ", core.json.encode(prev_conf))
                core.log.error("current limit-conn group ", conf.group,
                               " conf: ", core.json.encode(conf))
                return false, "group conf mismatched"
            end
        end
    end

    return true
end


-- Build the counter backend matching conf.policy (defaults to "local").
local function create_limit_obj(conf, plugin_name)
    core.log.info("create new " .. plugin_name .. " plugin instance")

    if not conf.policy or conf.policy == "local" then
        return limit_local_new("plugin-" .. plugin_name, conf.count,
                               conf.time_window)
    end

    if conf.policy == "redis" then
        return limit_redis_new("plugin-" .. plugin_name,
                               conf.count, conf.time_window, conf)
    end

    if conf.policy == "redis-cluster" then
        return limit_redis_cluster_new("plugin-" .. plugin_name, conf.count,
                                       conf.time_window, conf)
    end

    return nil
end


-- Build the full counter key for this request's resolved key.
local function gen_limit_key(conf, ctx, key)
    if conf.group then
        return conf.group .. ':' .. key
    end

    -- here we add a separator ':' to mark the boundary of the prefix and the key itself
    -- Here we use plugin-level conf version to prevent the counter from being reset
    -- because of the change elsewhere.
    -- A route which reuses a previous route's ID will inherit its counter.
    local conf_type = ctx.conf_type_without_consumer or ctx.conf_type
    local conf_id = ctx.conf_id_without_consumer or ctx.conf_id
    local new_key = conf_type .. conf_id .. ':' .. apisix_plugin.conf_version(conf)
                    .. ':' .. key
    if conf._vid then
        -- conf has _vid means it's from workflow plugin, add _vid to the key
        -- so that the counter is unique per action.
        return new_key .. ':' .. conf._vid
    end

    return new_key
end


-- Fetch (or create) the limiter object for this conf.  Grouped confs share
-- one limiter per group; otherwise limiters are cached per plugin ctx,
-- keyed additionally by policy (and workflow _vid when present).
local function gen_limit_obj(conf, ctx, plugin_name)
    if conf.group then
        return lrucache(conf.group, "", create_limit_obj, conf, plugin_name)
    end

    local extra_key
    if conf._vid then
        extra_key = conf.policy .. '#' .. conf._vid
    else
        extra_key = conf.policy
    end

    return core.lrucache.plugin_ctx(lrucache, ctx, extra_key, create_limit_obj, conf, plugin_name)
end

-- Charge `cost` requests against the counter resolved from conf/ctx.
-- Returns nothing when allowed (after emitting X-RateLimit-* headers), or
-- (status[, body]) when rejected / on internal error.  dry_run avoids
-- committing the charge for the local policy.
function _M.rate_limit(conf, ctx, name, cost, dry_run)
    core.log.info("ver: ", ctx.conf_version)
    core.log.info("conf: ", core.json.delay_encode(conf, true))

    local lim, err = gen_limit_obj(conf, ctx, name)

    if not lim then
        core.log.error("failed to fetch limit.count object: ", err)
        if conf.allow_degradation then
            return
        end
        return 500
    end

    local conf_key = conf.key
    local key
    if conf.key_type == "var_combination" then
        local err, n_resolved
        key, err, n_resolved = core.utils.resolve_var(conf_key, ctx.var)
        if err then
            core.log.error("could not resolve vars in ", conf_key, " error: ", err)
        end

        if n_resolved == 0 then
            key = nil
        end
    elseif conf.key_type == "constant" then
        key = conf_key
    else
        key = ctx.var[conf_key]
    end

    if key == nil then
        core.log.info("The value of the configured key is empty, use client IP instead")
        -- When the value of key is empty, use client IP instead
        key = ctx.var["remote_addr"]
    end

    key = gen_limit_key(conf, ctx, key)
    core.log.info("limit key: ", key)

    local delay, remaining, reset
    if not conf.policy or conf.policy == "local" then
        -- local backend takes (key, commit, conf, cost)
        delay, remaining, reset = lim:incoming(key, not dry_run, conf, cost)
    else
        -- redis backends take (key, cost)
        delay, remaining, reset = lim:incoming(key, cost)
    end

    local metadata = apisix_plugin.plugin_metadata("limit-count")
    if metadata then
        metadata = metadata.value
    else
        metadata = metadata_defaults
    end
    core.log.info("limit-count plugin-metadata: ", core.json.delay_encode(metadata))

    local set_limit_headers = {
        limit_header = conf.limit_header or metadata.limit_header,
        remaining_header = conf.remaining_header or metadata.remaining_header,
        reset_header = conf.reset_header or metadata.reset_header,
    }
    local phase = get_phase()
    -- headers can't be changed once the response started (log phase)
    local set_header = phase ~= "log"

    if not delay then
        local err = remaining
        if err == "rejected" then
            -- show count limit header when rejected
            if conf.show_limit_quota_header and set_header then
                core.response.set_header(set_limit_headers.limit_header, conf.count,
                                         set_limit_headers.remaining_header, 0,
                                         set_limit_headers.reset_header, reset)
            end

            if conf.rejected_msg then
                return conf.rejected_code, { error_msg = conf.rejected_msg }
            end
            return conf.rejected_code
        end

        core.log.error("failed to limit count: ", err)
        if conf.allow_degradation then
            return
        end
        return 500, {error_msg = "failed to limit count"}
    end

    if conf.show_limit_quota_header and set_header then
        core.response.set_header(set_limit_headers.limit_header, conf.count,
                                 set_limit_headers.remaining_header, remaining,
                                 set_limit_headers.reset_header, reset)
    end
end


return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Local (shared-dict) backend for limit-count.  Wraps resty.limit.count and
-- additionally tracks the window end time so the reset header can be filled.
local limit_count = require("resty.limit.count")

local ngx = ngx
local ngx_time = ngx.time
local assert = assert
local setmetatable = setmetatable
local core = require("apisix.core")

local _M = {}

local mt = {
    __index = _M
}

-- Record the window's end time for `key`; returns seconds until reset.
local function set_endtime(self, key, time_window)
    -- set an end time
    local end_time = ngx_time() + time_window
    -- save to dict by key
    local success, err = self.dict:set(key, end_time, time_window)

    if not success then
        core.log.error("dict set key ", key, " error: ", err)
    end

    local reset = time_window
    return reset
end

-- Look up the remaining seconds until the window for `key` resets
-- (0 when unknown or already expired).
local function read_reset(self, key)
    -- read from dict
    local end_time = (self.dict:get(key) or 0)
    local reset = end_time - ngx_time()
    if reset < 0 then
        reset = 0
    end
    return reset
end

-- Create a local limiter backed by the "<plugin_name>-reset-header" shared
-- dict (which must be declared in the nginx config).
function _M.new(plugin_name, limit, window)
    assert(limit > 0 and window > 0)

    local self = {
        limit_count = limit_count.new(plugin_name, limit, window),
        dict = ngx.shared[plugin_name .. "-reset-header"]
    }

    return setmetatable(self, mt)
end

-- Charge `cost` against `key`.  Returns (delay, remaining, reset).
function _M.incoming(self, key, commit, conf, cost)
    local delay, remaining = self.limit_count:incoming(key, commit, cost)
    local reset

    -- remaining == count - cost only on the first charge of a fresh window,
    -- so that is the moment to stamp the window's end time.
    -- NOTE(review): assumes no concurrent first-charge races the stamp —
    -- confirm against resty.limit.count's atomicity guarantees.
    if remaining == conf.count - cost then
        reset = set_endtime(self, key, conf.time_window)
    else
        reset = read_reset(self, key)
    end

    return delay, remaining, reset
end

return _M
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- Redis-cluster backend for limit-count: a fixed-window counter whose
-- decrement-and-expire logic runs atomically inside a Lua script on Redis.
local redis_cluster = require("apisix.utils.rediscluster")
local core = require("apisix.core")
local setmetatable = setmetatable
local tostring = tostring

local _M = {}


local mt = {
    __index = _M
}


-- Atomic fixed-window script: on a fresh key, seed it with (limit - cost)
-- and the window TTL; otherwise decrement by cost.  Returns {remaining, ttl}.
local script = core.string.compress_script([=[
    assert(tonumber(ARGV[3]) >= 1, "cost must be at least 1")
    local ttl = redis.call('ttl', KEYS[1])
    if ttl < 0 then
        redis.call('set', KEYS[1], ARGV[1] - ARGV[3], 'EX', ARGV[2])
        return {ARGV[1] - ARGV[3], ARGV[2]}
    end
    return {redis.call('incrby', KEYS[1], 0 - ARGV[3]), ttl}
]=])


-- Create a limiter bound to a redis-cluster client.
-- Returns the limiter, or nil plus an error when the client can't be built.
function _M.new(plugin_name, limit, window, conf)
    local cluster_client, client_err =
        redis_cluster.new(conf, "plugin-limit-count-redis-cluster-slot-lock")
    if not cluster_client then
        return nil, client_err
    end

    return setmetatable({
        limit = limit,
        window = window,
        conf = conf,
        plugin_name = plugin_name,
        red_cli = cluster_client,
    }, mt)
end


-- Charge `cost` (default 1) against `key`.
-- Returns (0, remaining, ttl) when allowed, (nil, "rejected", ttl) when the
-- quota is exhausted, or (nil, err, 0) on a Redis error.
function _M.incoming(self, key, cost)
    local full_key = self.plugin_name .. tostring(key)

    local ret, err = self.red_cli:eval(script, 1, full_key, self.limit,
                                       self.window, cost or 1)
    if err then
        return nil, err, 0
    end

    local remaining = ret[1]
    local ttl = ret[2]

    if remaining < 0 then
        return nil, "rejected", ttl
    end

    return 0, remaining, ttl
end


return _M
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Single-node Redis backend for limit-count.  Opens a pooled connection per
-- call and runs the atomic fixed-window script below.
local redis = require("apisix.utils.redis")
local core = require("apisix.core")
local assert = assert
local setmetatable = setmetatable
local tostring = tostring


local _M = {version = 0.3}


local mt = {
    __index = _M
}


-- Atomic fixed-window script: on a fresh key, seed it with (limit - cost)
-- and the window TTL; otherwise decrement by cost.  Returns {remaining, ttl}.
local script = core.string.compress_script([=[
    assert(tonumber(ARGV[3]) >= 1, "cost must be at least 1")
    local ttl = redis.call('ttl', KEYS[1])
    if ttl < 0 then
        redis.call('set', KEYS[1], ARGV[1] - ARGV[3], 'EX', ARGV[2])
        return {ARGV[1] - ARGV[3], ARGV[2]}
    end
    return {redis.call('incrby', KEYS[1], 0 - ARGV[3]), ttl}
]=])


-- Create a limiter; connections are established lazily in incoming().
function _M.new(plugin_name, limit, window, conf)
    assert(limit > 0 and window > 0)

    local self = {
        limit = limit,
        window = window,
        conf = conf,
        plugin_name = plugin_name,
    }
    return setmetatable(self, mt)
end

-- Charge `cost` (default 1) against `key`.
-- Returns (0, remaining, ttl) when allowed, (nil, "rejected", ttl) when the
-- quota is exhausted, or (nil, err, ttl) on a connection/script error.
function _M.incoming(self, key, cost)
    local conf = self.conf
    local red, err = redis.new(conf)
    if not red then
        return red, err, 0
    end

    local limit = self.limit
    local window = self.window
    local res
    key = self.plugin_name .. tostring(key)

    local ttl = 0
    res, err = red:eval(script, 1, key, limit, window, cost or 1)

    if err then
        -- NOTE(review): the connection is not returned to the pool (no
        -- set_keepalive/close) on this path — confirm whether that leak is
        -- acceptable here.
        return nil, err, ttl
    end

    local remaining = res[1]
    ttl = res[2]

    -- return the connection to the pool before deciding on the verdict
    local ok, err = red:set_keepalive(10000, 100)
    if not ok then
        return nil, err, ttl
    end

    if remaining < 0 then
        return nil, "rejected", ttl
    end
    return 0, remaining, ttl
end


return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- limit-req plugin: leaky-bucket request-rate limiting, with the bucket
-- state kept either in worker-local shm ("local" policy) or in Redis /
-- Redis cluster so that the limit is shared across nodes.
local limit_req_new = require("resty.limit.req").new
local core = require("apisix.core")
local redis_schema = require("apisix.utils.redis-schema")
local policy_to_additional_properties = redis_schema.schema
local plugin_name = "limit-req"
local sleep = core.sleep

local redis_single_new = require("apisix.plugins.limit-req.limit-req-redis").new
local redis_cluster_new = require("apisix.plugins.limit-req.limit-req-redis-cluster").new


-- one limiter object cached per plugin conf
local lrucache = core.lrucache.new({
    type = "plugin",
})


local schema = {
    type = "object",
    properties = {
        rate = {type = "number", exclusiveMinimum = 0},
        burst = {type = "number", minimum = 0},
        key = {type = "string"},
        key_type = {
            type = "string",
            enum = {"var", "var_combination"},
            default = "var",
        },
        policy = {
            type = "string",
            enum = {"redis", "redis-cluster", "local"},
            default = "local",
        },
        rejected_code = {
            type = "integer", minimum = 200, maximum = 599, default = 503
        },
        rejected_msg = {
            type = "string", minLength = 1
        },
        nodelay = {
            type = "boolean", default = false
        },
        allow_degradation = {type = "boolean", default = false},
    },
    required = {"rate", "burst", "key"},
    -- pull in the connection properties matching the chosen policy
    ["if"] = {
        properties = {
            policy = {
                enum = {"redis"},
            },
        },
    },
    ["then"] = policy_to_additional_properties.redis,
    ["else"] = {
        ["if"] = {
            properties = {
                policy = {
                    enum = {"redis-cluster"},
                },
            },
        },
        ["then"] = policy_to_additional_properties["redis-cluster"],
    },
}


local _M = {
    version = 0.1,
    priority = 1001,
    name = plugin_name,
    schema = schema,
}


function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    return true
end


-- Instantiate the limiter that matches conf.policy.
-- Returns the limiter object, or (nil, err) when the policy is unknown
-- or the backend could not be created.
local function create_limit_obj(conf)
    local policy = conf.policy

    if policy == "local" then
        core.log.info("create new limit-req plugin instance")
        return limit_req_new("plugin-limit-req", conf.rate, conf.burst)
    end

    if policy == "redis" then
        core.log.info("create new limit-req redis plugin instance")
        return redis_single_new("plugin-limit-req", conf, conf.rate, conf.burst)
    end

    if policy == "redis-cluster" then
        core.log.info("create new limit-req redis-cluster plugin instance")
        return redis_cluster_new("plugin-limit-req", conf, conf.rate, conf.burst)
    end

    return nil, "policy enum not match"
end


-- Resolve the limiting key for this request and namespace it with the
-- conf type/version so that stale limiter state is not reused after a
-- route/service config change.
local function build_limit_key(conf, ctx)
    local key

    if conf.key_type == "var_combination" then
        local resolved, err, n_resolved = core.utils.resolve_var(conf.key, ctx.var)
        if err then
            core.log.error("could not resolve vars in ", conf.key, " error: ", err)
        end
        if n_resolved ~= 0 then
            key = resolved
        end
    else
        key = ctx.var[conf.key]
    end

    if key == nil then
        core.log.info("The value of the configured key is empty, use client IP instead")
        -- When the value of key is empty, use client IP instead
        key = ctx.var["remote_addr"]
    end

    return key .. ctx.conf_type .. ctx.conf_version
end


function _M.access(conf, ctx)
    local limiter, err = core.lrucache.plugin_ctx(lrucache, ctx, nil,
                                                  create_limit_obj, conf)
    if not limiter then
        core.log.error("failed to instantiate a resty.limit.req object: ", err)
        if conf.allow_degradation then
            return
        end
        return 500
    end

    local key = build_limit_key(conf, ctx)
    core.log.info("limit key: ", key)

    local delay, in_err = limiter:incoming(key, true)
    if not delay then
        if in_err == "rejected" then
            if conf.rejected_msg then
                return conf.rejected_code, { error_msg = conf.rejected_msg }
            end
            return conf.rejected_code
        end

        core.log.error("failed to limit req: ", in_err)
        if conf.allow_degradation then
            return
        end
        return 500
    end

    -- smooth the traffic: wait out the computed delay unless disabled
    if delay >= 0.001 and not conf.nodelay then
        sleep(delay)
    end
end

return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- limit-req policy backend: leaky-bucket state shared through a Redis
-- cluster client; the actual accounting lives in limit-req/util.lua.
local redis_cluster = require("apisix.utils.rediscluster")
local util = require("apisix.plugins.limit-req.util")
local setmetatable = setmetatable


local _M = {version = 0.1}


local mt = {
    __index = _M
}


-- Build the limiter state around a long-lived cluster client.
-- Returns (nil, err) when the cluster client cannot be created.
function _M.new(plugin_name, conf, rate, burst)
    local red_cli, err = redis_cluster.new(conf, "plugin-limit-req-redis-cluster-slot-lock")
    if not red_cli then
        return nil, err
    end

    -- rate/burst are scaled by 1000, matching util.incoming's
    -- fixed-point accounting (excess grows by 1000 per request)
    return setmetatable({
        conf = conf,
        plugin_name = plugin_name,
        burst = burst * 1000,
        rate = rate * 1000,
        red_cli = red_cli,
    }, mt)
end


-- Delegate one request to the shared leaky-bucket implementation.
function _M.incoming(self, key, commit)
    return util.incoming(self, self.red_cli, key, commit)
end


return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- limit-req policy backend: leaky-bucket state stored in a single Redis
-- instance; the actual accounting lives in limit-req/util.lua.
local redis = require("apisix.utils.redis")
local util = require("apisix.plugins.limit-req.util")
local setmetatable = setmetatable


local _M = {version = 0.1}


local mt = {
    __index = _M
}


-- Build the limiter state.
-- rate/burst are scaled by 1000, matching util.incoming's fixed-point
-- accounting (excess grows by 1000 per request, result divided by 1000).
function _M.new(plugin_name, conf, rate, burst)
    local self = {
        conf = conf,
        plugin_name = plugin_name,
        burst = burst * 1000,
        rate = rate * 1000,
    }
    return setmetatable(self, mt)
end


-- Account one request against `key`.
-- A fresh Redis connection is created from self.conf on every call;
-- returns (nil, err) when the connection cannot be established.
function _M.incoming(self, key, commit)
    local red, err = redis.new(self.conf)
    if not red then
        return red, err
    end

    return util.incoming(self, red, key, commit)
end


return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Leaky-bucket accounting shared by the limit-req redis and
-- redis-cluster policy backends.  State is two Redis keys per limit key:
-- the accumulated "excess" (in 1/1000 requests) and the time of the
-- last request (in milliseconds).
local math = require "math"
local abs = math.abs
local max = math.max
local ngx_now = ngx.now
local ngx_null = ngx.null
local tonumber = tonumber


local _M = {version = 0.1}


-- Run one request through the bucket.
-- The `commit` argument controls whether the updated state is written
-- back to Redis.  Returns (delay_seconds, excess_requests) when the
-- request is admitted, (nil, "rejected") when it exceeds the burst,
-- or (nil, err) on a Redis error.
function _M.incoming(self, red, key, commit)
    local now_ms = ngx_now() * 1000

    local prefix = "limit_req" .. ":" .. key
    local excess_key = prefix .. "excess"
    local last_key = prefix .. "last"

    local excess, err = red:get(excess_key)
    if err then
        return nil, err
    end

    local last, last_err = red:get(last_key)
    if last_err then
        return nil, last_err
    end

    if excess == ngx_null or last == ngx_null then
        -- no prior state for this key: start an empty bucket
        excess = 0
    else
        -- drain the bucket by rate * elapsed, then add this request (1000)
        local elapsed_ms = abs(now_ms - tonumber(last))
        excess = max(tonumber(excess) - self.rate * elapsed_ms / 1000 + 1000, 0)

        if excess > self.burst then
            return nil, "rejected"
        end
    end

    if commit then
        local ok, set_err = red:set(excess_key, excess)
        if not ok then
            return nil, set_err
        end

        ok, set_err = red:set(last_key, now_ms)
        if not ok then
            return nil, set_err
        end
    end

    -- delay in seconds, and the excess expressed in whole requests
    return excess / self.rate, excess / 1000
end


return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- log-rotate: a global-scope plugin that periodically renames the nginx
-- access/error logs, signals the master process to reopen them, and
-- optionally compresses/prunes the rotated files.

local core = require("apisix.core")
local timers = require("apisix.timers")
local plugin = require("apisix.plugin")
local process = require("ngx.process")
local signal = require("resty.signal")
local shell = require("resty.shell")
local ipairs = ipairs
local ngx = ngx
local ngx_time = ngx.time
local ngx_update_time = ngx.update_time
local lfs = require("lfs")
local type = type
local io_open = io.open
local os_date = os.date
local os_remove = os.remove
local os_rename = os.rename
local str_sub = string.sub
local str_format = string.format
local str_byte = string.byte
local ngx_sleep = require("apisix.core.utils").sleep
local string_rfind = require("pl.stringx").rfind
local local_conf


local plugin_name = "log-rotate"
local INTERVAL = 60 * 60    -- rotate interval (unit: second)
local MAX_KEPT = 24 * 7     -- max number of log files will be kept
local MAX_SIZE = -1         -- max size of file will be rotated
local COMPRESSION_FILE_SUFFIX = ".tar.gz"  -- compression file suffix
local rotate_time           -- next scheduled rotation timestamp (epoch seconds)
local default_logs          -- cached {type, file, new_file} info per log file
local enable_compression = false
local DEFAULT_ACCESS_LOG_FILENAME = "access.log"
local DEFAULT_ERROR_LOG_FILENAME = "error.log"
local SLASH_BYTE = str_byte("/")

-- the plugin takes no per-route configuration; behaviour is driven by
-- plugin_attr in the static config (see rotate() below)
local schema = {
    type = "object",
    properties = {},
}


local _M = {
    version = 0.1,
    priority = 100,
    name = plugin_name,
    schema = schema,
    scope = "global",
}


-- Return true when `path` can be opened for reading.
local function file_exists(path)
    local file = io_open(path, "r")
    if file then
        file:close()
    end
    return file ~= nil
end


-- Resolve (directory, filename) of the configured access/error log.
-- Falls back to <nginx prefix>/logs/<file_type> when the path is not
-- configured or has no directory component.
local function get_log_path_info(file_type)
    local_conf = core.config.local_conf()
    local conf_path
    if file_type == "error.log" then
        conf_path = local_conf and local_conf.nginx_config and
            local_conf.nginx_config.error_log
    else
        conf_path = local_conf and local_conf.nginx_config and
            local_conf.nginx_config.http and
            local_conf.nginx_config.http.access_log
    end

    local prefix = ngx.config.prefix()

    if conf_path then
        -- relative path
        if str_byte(conf_path) ~= SLASH_BYTE then
            conf_path = prefix .. conf_path
        end
        local n = string_rfind(conf_path, "/")
        if n ~= nil and n ~= #conf_path then
            local dir = str_sub(conf_path, 1, n)
            local name = str_sub(conf_path, n + 1)
            return dir, name
        end
    end

    return prefix .. "logs/", file_type
end


-- Descending sort: newest rotated file (lexicographically greatest
-- timestamp prefix) first.
local function tab_sort_comp(a, b)
    return a > b
end


-- List rotated files (plain or compressed) for `log_file_name` in its
-- log directory, newest first.  Rotated files are recognised by the
-- "<date>__<name>" naming produced by rename_file().
local function scan_log_folder(log_file_name)
    local t = {}

    local log_dir, log_name = get_log_path_info(log_file_name)

    local compression_log_type = log_name .. COMPRESSION_FILE_SUFFIX
    for file in lfs.dir(log_dir) do
        local n = string_rfind(file, "__")
        if n ~= nil then
            local log_type = file:sub(n + 2)
            if log_type == log_name or log_type == compression_log_type then
                core.table.insert(t, file)
            end
        end
    end

    core.table.sort(t, tab_sort_comp)
    return t, log_dir
end


-- Rename the live log file to its dated name.  Returns the new path,
-- or nil when the log is disabled ("off") or the rename failed.
local function rename_file(log, date_str)
    local new_file
    if not log.new_file then
        core.log.warn(log.type, " is off")
        return
    end

    new_file = str_format(log.new_file, date_str)
    if file_exists(new_file) then
        core.log.info("file exist: ", new_file)
        return new_file
    end

    local ok, err = os_rename(log.file, new_file)
    if not ok then
        core.log.error("move file from ", log.file, " to ", new_file,
                       " res:", ok, " msg:", err)
        return
    end

    return new_file
end


-- Shell out to tar to compress a rotated file, then delete the
-- uncompressed original.  `timeout` is passed to resty.shell (ms).
local function compression_file(new_file, timeout)
    if not new_file or type(new_file) ~= "string" then
        core.log.info("compression file: ", new_file, " invalid")
        return
    end

    local n = string_rfind(new_file, "/")
    local new_filepath = str_sub(new_file, 1, n)
    local new_filename = str_sub(new_file, n + 1)
    local com_filename = new_filename .. COMPRESSION_FILE_SUFFIX
    local cmd = str_format("cd %s && tar -zcf %s %s", new_filepath,
                           com_filename, new_filename)
    core.log.info("log file compress command: " .. cmd)

    local ok, stdout, stderr, reason, status = shell.run(cmd, nil, timeout, nil)
    if not ok then
        core.log.error("compress log file from ", new_filename, " to ", com_filename,
                       " fail, stdout: ", stdout, " stderr: ", stderr, " reason: ", reason,
                       " status: ", status)
        return
    end

    ok, stderr = os_remove(new_file)
    if stderr then
        core.log.error("remove uncompressed log file: ", new_file,
                       " fail, err: ", stderr, " res:", ok)
    end
end


-- Populate logs_info[log_type] with the live file path and the
-- "%s__<name>" rename template; `file`/`new_file` stay nil when the
-- log is configured as "off".
local function init_default_logs(logs_info, log_type)
    local filepath, filename = get_log_path_info(log_type)
    logs_info[log_type] = { type = log_type }
    if filename ~= "off" then
        logs_info[log_type].file = filepath .. filename
        logs_info[log_type].new_file = filepath .. "/%s__" .. filename
    end
end


-- Size of `file` in bytes; 0 when it cannot be stat'ed.
local function file_size(file)
    local attr = lfs.attributes(file)
    if attr then
        return attr.size
    end
    return 0
end


-- Rotate every log in `files`: rename, signal the nginx master to
-- reopen (USR1), optionally compress, and prune files beyond max_kept.
local function rotate_file(files, now_time, max_kept, timeout)
    if core.table.isempty(files) then
        return
    end

    local new_files = core.table.new(2, 0)
    -- rename the log files
    for _, file in ipairs(files) do
        local now_date = os_date("%Y-%m-%d_%H-%M-%S", now_time)
        local new_file = rename_file(default_logs[file], now_date)
        if not new_file then
            -- NOTE(review): an early return here skips the reopen signal
            -- and pruning for the remaining files as well
            return
        end

        core.table.insert(new_files, new_file)
    end

    -- send signal to reopen log files
    local pid = process.get_master_pid()
    core.log.warn("send USR1 signal to master process [", pid, "] for reopening log file")
    local ok, err = signal.kill(pid, signal.signum("USR1"))
    if not ok then
        core.log.error("failed to send USR1 signal for reopening log file: ", err)
    end

    if enable_compression then
        -- Waiting for nginx reopen files
        -- to avoid losing logs during compression
        ngx_sleep(0.5)

        for _, new_file in ipairs(new_files) do
            compression_file(new_file, timeout)
        end
    end

    for _, file in ipairs(files) do
        -- clean the oldest file
        local log_list, log_dir = scan_log_folder(file)
        for i = max_kept + 1, #log_list do
            local path = log_dir .. log_list[i]
            local ok, err = os_remove(path)
            if err then
                core.log.error("remove old log file: ", path, " err: ", err, " res:", ok)
            end
        end
    end
end


-- Timer callback: read plugin_attr overrides, then rotate either on the
-- interval boundary or (when max_size > 0) as soon as a file exceeds it.
local function rotate()
    local interval = INTERVAL
    local max_kept = MAX_KEPT
    local max_size = MAX_SIZE
    local attr = plugin.plugin_attr(plugin_name)
    local timeout = 10000 -- default timeout 10 seconds
    if attr then
        interval = attr.interval or interval
        max_kept = attr.max_kept or max_kept
        max_size = attr.max_size or max_size
        timeout = attr.timeout or timeout
        enable_compression = attr.enable_compression or enable_compression
    end

    core.log.info("rotate interval:", interval)
    core.log.info("rotate max keep:", max_kept)
    core.log.info("rotate max size:", max_size)
    core.log.info("rotate timeout:", timeout)

    if not default_logs then
        -- first init default log filepath and filename
        default_logs = {}
        init_default_logs(default_logs, DEFAULT_ACCESS_LOG_FILENAME)
        init_default_logs(default_logs, DEFAULT_ERROR_LOG_FILENAME)
    end

    ngx_update_time()
    local now_time = ngx_time()
    if not rotate_time then
        -- first init rotate time: align to the next interval boundary
        rotate_time = now_time + interval - (now_time % interval)
        core.log.info("first init rotate time is: ", rotate_time)
        return
    end

    if now_time >= rotate_time then
        local files = {DEFAULT_ACCESS_LOG_FILENAME, DEFAULT_ERROR_LOG_FILENAME}
        rotate_file(files, now_time, max_kept, timeout)

        -- reset rotate time
        rotate_time = rotate_time + interval

    elseif max_size > 0 then
        -- size-triggered rotation of whichever file crossed the threshold
        local access_log_file_size = file_size(default_logs[DEFAULT_ACCESS_LOG_FILENAME].file)
        local error_log_file_size = file_size(default_logs[DEFAULT_ERROR_LOG_FILENAME].file)
        local files = core.table.new(2, 0)

        if access_log_file_size >= max_size then
            core.table.insert(files, DEFAULT_ACCESS_LOG_FILENAME)
        end

        if error_log_file_size >= max_size then
            core.table.insert(files, DEFAULT_ERROR_LOG_FILENAME)
        end

        rotate_file(files, now_time, max_kept, timeout)
    end
end


function _M.init()
    timers.register_timer("plugin#log-rotate", rotate, true)
end


function _M.destroy()
    timers.unregister_timer("plugin#log-rotate", true)
end


return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- loggly: batched log shipper for Loggly (SolarWinds).  Supports the
-- classic rfc5424 syslog-over-UDP endpoint as well as the HTTP(S) bulk
-- endpoint, selected via plugin metadata `protocol`.
local core = require("apisix.core")
local plugin = require("apisix.plugin")
local bp_manager_mod = require("apisix.utils.batch-processor-manager")
local log_util = require("apisix.utils.log-util")
local path = require("pl.path")
local http = require("resty.http")
local ngx = ngx
local tostring = tostring
local pairs = pairs
local tab_concat = table.concat
local udp = ngx.socket.udp

local plugin_name = "loggly"
local batch_processor_manager = bp_manager_mod.new(plugin_name)


-- syslog severity levels (numeric values used to build the PRI field)
-- NOTE(review): "EMEGR" (sic, should read EMERG) is part of the accepted
-- config enum; renaming it would break existing configurations.
local severity = {
    EMEGR = 0, -- system is unusable
    ALERT = 1, -- action must be taken immediately
    CRIT = 2, -- critical conditions
    ERR = 3, -- error conditions
    WARNING = 4, -- warning conditions
    NOTICE = 5, -- normal but significant condition
    INFO = 6, -- informational
    DEBUG = 7, -- debug-level messages
}


-- accept both upper- and lower-case spellings in the config
local severity_enums = {}
do
    for k, _ in pairs(severity) do
        severity_enums[#severity_enums+1] = k
        severity_enums[#severity_enums+1] = k:lower()
    end
end


local schema = {
    type = "object",
    properties = {
        customer_token = {type = "string"},
        severity = {
            type = "string",
            default = "INFO",
            enum = severity_enums,
            description = "base severity log level",
        },
        include_req_body = {type = "boolean", default = false},
        include_req_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
        include_resp_body = {type = "boolean", default = false},
        include_resp_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
        tags = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
                -- we prevent of having `tag=` prefix
                pattern = "^(?!tag=)[ -~]*",
            },
            default = {"apisix"}
        },
        ssl_verify = {
            -- applicable for https protocol
            type = "boolean",
            default = true
        },
        log_format = {type = "object"},
        severity_map = {
            type = "object",
            description = "upstream response code vs syslog severity mapping",
            patternProperties = {
                ["^[1-5][0-9]{2}$"] = {
                    description = "keys are HTTP status code, values are severity",
                    type = "string",
                    enum = severity_enums
                },
            },
            additionalProperties = false
        }
    },
    required = {"customer_token"}
}


-- fallback endpoint settings used when no plugin metadata is configured
local defaults = {
    host = "logs-01.loggly.com",
    port = 514,
    protocol = "syslog",
    timeout = 5000
}


local metadata_schema = {
    type = "object",
    properties = {
        host = {
            type = "string",
            default = defaults.host
        },
        port = {
            type = "integer",
            default = defaults.port
        },
        protocol = {
            type = "string",
            default = defaults.protocol,
            -- in case of http and https, we use bulk endpoints
            enum = {"syslog", "http", "https"}
        },
        timeout = {
            type = "integer",
            minimum = 1,
            default= defaults.timeout
        },
        log_format = {
            type = "object",
        }
    }
}


local _M = {
    version = 0.1,
    priority = 411,
    name = plugin_name,
    schema = batch_processor_manager:wrap_schema(schema),
    metadata_schema = metadata_schema
}


-- Validate plugin (or metadata) config; also precomputes the
-- status-code -> numeric-severity cache on the conf table.
function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_METADATA then
        return core.schema.check(metadata_schema, conf)
    end

    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return nil, err
    end

    if conf.severity_map then
        local cache = {}
        for k, v in pairs(conf.severity_map) do
            cache[k] = severity[v:upper()]
        end
        conf._severity_cache = cache
    end
    return log_util.check_log_schema(conf)
end


function _M.body_filter(conf, ctx)
    log_util.collect_body(conf, ctx)
end


-- Build one log line for this request: plain JSON for the HTTP bulk
-- protocols, or a full rfc5424 syslog frame for the syslog protocol.
local function generate_log_message(conf, ctx)
    local entry = log_util.get_log_entry(plugin_name, conf, ctx)
    local json_str, err = core.json.encode(entry)
    if not json_str then
        core.log.error('error occurred while encoding the data: ', err)
        return nil
    end

    local metadata = plugin.plugin_metadata(plugin_name)
    if metadata and metadata.value.protocol ~= "syslog" then
        return json_str
    end

    -- generate rfc5424 compliant syslog event
    local timestamp = log_util.get_rfc3339_zulu_timestamp()
    local taglist = {}
    if conf.tags then
        for i = 1, #conf.tags do
            core.table.insert(taglist, "tag=\"" .. conf.tags[i] .. "\"")
        end
    end

    -- per-response severity override (from severity_map) beats the
    -- configured base severity
    local message_severity = severity[conf.severity:upper()]
    if conf._severity_cache and conf._severity_cache[tostring(ngx.status)] then
        message_severity = conf._severity_cache[tostring(ngx.status)]
    end

    local message = {
        -- facility LOG_USER - random user level message
        "<".. tostring(8 + message_severity) .. ">1",-- <PRI>VERSION, syslog protocol version 1
        timestamp, -- timestamp
        ctx.var.host or "-", -- hostname
        "apisix", -- appname
        ctx.var.pid, -- proc-id
        "-", -- msgid
        "[" .. conf.customer_token .. "@41058 " .. tab_concat(taglist, " ") .. "]",
        json_str
    }

    return tab_concat(message, " ")
end


-- Ship one syslog frame over UDP.  Returns (false, err_msg) on failure;
-- connection errors and send errors are both logged.
local function send_data_over_udp(message, metadata)
    local err_msg
    local res = true
    local sock = udp()
    local host, port = metadata.value.host, metadata.value.port
    sock:settimeout(metadata.value.timeout)

    local ok, err = sock:setpeername(host, port)

    if not ok then
        core.log.error("failed to send log: ", err)
        return false, "failed to connect to UDP server: host[" .. host
                      .. "] port[" .. tostring(port) .. "] err: " .. err
    end

    ok, err = sock:send(message)
    if not ok then
        res = false
        core.log.error("failed to send log: ", err)
        err_msg = "failed to send data to UDP server: host[" .. host
                  .. "] port[" .. tostring(port) .. "] err:" .. err
    end

    ok, err = sock:close()
    if not ok then
        core.log.error("failed to close the UDP connection, host[",
                       host, "] port[", port, "] ", err)
    end

    return res, err_msg
end


-- POST a newline-joined batch to Loggly's HTTP bulk endpoint.
-- The scheme is derived from metadata.protocol unless the configured
-- host already carries an http(s) prefix.
local function send_bulk_over_http(message, metadata, conf)
    local endpoint = path.join(metadata.value.host, "bulk", conf.customer_token, "tag", "bulk")
    local has_prefix = core.string.has_prefix(metadata.value.host, "http")
    if not has_prefix then
        if metadata.value.protocol == "http" then
            endpoint = "http://" .. endpoint
        else
            endpoint = "https://" .. endpoint
        end
    end

    local httpc = http.new()
    httpc:set_timeout(metadata.value.timeout)
    local res, err = httpc:request_uri(endpoint, {
        ssl_verify = conf.ssl_verify,
        method = "POST",
        body = message,
        headers = {
            ["Content-Type"] = "application/json",
            ["X-LOGGLY-TAG"] = conf.tags
        },
    })

    if not res then
        return false, "failed to write log to loggly, " .. err
    end

    if res.status ~= 200 then
        local body = core.json.decode(res.body)
        if not body then
            return false, "failed to send log to loggly, http status code: " .. res.status
        else
            return false, "failed to send log to loggly, http status code: " .. res.status
                          .. " response body: ".. res.body
        end
    end

    return true
end


-- forward declaration; assigned per request inside _M.log so the
-- closure can capture that request's conf
local handle_http_payload

-- Batch-processor callback: ship all pending entries, choosing the
-- transport from plugin metadata (defaults when none configured).
local function handle_log(entries)
    local metadata = plugin.plugin_metadata(plugin_name)
    core.log.info("metadata: ", core.json.delay_encode(metadata))

    if not metadata then
        core.log.info("received nil metadata: using metadata defaults: ",
                      core.json.delay_encode(defaults, true))
        metadata = {}
        metadata.value = defaults
    end
    core.log.info("sending a batch logs to ", metadata.value.host)

    if metadata.value.protocol == "syslog" then
        for i = 1, #entries do
            local ok, err = send_data_over_udp(entries[i], metadata)
            if not ok then
                return false, err, i
            end
        end
    else
        return handle_http_payload(entries, metadata)
    end

    return true
end


function _M.log(conf, ctx)
    local log_data = generate_log_message(conf, ctx)
    if not log_data then
        return
    end

    -- NOTE(review): handle_http_payload is reassigned on every request;
    -- the batch processor created below keeps whichever closure (and
    -- conf) was current when the processor was created
    handle_http_payload = function (entries, metadata)
        -- loggly bulk endpoint expects entries concatenated in newline("\n")
        local message = tab_concat(entries, "\n")
        return send_bulk_over_http(message, metadata, conf)
    end

    if batch_processor_manager:add_entry(conf, log_data) then
        return
    end

    batch_processor_manager:add_entry_to_new_processor(conf, log_data, ctx, handle_log)
end


return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- loki-logger: batched log shipper for Grafana Loki's push API
-- (/loki/api/v1/push), with per-request label resolution and
-- nanosecond timestamps built from the request start time.

local bp_manager_mod = require("apisix.utils.batch-processor-manager")
local log_util = require("apisix.utils.log-util")
local core = require("apisix.core")
local http = require("resty.http")
local new_tab = require("table.new")

local pairs = pairs
local ipairs = ipairs
local tostring = tostring
local math_random = math.random
local table_insert = table.insert
local ngx = ngx
local str_format = core.string.format

local plugin_name = "loki-logger"
local batch_processor_manager = bp_manager_mod.new("loki logger")

local schema = {
    type = "object",
    properties = {
        -- core configurations
        endpoint_addrs = {
            type = "array",
            minItems = 1,
            items = core.schema.uri_def,
        },
        endpoint_uri = {
            type = "string",
            minLength = 1,
            default = "/loki/api/v1/push"
        },
        tenant_id = {type = "string", default = "fake"},
        headers = {
            type = "object",
            patternProperties = {
                [".*"] = {
                    type = "string",
                    minLength = 1,
                },
            },
        },
        log_labels = {
            type = "object",
            patternProperties = {
                [".*"] = {
                    type = "string",
                    minLength = 1,
                },
            },
            default = {
                job = "apisix",
            },
        },

        -- connection layer configurations
        ssl_verify = {type = "boolean", default = false},
        timeout = {
            type = "integer",
            minimum = 1,
            maximum = 60000,
            default = 3000,
            description = "timeout in milliseconds",
        },
        keepalive = {type = "boolean", default = true},
        keepalive_timeout = {
            type = "integer",
            minimum = 1000,
            default = 60000,
            description = "keepalive timeout in milliseconds",
        },
        keepalive_pool = {type = "integer", minimum = 1, default = 5},

        -- logger related configurations
        log_format = {type = "object"},
        include_req_body = {type = "boolean", default = false},
        include_req_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
        include_resp_body = {type = "boolean", default = false},
        include_resp_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
    },
    required = {"endpoint_addrs"}
}


local metadata_schema = {
    type = "object",
    properties = {
        log_format = {
            type = "object"
        }
    },
}


local _M = {
    version = 0.1,
    priority = 414,
    name = plugin_name,
    schema = batch_processor_manager:wrap_schema(schema),
    metadata_schema = metadata_schema,
}


-- Validate plugin (or metadata) config, including https/tls sanity
-- checks on the endpoint list.
function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_METADATA then
        return core.schema.check(metadata_schema, conf)
    end

    local check = {"endpoint_addrs"}
    core.utils.check_https(check, conf, plugin_name)
    core.utils.check_tls_bool({"ssl_verify"}, conf, plugin_name)

    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return nil, err
    end
    return log_util.check_log_schema(conf)
end


-- POST one Loki push payload to a randomly chosen endpoint.
-- Returns true, or (false, err) on transport error / non-2xx status.
local function send_http_data(conf, log)
    local headers = conf.headers or {}
    headers = core.table.clone(headers)
    headers["X-Scope-OrgID"] = conf.tenant_id
    headers["Content-Type"] = "application/json"

    local params = {
        headers = headers,
        keepalive = conf.keepalive,
        ssl_verify = conf.ssl_verify,
        method = "POST",
        body = core.json.encode(log)
    }

    if conf.keepalive then
        params.keepalive_timeout = conf.keepalive_timeout
        params.keepalive_pool = conf.keepalive_pool
    end

    local httpc, err = http.new()
    if not httpc then
        return false, str_format("create http client error: %s", err)
    end
    httpc:set_timeout(conf.timeout)

    -- select an random endpoint and build URL
    local endpoint_url = conf.endpoint_addrs[math_random(#conf.endpoint_addrs)] .. conf.endpoint_uri
    local res, err = httpc:request_uri(endpoint_url, params)
    if not res then
        return false, err
    end

    if res.status >= 300 then
        return false, str_format("loki server returned status: %d, body: %s",
                                 res.status, res.body or "")
    end

    return true
end


function _M.body_filter(conf, ctx)
    log_util.collect_body(conf, ctx)
end


function _M.log(conf, ctx)
    local entry = log_util.get_log_entry(plugin_name, conf, ctx)

    if not entry.route_id then
        entry.route_id = "no-matched"
    end

    -- insert start time as log time, multiply to nanoseconds
    -- use string concat to circumvent 64bit integers that LuaVM cannot handle
    -- that is, first process the decimal part of the millisecond value
    -- and then add 6 zeros by string concatenation
    entry.loki_log_time = tostring(ngx.req.start_time() * 1000) .. "000000"

    if batch_processor_manager:add_entry(conf, entry) then
        return
    end

    local labels = conf.log_labels

    -- parsing possible variables in label value
    -- NOTE(review): this writes the resolved value back into conf.log_labels,
    -- so label resolution persists on the shared conf table
    for key, value in pairs(labels) do
        local new_val, err, n_resolved = core.utils.resolve_var(value, ctx.var)
        if not err and n_resolved > 0 then
            labels[key] = new_val
        end
    end

    -- generate a function to be executed by the batch processor
    local func = function(entries)
        -- build loki request data
        local data = {
            streams = {
                {
                    stream = labels,
                    values = new_tab(1, 0),
                }
            }
        }

        -- add all entries to the batch
        for _, entry in ipairs(entries) do
            local log_time = entry.loki_log_time
            entry.loki_log_time = nil -- clean logger internal field

            table_insert(data.streams[1].values, {
                log_time, core.json.encode(entry)
            })
        end

        return send_http_data(conf, data)
    end

    batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func)
end


return _M
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- mcp-bridge: bridges an MCP (Model Context Protocol) client connected over
-- HTTP/SSE to a local subprocess speaking MCP over stdio.  The subprocess is
-- spawned per connection via ngx.pipe; its stdout lines are forwarded to the
-- client through the server transport, stderr lines are wrapped into
-- "notifications/stderr" JSON-RPC notifications.
local unpack = unpack
local ngx = ngx
local thread_spawn = ngx.thread.spawn
local thread_kill = ngx.thread.kill
local worker_exiting = ngx.worker.exiting
local resty_signal = require("resty.signal")
local core = require("apisix.core")
local pipe = require("ngx.pipe")

local mcp_server_wrapper = require("apisix.plugins.mcp.server_wrapper")

local schema = {
    type = "object",
    properties = {
        -- URI prefix under which the SSE/message endpoints are exposed.
        -- NOTE(review): default "" conflicts with minLength = 1 — an
        -- unset base_uri gets the empty-string default, which the schema
        -- itself would reject if validated; confirm intended.
        base_uri = {
            type = "string",
            minLength = 1,
            default = "",
        },
        -- executable to spawn as the stdio MCP server
        command = {
            type = "string",
            minLength = 1,
        },
        -- optional argv for the command
        args = {
            type = "array",
            items = {
                type = "string",
            },
            minItems = 0,
        },
    },
    required = {
        "command"
    },
}

local plugin_name = "mcp-bridge"

local _M = {
    version = 0.1,
    priority = 510,
    name = plugin_name,
    schema = schema,
}


function _M.check_schema(conf, schema_type)
    return core.schema.check(schema, conf)
end


-- Returns the on_connect callback: spawns the subprocess and starts a light
-- thread that pumps its stdout/stderr to the client until the worker exits
-- or a transport send fails.
local function on_connect(conf, ctx)
    return function(additional)
        local proc, err = pipe.spawn({conf.command, unpack(conf.args or {})})
        if not proc then
            core.log.error("failed to spawn mcp process: ", err)
            return 500
        end
        -- no stdin timeout; 100ms read timeouts on stdout/stderr so the
        -- loop below alternates between the two streams
        proc:set_timeouts(nil, 100, 100)
        ctx.mcp_bridge_proc = proc

        local server = additional.server

        -- ngx_pipe is a yield operation, so we no longer need
        -- to explicitly yield to other threads by ngx_sleep
        ctx.mcp_bridge_proc_event_loop = thread_spawn(function ()
            local stdout_partial, stderr_partial, need_exit
            while not worker_exiting() do
                -- read all the messages in stdout's pipe, line by line
                -- if there is an incomplete message it is buffered and
                -- spliced before the next message
                -- NOTE(review): the assignment below overwrites the
                -- previously buffered stdout_partial with the current
                -- call's third return value (nil when a full line is
                -- returned), so the buffered partial may be lost rather
                -- than spliced — verify against the ngx.pipe semantics.
                repeat
                    local line, _
                    line, _, stdout_partial = proc:stdout_read_line()
                    if line then
                        local ok, err = server.transport:send(
                            stdout_partial and stdout_partial .. line or line
                        )
                        if not ok then
                            core.log.info("session ", server.session_id,
                                          " exit, failed to send response message: ", err)
                            need_exit = true
                            break
                        end
                        stdout_partial = nil -- luacheck: ignore
                    end
                until not line
                if need_exit then
                    break
                end

                -- same pumping for stderr, wrapped as a JSON-RPC
                -- "notifications/stderr" notification
                repeat
                    local line, _
                    line, _, stderr_partial = proc:stderr_read_line()
                    if line then
                        local ok, err = server.transport:send(
                            '{"jsonrpc":"2.0","method":"notifications/stderr","params":{"content":"'
                            .. (stderr_partial and stderr_partial .. line or line) .. '"}}')
                        if not ok then
                            core.log.info("session ", server.session_id,
                                          " exit, failed to send response message: ", err)
                            need_exit = true
                            break
                        end
                        stderr_partial = "" -- luacheck: ignore
                    end
                until not line
                if need_exit then
                    break
                end
            end
        end)
    end
end


-- Returns the on_client_message callback: forwards the raw client JSON-RPC
-- message to the subprocess's stdin, newline-terminated.
local function on_client_message(conf, ctx)
    return function(message, additional)
        core.log.info("session ", additional.server.session_id,
                      " send message to mcp server: ", additional.raw)
        ctx.mcp_bridge_proc:write(additional.raw .. "\n")
    end
end


-- Returns the on_disconnect callback: stops the pump thread, closes the
-- subprocess's stdin and waits for it, killing it if it did not exit.
local function on_disconnect(conf, ctx)
    return function()
        if ctx.mcp_bridge_proc_event_loop then
            thread_kill(ctx.mcp_bridge_proc_event_loop)
            ctx.mcp_bridge_proc_event_loop = nil
        end

        local proc = ctx.mcp_bridge_proc
        if proc then
            proc:shutdown("stdin")
            -- first wait lets the process exit gracefully; the second wait
            -- probes whether it actually exited (err == "exited" when the
            -- process is already gone)
            proc:wait()
            local _, err = proc:wait() -- check if process not exited then kill it
            if err ~= "exited" then
                proc:kill(resty_signal.signum("KILL") or 9)
            end
        end
    end
end


-- access phase: delegate the SSE/message endpoint handling to the shared
-- MCP server wrapper, wiring up our subprocess-backed event handlers.
function _M.access(conf, ctx)
    return mcp_server_wrapper.access(conf, ctx, {
        event_handler = {
            on_connect = on_connect(conf, ctx),
            on_client_message = on_client_message(conf, ctx),
            on_disconnect = on_disconnect(conf, ctx),
        },
    })
end


return _M
--
-- Message broker backed by an nginx shared dict: the "message" endpoint
-- pushes client messages onto a per-session queue, and the long-lived SSE
-- request polls the queue and dispatches each message to its handler.
local type = type
local setmetatable = setmetatable
local ngx = ngx
local ngx_sleep = ngx.sleep
local thread_spawn = ngx.thread.spawn
local thread_kill = ngx.thread.kill
local worker_exiting = ngx.worker.exiting
local shared_dict = ngx.shared["mcp-session"] -- TODO: rename to something like mcp-broker
local core = require("apisix.core")
local broker_utils = require("apisix.plugins.mcp.broker.utils")

local _M = {}
local mt = { __index = _M }


-- key suffix appended to the session id to form the queue key
local STORAGE_SUFFIX_QUEUE = ":queue"


-- Create a broker bound to one session id.
function _M.new(opts)
    return setmetatable({
        session_id = opts.session_id,
        event_handler = {}
    }, mt)
end


-- Register a callback for an event (see broker_utils.EVENT_MESSAGE).
-- Only one handler per event; a later registration replaces the earlier.
function _M.on(self, event, cb)
    self.event_handler[event] = cb
end


-- Append a raw message string to this session's queue.
-- Returns true, or nil plus an error string.
function _M.push(self, message)
    if not message then
        return nil, "message is nil"
    end
    local ok, err = shared_dict:rpush(self.session_id .. STORAGE_SUFFIX_QUEUE, message)
    if not ok then
        return nil, "failed to push message to queue: " .. err
    end
    return true
end


-- Start the consumer light thread: polls the queue (lpop + 0.1s sleep per
-- iteration) until the worker exits or lpop reports an error, dispatching
-- each JSON-decoded message to the EVENT_MESSAGE handler with the raw
-- string in `additional.raw`.
function _M.start(self)
    self.thread = thread_spawn(function()
        while not worker_exiting() do
            local item, err = shared_dict:lpop(self.session_id .. STORAGE_SUFFIX_QUEUE)
            if err then
                core.log.info("session ", self.session_id,
                              " exit, failed to pop message from queue: ", err)
                break
            end
            if item and type(item) == "string"
                and type(self.event_handler[broker_utils.EVENT_MESSAGE]) == "function" then
                self.event_handler[broker_utils.EVENT_MESSAGE](
                    core.json.decode(item), { raw = item }
                )
            end

            ngx_sleep(0.1) -- yield to other light threads
        end
    end)
end


-- Stop the consumer thread if it is running.
function _M.close(self)
    if self.thread then
        thread_kill(self.thread)
        self.thread = nil
    end
end


return _M
-- Shared event-name constants for the MCP broker implementations and
-- their consumers (e.g. apisix.plugins.mcp.server).
local _M = {
    -- emitted by a broker when a client message is taken off its queue
    EVENT_MESSAGE = "message",
}

return _M
--
-- MCP server object: owns one client session.  Glues together a message
-- broker (inbound client messages) and a transport (outbound SSE frames),
-- and keeps the SSE connection alive with periodic JSON-RPC pings.
local require = require
local setmetatable = setmetatable
local ngx = ngx
local ngx_sleep = ngx.sleep
local thread_spawn = ngx.thread.spawn -- FIX: local alias was misspelled "thread_spwan"
local thread_wait = ngx.thread.wait
local thread_kill = ngx.thread.kill
local worker_exiting = ngx.worker.exiting
local core = require("apisix.core")
local broker_utils = require("apisix.plugins.mcp.broker.utils")


local _M = {}
local mt = { __index = _M }


_M.EVENT_CLIENT_MESSAGE = "event:client_message"


-- TODO: ping requester and handler
-- Create a server for a session; generates a UUID session id when
-- opts.session_id is absent.
function _M.new(opts)
    local session_id = opts.session_id or core.id.gen_uuid_v4()

    -- TODO: configurable broker type
    local message_broker = require("apisix.plugins.mcp.broker.shared_dict").new({
        session_id = session_id,
    })

    -- TODO: configurable transport type
    local transport = require("apisix.plugins.mcp.transport.sse").new()

    local obj = setmetatable({
        opts = opts,
        session_id = session_id,
        next_ping_id = 0,
        transport = transport,
        message_broker = message_broker,
        event_handler = {},
        need_exit = false,
    }, mt)

    -- forward broker messages to the EVENT_CLIENT_MESSAGE handler, if any
    message_broker:on(broker_utils.EVENT_MESSAGE, function (message, additional)
        if obj.event_handler[_M.EVENT_CLIENT_MESSAGE] then
            obj.event_handler[_M.EVENT_CLIENT_MESSAGE](message, additional)
        end
    end)

    return obj
end


-- Register a callback for an event; a later registration replaces the
-- earlier one.
function _M.on(self, event, cb)
    self.event_handler[event] = cb
end


-- Start the broker consumer and block in a ping loop: sends a JSON-RPC
-- "ping" every 30s and returns when the worker is exiting, need_exit is
-- set, or the transport reports a broken client connection.
function _M.start(self)
    self.message_broker:start()

    -- ping loop
    local ping = thread_spawn(function()
        while not worker_exiting() do
            if self.need_exit then
                break
            end

            self.next_ping_id = self.next_ping_id + 1
            local ok, err = self.transport:send(
                '{"jsonrpc": "2.0","method": "ping","id":"ping:' .. self.next_ping_id .. '"}')
            if not ok then
                core.log.info("session ", self.session_id,
                              " exit, failed to send ping message: ", err)
                self.need_exit = true
                break
            end
            ngx_sleep(30)
        end
    end)
    thread_wait(ping)
    thread_kill(ping)
end


-- Release session resources (stops the broker consumer thread).
function _M.close(self)
    if self.message_broker then
        self.message_broker:close()
    end
end


-- Enqueue a raw client message for this session via the broker.
-- Returns true, or nil plus an error string.
function _M.push_message(self, message)
    local ok, err = self.message_broker:push(message)
    if not ok then
        return nil, "failed to push message to broker: " .. err
    end
    return true
end


return _M
--
-- Routes the 2024-11-05 MCP HTTP endpoints (GET <base_uri>/sse and
-- POST <base_uri>/message) to a server object and the caller's handlers.
local ngx = ngx
local ngx_exit = ngx.exit
local re_match = ngx.re.match
local core = require("apisix.core")
local mcp_server = require("apisix.plugins.mcp.server")

local _M = {}

local V241105_ENDPOINT_SSE = "sse"
local V241105_ENDPOINT_MESSAGE = "message"


-- Long-lived GET /sse handler: advertises the message endpoint, wires the
-- caller's handlers to the server and blocks in server:start() until the
-- client disconnects.
local function sse_handler(conf, ctx, opts)
    -- send SSE headers and first chunk
    core.response.set_header("Content-Type", "text/event-stream")
    core.response.set_header("Cache-Control", "no-cache")

    local server = opts.server

    -- send endpoint event to advertise the message endpoint
    server.transport:send(conf.base_uri .. "/message?sessionId=" .. server.session_id, "endpoint")

    if opts.event_handler and opts.event_handler.on_client_message then
        server:on(mcp_server.EVENT_CLIENT_MESSAGE, function(message, additional)
            additional.server = server
            opts.event_handler.on_client_message(message, additional)
        end)
    end

    if opts.event_handler and opts.event_handler.on_connect then
        local code, body = opts.event_handler.on_connect({ server = server })
        if code then
            return code, body
        end
        server:start() -- this is a sync call that only returns when the client disconnects
    end

    -- BUGFIX: the original indexed opts.event_handler.on_disconnect without
    -- the nil-guard used for the other handlers (crash when no
    -- event_handler is supplied), and it only called server:close() when an
    -- on_disconnect handler existed, leaking the broker consumer thread.
    if opts.event_handler and opts.event_handler.on_disconnect then
        opts.event_handler.on_disconnect({ server = server })
    end
    server:close()

    ngx_exit(0) -- exit current phase, skip the upstream module
end


-- POST /message handler: pushes the raw request body onto the session
-- queue.  Returns 400 (no body), 500 (queue failure) or 202 (accepted).
local function message_handler(conf, ctx, opts)
    local body = core.request.get_body(nil, ctx)
    if not body then
        return 400
    end

    local ok, err = opts.server:push_message(body)
    if not ok then
        core.log.error("failed to add task to queue: ", err)
        return 500
    end

    return 202
end


-- access-phase entry point used by MCP plugins (e.g. mcp-bridge).
function _M.access(conf, ctx, opts)
    local m, err = re_match(ctx.var.uri, "^" .. conf.base_uri .. "/(.*)", "jo")
    if err then
        -- FIX: message was garbled ("failed to mcp base uri")
        core.log.info("failed to match mcp base uri: ", err)
        return core.response.exit(404)
    end
    local action = m and m[1] or false
    if not action then
        return core.response.exit(404)
    end

    if action == V241105_ENDPOINT_SSE and core.request.get_method() == "GET" then
        opts.server = mcp_server.new({})
        return sse_handler(conf, ctx, opts)
    end

    if action == V241105_ENDPOINT_MESSAGE and core.request.get_method() == "POST" then
        -- TODO: check ctx.var.arg_sessionId
        -- recover server instead of create
        opts.server = mcp_server.new({ session_id = ctx.var.arg_sessionId })
        return core.response.exit(message_handler(conf, ctx, opts))
    end

    return core.response.exit(404)
end


return _M
--
-- SSE (Server-Sent Events) transport: serializes MCP messages into
-- "event:/data:" frames and writes them directly to the client socket.
local setmetatable = setmetatable
local type = type
local ngx = ngx
local ngx_print = ngx.print
local ngx_flush = ngx.flush
local core = require("apisix.core")

local _M = {}
local mt = { __index = _M }


-- Create a new (stateless) SSE transport instance.
function _M.new()
    return setmetatable({}, mt)
end


-- Write one SSE frame downstream and flush it synchronously.
-- Tables are JSON-encoded; strings are sent verbatim.  The event type
-- defaults to "message".  Returns the usual ok, err pair.
function _M.send(self, message, event_type)
    local payload
    if type(message) == "table" then
        payload = core.json.encode(message)
    else
        payload = message
    end

    local frame = "event: " .. (event_type or "message")
                  .. "\ndata: " .. payload .. "\n\n"

    local ok, err = ngx_print(frame)
    if not ok then
        return ok, "failed to write buffer: " .. err
    end

    return ngx_flush(true)
end


return _M
--
-- mocking: short-circuits the request in the access phase and returns a
-- mocked response, either a fixed response_example or a value randomly
-- generated from a JSON-schema-like response_schema.
local core = require("apisix.core")
local xml2lua = require("xml2lua")

local json = core.json
local math = math
local ngx = ngx
local ngx_re = ngx.re
local pairs = pairs
local string = string
local table = table
local type = type

-- content types the plugin is able to render
local support_content_type = {
    ["application/xml"] = true,
    ["application/json"] = true,
    ["text/plain"] = true,
    ["text/html"] = true,
    ["text/xml"] = true
}

local schema = {
    type = "object",
    properties = {
        -- response delay, default 0.  NOTE(review): the value is passed
        -- straight to ngx.sleep, which takes SECONDS — the original
        -- comment said "ms"; confirm the intended unit.
        delay = { type = "integer", default = 0 },
        -- specify response status,default 200
        response_status = { type = "integer", default = 200, minimum = 100 },
        -- specify response content type, support application/xml, text/plain
        -- and application/json, default application/json
        content_type = { type = "string", default = "application/json;charset=utf8" },
        -- specify response body.
        response_example = { type = "string" },
        -- specify response json schema; if response_example is set, this
        -- conf is ignored and a random response is generated from the schema
        response_schema = { type = "object" },
        -- when true, add an "x-mock-by: APISIX/<version>" response header
        with_mock_header = { type = "boolean", default = true },
        -- extra response headers; values may reference nginx variables
        response_headers = {
            type = "object",
            minProperties = 1,
            patternProperties = {
                ["^[^:]+$"] = {
                    oneOf = {
                        { type = "string" },
                        { type = "number" }
                    }
                }
            },
        }
    },
    anyOf = {
        { required = { "response_example" } },
        { required = { "response_schema" } }
    }
}

local _M = {
    version = 0.1,
    priority = 10900,
    name = "mocking",
    schema = schema,
}

-- Split "type;parameters" (e.g. "application/json;charset=utf8") and
-- return the bare media type (plus the parameter part when present).
local function parse_content_type(content_type)
    if not content_type then
        return ""
    end
    local m = ngx_re.match(content_type, "([ -~]*);([ -~]*)", "jo")
    if m and #m == 2 then
        return m[1], m[2]
    end
    return content_type
end


-- Validate conf and reject content types we cannot render.
function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    local typ = parse_content_type(conf.content_type)
    if not support_content_type[typ] then
        return false, "unsupported content type!"
    end
    return true
end


-- Return the example string, or a random 1-10 char lowercase string.
local function gen_string(example)
    if example and type(example) == "string" then
        return example
    end
    local n = math.random(1, 10)
    local list = {}
    for i = 1, n do
        table.insert(list, string.char(math.random(97, 122)))
    end
    return table.concat(list)
end


-- Return the example number, or a random float in [0, 10000).
local function gen_number(example)
    if example and type(example) == "number" then
        return example
    end
    return math.random() * 10000
end


-- Return the example truncated to an integer, or a random int in [1, 10000].
local function gen_integer(example)
    if example and type(example) == "number" then
        return math.floor(example)
    end
    return math.random(1, 10000)
end


-- Return the example boolean, or a random true/false.
local function gen_boolean(example)
    if example and type(example) == "boolean" then
        return example
    end
    local r = math.random(0, 1)
    if r == 0 then
        return false
    end
    return true
end


-- forward declarations: the three generators are mutually recursive
local gen_array, gen_object, gen_by_property

-- Generate 1-3 random elements from the schema's `items` definition.
function gen_array(property)
    local output = {}
    if property.items == nil then
        return nil
    end
    local v = property.items
    local n = math.random(1, 3)
    for i = 1, n do
        table.insert(output, gen_by_property(v))
    end
    return output
end


-- Generate one value per key in the schema's `properties` map.
function gen_object(property)
    local output = {}
    if not property.properties then
        return output
    end
    for k, v in pairs(property.properties) do
        output[k] = gen_by_property(v)
    end
    return output
end


-- Dispatch on the (case-insensitive) schema `type`; unknown types yield nil.
function gen_by_property(property)
    local typ = string.lower(property.type)
    local example = property.example

    if typ == "array" then
        return gen_array(property)
    end

    if typ == "object" then
        return gen_object(property)
    end

    if typ == "string" then
        return gen_string(example)
    end

    if typ == "number" then
        return gen_number(example)
    end

    if typ == "integer" then
        return gen_integer(example)
    end

    if typ == "boolean" then
        return gen_boolean(example)
    end

    return nil
end


-- access phase: render the mock body, set headers, optionally delay, and
-- return (status, body) so APISIX short-circuits the upstream.
function _M.access(conf, ctx)
    local response_content = ""

    if conf.response_example then
        response_content = conf.response_example
    else
        local output = gen_object(conf.response_schema)
        local typ = parse_content_type(conf.content_type)
        if typ == "application/xml" or typ == "text/xml" then
            response_content = xml2lua.toXml(output, "data")

        elseif typ == "application/json" or typ == "text/plain" then
            response_content = json.encode(output)

        else
            core.log.error("json schema body only support xml and json content type")
        end
    end

    ngx.header["Content-Type"] = conf.content_type
    if conf.with_mock_header then
        ngx.header["x-mock-by"] = "APISIX/" .. core.version.VERSION
    end

    if conf.response_headers then
        for key, value in pairs(conf.response_headers) do
            -- header values may contain nginx variables like $host
            value = core.utils.resolve_var(value, ctx.var)
            core.response.add_header(key, value)
        end
    end

    if conf.delay > 0 then
        ngx.sleep(conf.delay)
    end
    return conf.response_status, core.utils.resolve_var(response_content, ctx.var)
end

return _M
--
-- multi-auth: tries a configured list of auth plugins in order and accepts
-- the request as soon as one of them authenticates it; returns 401 when
-- every plugin in the chain rejects the request.
local core = require("apisix.core")
local require = require
local ipairs = ipairs
local pairs = pairs
local type = type
local plugin = require("apisix.plugin")

local schema = {
    type = "object",
    title = "work with route or service object",
    properties = {
        -- array of single-entry maps: { {key-auth = {...}}, {basic-auth = {...}} }
        auth_plugins = { type = "array", minItems = 2 }
    },
    required = { "auth_plugins" },
}


local plugin_name = "multi-auth"

local _M = {
    version = 0.1,
    priority = 2600,
    type = 'auth',
    name = plugin_name,
    schema = schema
}

-- Validate conf and, for each referenced auth plugin, that it exists, is an
-- auth-type plugin, and that its own schema accepts the nested conf.
function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    -- FIX: iterate the array part with ipairs (deterministic order) instead
    -- of pairs
    for _, auth_plugin in ipairs(conf.auth_plugins) do
        for auth_plugin_name, auth_plugin_conf in pairs(auth_plugin) do
            local auth = plugin.get(auth_plugin_name)
            if auth == nil then
                return false, auth_plugin_name .. " plugin did not found"
            end

            if auth.type ~= 'auth' then
                return false, auth_plugin_name .. " plugin is not supported"
            end

            local ok, err = auth.check_schema(auth_plugin_conf, auth.schema)
            if not ok then
                return false, "plugin " .. auth_plugin_name .. " check schema failed: " .. err
            end
        end
    end

    return true
end

-- rewrite phase: run each auth plugin's rewrite in configured order; the
-- first one that returns nil (success) authenticates the request.  When all
-- fail, log every collected failure and return 401.
function _M.rewrite(conf, ctx)
    local status_code
    local errors = {}

    -- FIX: ipairs so the chain is tried in the order the user configured
    for _, auth_plugin in ipairs(conf.auth_plugins) do
        for auth_plugin_name, auth_plugin_conf in pairs(auth_plugin) do
            local auth = plugin.get(auth_plugin_name)
            -- returns 401 HTTP status code if authentication failed, otherwise returns nothing.
            local auth_code, err = auth.rewrite(auth_plugin_conf, ctx)
            if type(err) == "table" then
                err = err.message -- compat
            end

            status_code = auth_code
            if auth_code == nil then
                core.log.debug(auth_plugin_name .. " succeed to authenticate the request")
                goto authenticated
            else
                core.table.insert(errors, auth_plugin_name ..
                        " failed to authenticate the request, code: "
                        .. auth_code .. ". error: " .. err)
            end
        end
    end

    :: authenticated ::
    if status_code ~= nil then
        for _, error in pairs(errors) do
            core.log.warn(error)
        end
        return 401, { message = "Authorization Failed" }
    end
end

return _M
--
-- node-status: exposes GET /apisix/status, returning this node's id plus
-- nginx connection counters scraped from the stub_status endpoint.
local core = require("apisix.core")
local ngx = ngx
local re_gmatch = ngx.re.gmatch
local ngx_capture = ngx.location.capture
local plugin_name = "node-status"
local apisix_id = core.id.get()
local ipairs = ipairs


local schema = {
    type = "object",
}


local _M = {
    version = 0.1,
    priority = 1000,
    name = plugin_name,
    schema = schema,
    scope = "global",
}


-- reused across calls; cleared before each scrape
local ngx_status = {}
-- counter names matching, in order, the numbers in the stub_status body
local ngx_status_items = {
    "active", "accepted", "handled", "total",
    "reading", "writing", "waiting"
}


-- Scrape the internal /apisix/nginx_status location and return
-- (200, json) on success, the upstream status on capture failure, or
-- (500, err) when the body cannot be parsed.
local function collect()
    local res = ngx_capture("/apisix/nginx_status")
    if res.status ~= 200 then
        return res.status
    end

    -- stub_status body looks like:
    -- Active connections: 2
    -- server accepts handled requests
    --  26 26 84
    -- Reading: 0 Writing: 1 Waiting: 1

    -- pull every integer out of the body, in document order
    local iterator, err = re_gmatch(res.body, [[(\d+)]], "jmo")
    if not iterator then
        return 500, "failed to re.gmatch Nginx status: " .. err
    end

    core.table.clear(ngx_status)
    for _, name in ipairs(ngx_status_items) do
        local val = iterator()
        if not val then
            break
        end

        ngx_status[name] = val[0]
    end

    return 200, core.json.encode({id = apisix_id, status = ngx_status})
end


function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    return true
end


-- Register the public control API endpoint served by this plugin.
function _M.api()
    return {
        {
            methods = {"GET"},
            uri = "/apisix/status",
            handler = collect,
        }
    }
end


return _M
The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied.  See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+--
+
+-- ocsp-stapling plugin: hooks into radixtree_sni certificate loading and,
+-- for SSL objects with ocsp_stapling.enabled, fetches/caches an OCSP
+-- response and staples it onto the TLS handshake.
+local require = require
+local http = require("resty.http")
+local ngx = ngx
+local ngx_ocsp = require("ngx.ocsp")
+local ngx_ssl = require("ngx.ssl")
+local radixtree_sni = require("apisix.ssl.router.radixtree_sni")
+local core = require("apisix.core")
+
+local plugin_name = "ocsp-stapling"
+-- shared dict named after the plugin; caches OCSP responses keyed by the
+-- DER-encoded certificate chain
+local ocsp_resp_cache = ngx.shared[plugin_name]
+
+-- no plugin-level configuration options
+local plugin_schema = {
+    type = "object",
+    properties = {},
+}
+
+local _M = {
+    name = plugin_name,
+    schema = plugin_schema,
+    version = 0.1,
+    priority = -44,
+}
+
+
+function _M.check_schema(conf)
+    return core.schema.check(plugin_schema, conf)
+end
+
+
+-- Resolve the OCSP responder URL from the DER certificate chain, POST an
+-- OCSP request to it, and return the raw response body.
+-- Returns: body on success, or nil plus an error message.
+local function fetch_ocsp_resp(der_cert_chain)
+    core.log.info("fetch ocsp response from remote")
+    local ocsp_url, err = ngx_ocsp.get_ocsp_responder_from_der_chain(der_cert_chain)
+
+    if not ocsp_url then
+        -- if cert not support ocsp, the report error is nil
+        if not err then
+            err = "cert not contains authority_information_access extension"
+        end
+        return nil, "failed to get ocsp url: " .. err
+    end
+
+    local ocsp_req, err = ngx_ocsp.create_ocsp_request(der_cert_chain)
+    if not ocsp_req then
+        return nil, "failed to create ocsp request: " .. err
+    end
+
+    local httpc = http.new()
+    local res, err = httpc:request_uri(ocsp_url, {
+        method = "POST",
+        headers = {
+            ["Content-Type"] = "application/ocsp-request",
+        },
+        body = ocsp_req
+    })
+
+    if not res then
+        return nil, "ocsp responder query failed: " .. err
+    end
+
+    local http_status = res.status
+    if http_status ~= 200 then
+        return nil, "ocsp responder returns bad http status code: "
+                    .. http_status
+    end
+
+    if res.body and #res.body > 0 then
+        return res.body, nil
+    end
+
+    return nil, "ocsp responder returns empty body"
+end
+
+
+-- Fetch (with shared-dict caching, TTL = cache_ttl) and optionally validate
+-- an OCSP response for the given PEM chain, then staple it onto the current
+-- handshake. Returns true, or false plus an error message.
+local function set_ocsp_resp(full_chain_pem_cert, skip_verify, cache_ttl)
+    local der_cert_chain, err = ngx_ssl.cert_pem_to_der(full_chain_pem_cert)
+    if not der_cert_chain then
+        -- NOTE(review): message and err are returned as two separate values
+        -- (comma, not ".."); the caller reads only the first two returns, so
+        -- the underlying error text is dropped — concatenation was likely
+        -- intended. Left as-is (comment-only review).
+        return false, "failed to convert certificate chain from PEM to DER: ", err
+    end
+
+    local ocsp_resp = ocsp_resp_cache:get(der_cert_chain)
+    if ocsp_resp == nil then
+        core.log.info("not ocsp resp cache found, fetch from ocsp responder")
+        ocsp_resp, err = fetch_ocsp_resp(der_cert_chain)
+        if ocsp_resp == nil then
+            return false, err
+        end
+        core.log.info("fetch ocsp resp ok, cache it")
+        ocsp_resp_cache:set(der_cert_chain, ocsp_resp, cache_ttl)
+    end
+
+    if not skip_verify then
+        local ok, err = ngx_ocsp.validate_ocsp_response(ocsp_resp, der_cert_chain)
+        if not ok then
+            return false, "failed to validate ocsp response: " .. err
+        end
+    end
+
+    -- set the OCSP stapling
+    local ok, err = ngx_ocsp.set_ocsp_status_resp(ocsp_resp)
+    if not ok then
+        return false, "failed to set ocsp status response: " .. err
+    end
+
+    return true
+end
+
+
+-- Replacement for radixtree_sni.set_cert_and_key: defers to the original
+-- unless this SSL object enables ocsp_stapling AND the client sent the TLS
+-- status_request extension; in that case the certs are loaded here and an
+-- OCSP response is stapled (a stapling failure is logged, not fatal).
+local original_set_cert_and_key
+local function set_cert_and_key(sni, value)
+    if value.gm then
+        -- should not run with gm plugin
+        core.log.warn("gm plugin enabled, no need to run ocsp-stapling plugin")
+        return original_set_cert_and_key(sni, value)
+    end
+
+    if not value.ocsp_stapling then
+        core.log.info("no 'ocsp_stapling' field found, no need to run ocsp-stapling plugin")
+        return original_set_cert_and_key(sni, value)
+    end
+
+    if not value.ocsp_stapling.enabled then
+        return original_set_cert_and_key(sni, value)
+    end
+
+    if not ngx.ctx.tls_ext_status_req then
+        core.log.info("no status request required, no need to send ocsp response")
+        return original_set_cert_and_key(sni, value)
+    end
+
+    local ok, err = radixtree_sni.set_pem_ssl_key(sni, value.cert, value.key)
+    if not ok then
+        return false, err
+    end
+    local fin_pem_cert = value.cert
+
+    -- multiple certificates support.
+    if value.certs then
+        for i = 1, #value.certs do
+            local cert = value.certs[i]
+            local key = value.keys[i]
+            ok, err = radixtree_sni.set_pem_ssl_key(sni, cert, key)
+            if not ok then
+                return false, err
+            end
+            -- staple against the last certificate in the list
+            fin_pem_cert = cert
+        end
+    end
+
+    local ok, err = set_ocsp_resp(fin_pem_cert,
+                                  value.ocsp_stapling.skip_verify,
+                                  value.ocsp_stapling.cache_ttl)
+    if not ok then
+        core.log.error("no ocsp response send: ", err)
+    end
+
+    return true
+end
+
+
+-- Plugin load hook: monkey-patches radixtree_sni.set_cert_and_key and
+-- injects the ocsp_stapling field into the core SSL schema.
+function _M.init()
+    if core.schema.ssl.properties.gm ~= nil then
+        core.log.error("ocsp-stapling plugin should not run with gm plugin")
+    end
+
+    original_set_cert_and_key = radixtree_sni.set_cert_and_key
+    radixtree_sni.set_cert_and_key = set_cert_and_key
+
+    if core.schema.ssl.properties.ocsp_stapling ~= nil then
+        core.log.error("Field 'ocsp_stapling' is occupied")
+    end
+
+    core.schema.ssl.properties.ocsp_stapling = {
+        type = "object",
+        properties = {
+            enabled = {
+                type = "boolean",
+                default = false,
+            },
+            skip_verify = {
+                type = "boolean",
+                default = false,
+            },
+            cache_ttl = {
+                type = "integer",
+                minimum = 60,
+                default = 3600,
+            },
+        }
+    }
+
+end
+
+
+-- Plugin unload hook: restores the original hook, removes the schema field
+-- and drops all cached OCSP responses.
+function _M.destroy()
+    radixtree_sni.set_cert_and_key = original_set_cert_and_key
+    core.schema.ssl.properties.ocsp_stapling = nil
+    ocsp_resp_cache:flush_all()
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/opa.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/opa.lua
new file mode 100644
index 0000000..0475529
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/opa.lua
@@ -0,0 +1,152 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+-- opa plugin: sends request/route/service/consumer data to an Open Policy
+-- Agent server (POST <host>/v1/data/<policy>) during the access phase and
+-- allows or rejects the request based on the returned decision.
+local core = require("apisix.core")
+local http = require("resty.http")
+local helper = require("apisix.plugins.opa.helper")
+local type = type
+local ipairs = ipairs
+
+local schema = {
+    type = "object",
+    properties = {
+        host = {type = "string"},
+        ssl_verify = {
+            type = "boolean",
+            default = true,
+        },
+        policy = {type = "string"},
+        timeout = {
+            type = "integer",
+            minimum = 1,
+            maximum = 60000,
+            default = 3000,
+            description = "timeout in milliseconds",
+        },
+        keepalive = {type = "boolean", default = true},
+        send_headers_upstream = {
+            type = "array",
+            minItems = 1,
+            items = {
+                type = "string"
+            },
+            description = "list of headers to pass to upstream in request"
+        },
+        keepalive_timeout = {type = "integer", minimum = 1000, default = 60000},
+        keepalive_pool = {type = "integer", minimum = 1, default = 5},
+        with_route = {type = "boolean", default = false},
+        with_service = {type = "boolean", default = false},
+        with_consumer = {type = "boolean", default = false},
+    },
+    required = {"host", "policy"}
+}
+
+
+local _M = {
+    version = 0.1,
+    priority = 2001,
+    name = "opa",
+    schema = schema,
+}
+
+
+-- Validate configuration; also warns (via core.utils helpers) about plain
+-- http hosts and disabled TLS verification before the schema check.
+function _M.check_schema(conf)
+    local check = {"host"}
+    core.utils.check_https(check, conf, _M.name)
+    core.utils.check_tls_bool({"ssl_verify"}, conf, _M.name)
+    return core.schema.check(schema, conf)
+end
+
+
+-- Access-phase hook: POST the OPA input document built by the helper module
+-- and enforce the decision. Unreachable OPA -> 403 (fail closed);
+-- undecodable/malformed decision -> 503; result.allow == false -> the
+-- configured status code (default 403) plus optional reason and headers;
+-- otherwise optionally copy selected decision headers to the upstream
+-- request and let the request through.
+function _M.access(conf, ctx)
+    local body = helper.build_opa_input(conf, ctx, "http")
+
+    local params = {
+        method = "POST",
+        body = core.json.encode(body),
+        headers = {
+            ["Content-Type"] = "application/json",
+        },
+        keepalive = conf.keepalive,
+        ssl_verify = conf.ssl_verify
+    }
+
+    if conf.keepalive then
+        params.keepalive_timeout = conf.keepalive_timeout
+        params.keepalive_pool = conf.keepalive_pool
+    end
+
+    local endpoint = conf.host .. "/v1/data/" .. conf.policy
+
+    local httpc = http.new()
+    httpc:set_timeout(conf.timeout)
+
+    local res, err = httpc:request_uri(endpoint, params)
+
+    -- block by default when decision is unavailable
+    if not res then
+        core.log.error("failed to process OPA decision, err: ", err)
+        return 403
+    end
+
+    -- parse the results of the decision
+    local data, err = core.json.decode(res.body)
+
+    if not data then
+        core.log.error("invalid response body: ", res.body, " err: ", err)
+        return 503
+    end
+
+    if not data.result then
+        core.log.error("invalid OPA decision format: ", res.body,
+                       " err: `result` field does not exist")
+        return 503
+    end
+
+    local result = data.result
+
+    if not result.allow then
+        if result.headers then
+            -- reflect decision headers in the (rejected) response
+            core.response.set_header(result.headers)
+        end
+
+        local status_code = 403
+        if result.status_code then
+            status_code = result.status_code
+        end
+
+        local reason = nil
+        if result.reason then
+            -- table reasons are serialized to JSON for the response body
+            reason = type(result.reason) == "table"
+                     and core.json.encode(result.reason)
+                     or result.reason
+        end
+
+        return status_code, reason
+    else if result.headers and conf.send_headers_upstream then
+            -- allowed: forward only the whitelisted decision headers upstream
+            for _, name in ipairs(conf.send_headers_upstream) do
+                local value = result.headers[name]
+                if value then
+                    core.request.set_header(ctx, name, value)
+                end
+            end
+        end
+    end
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/opa/helper.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/opa/helper.lua
new file mode 100644
index 0000000..638adcf
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/opa/helper.lua
@@ -0,0 +1,117 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.
You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+-- opa/helper: builds the OPA "input" document sent by the opa plugin.
+local core = require("apisix.core")
+local get_service = require("apisix.http.service").get
+local ngx_time = ngx.time
+
+local _M = {}
+
+
+-- build a table of Nginx variables with some generality
+-- between http subsystem and stream subsystem
+local function build_var(conf, ctx)
+    return {
+        server_addr = ctx.var.server_addr,
+        server_port = ctx.var.server_port,
+        remote_addr = ctx.var.remote_addr,
+        remote_port = ctx.var.remote_port,
+        timestamp = ngx_time(),
+    }
+end
+
+
+-- Collect the request attributes (scheme/method/host/port/path/headers/
+-- query) that form input.request for the OPA policy.
+local function build_http_request(conf, ctx)
+    return {
+        scheme = core.request.get_scheme(ctx),
+        method = core.request.get_method(),
+        host = core.request.get_host(ctx),
+        port = core.request.get_port(ctx),
+        path = ctx.var.uri,
+        headers = core.request.headers(ctx),
+        query = core.request.get_uri_args(ctx),
+    }
+end
+
+
+-- Return a copy of the matched route; deepcopy first so stripping the
+-- upstream field does not mutate the cached route object.
+local function build_http_route(conf, ctx, remove_upstream)
+    local route = core.table.deepcopy(ctx.matched_route).value
+
+    if remove_upstream and route and route.upstream then
+        -- unimportant to send upstream info to OPA
+        route.upstream = nil
+    end
+
+    return route
+end
+
+
+-- Return a copy of the bound service (without its upstream), or nil when
+-- the route has no service.
+local function build_http_service(conf, ctx)
+    local service_id = ctx.service_id
+
+    -- possible that there is no service bound to the route
+    if service_id then
+        -- NOTE(review): assumes get_service(service_id) is non-nil whenever
+        -- service_id is set; a stale id would make clone() fail — confirm
+        -- against apisix.http.service semantics.
+        local service = core.table.clone(get_service(service_id)).value
+
+        if service then
+            if service.upstream then
+                service.upstream = nil
+            end
+            return service
+        end
+    end
+
+    return nil
+end
+
+
+-- Return a copy of the authenticated consumer, or nil when there is none.
+local function build_http_consumer(conf, ctx)
+    -- possible that there is no consumer bound to the route
+    if ctx.consumer then
+        return core.table.clone(ctx.consumer)
+    end
+
+    return nil
+end
+
+
+-- Assemble {input = {type, request, var[, route][, consumer][, service]}}
+-- honoring the plugin's with_route/with_consumer/with_service switches.
+function _M.build_opa_input(conf, ctx, subsystem)
+    local data = {
+        type = subsystem,
+        request = build_http_request(conf, ctx),
+        var = build_var(conf, ctx)
+    }
+
+    if conf.with_route then
+        data.route = build_http_route(conf, ctx, true)
+    end
+
+    if conf.with_consumer then
+        data.consumer = build_http_consumer(conf, ctx)
+    end
+
+    if conf.with_service then
+        data.service = build_http_service(conf, ctx)
+    end
+
+    return {
+        input = data,
+    }
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/openfunction.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/openfunction.lua
new file mode 100644
index 0000000..935d6eb
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/openfunction.lua
@@ -0,0 +1,35 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+-- openfunction plugin: thin wrapper over serverless.generic-upstream that
+-- only contributes an extra authorization schema and a request decorator.
+local ngx_encode_base64 = ngx.encode_base64
+local plugin_name, plugin_version, priority = "openfunction", 0.1, -1902
+
+-- extra fields accepted under the plugin's `authorization` config block
+local openfunction_authz_schema = {
+    service_token = {type = "string"}
+}
+
+-- Decorate the outgoing request before it is sent to the function gateway:
+-- inject "authorization: Basic <base64(service_token)>" when configured.
+local function request_processor(conf, ctx, params)
+    local headers = params.headers or {}
+    -- setting authorization headers if authorization.service_token exists
+    if conf.authorization and conf.authorization.service_token then
+        headers["authorization"] = "Basic " .. ngx_encode_base64(conf.authorization.service_token)
+    end
+
+    params.headers = headers
+end
+
+-- The generic-upstream factory supplies the actual plugin implementation.
+return require("apisix.plugins.serverless.generic-upstream")(plugin_name,
+    plugin_version, priority, request_processor, openfunction_authz_schema)
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/openid-connect.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/openid-connect.lua
new file mode 100644
index 0000000..6c94ea5
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/openid-connect.lua
@@ -0,0 +1,717 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+-- + +local core = require("apisix.core") +local ngx_re = require("ngx.re") +local openidc = require("resty.openidc") +local random = require("resty.random") +local string = string +local ngx = ngx +local ipairs = ipairs +local type = type +local concat = table.concat + +local ngx_encode_base64 = ngx.encode_base64 + +local plugin_name = "openid-connect" + + +local schema = { + type = "object", + properties = { + client_id = {type = "string"}, + client_secret = {type = "string"}, + discovery = {type = "string"}, + scope = { + type = "string", + default = "openid", + }, + ssl_verify = { + type = "boolean", + default = false, + }, + timeout = { + type = "integer", + minimum = 1, + default = 3, + description = "timeout in seconds", + }, + introspection_endpoint = { + type = "string" + }, + introspection_endpoint_auth_method = { + type = "string", + default = "client_secret_basic" + }, + token_endpoint_auth_method = { + type = "string", + default = "client_secret_basic" + }, + bearer_only = { + type = "boolean", + default = false, + }, + session = { + type = "object", + properties = { + secret = { + type = "string", + description = "the key used for the encrypt and HMAC calculation", + minLength = 16, + }, + cookie = { + type = "object", + properties = { + lifetime = { + type = "integer", + description = "it holds the cookie lifetime in seconds in the future", + } + } + } + }, + required = {"secret"}, + additionalProperties = false, + }, + realm = { + type = "string", + default = "apisix", + }, + claim_validator = { + type = "object", + properties = { + issuer = { + description = [[Whitelist the vetted issuers of the jwt. + When not passed by the user, the issuer returned by + discovery endpoint will be used. 
In case both are missing, + the issuer will not be validated.]], + type = "object", + properties = { + valid_issuers = { + type = "array", + items = { + type = "string" + } + } + } + }, + audience = { + type = "object", + description = "audience claim value to validate", + properties = { + claim = { + type = "string", + description = "custom claim name", + default = "aud", + }, + required = { + type = "boolean", + description = "audience claim is required", + default = false, + }, + match_with_client_id = { + type = "boolean", + description = "audience must euqal to or includes client_id", + default = false, + } + }, + }, + }, + }, + logout_path = { + type = "string", + default = "/logout", + }, + redirect_uri = { + type = "string", + description = "auto append '.apisix/redirect' to ngx.var.uri if not configured" + }, + post_logout_redirect_uri = { + type = "string", + description = "the URI will be redirect when request logout_path", + }, + unauth_action = { + type = "string", + default = "auth", + enum = {"auth", "deny", "pass"}, + description = "The action performed when client is not authorized. Use auth to " .. + "redirect user to identity provider, deny to respond with 401 Unauthorized, and " .. + "pass to allow the request regardless." + }, + public_key = {type = "string"}, + token_signing_alg_values_expected = {type = "string"}, + use_pkce = { + description = "when set to true the PKCE(Proof Key for Code Exchange) will be used.", + type = "boolean", + default = false + }, + set_access_token_header = { + description = "Whether the access token should be added as a header to the request " .. + "for downstream", + type = "boolean", + default = true + }, + access_token_in_authorization_header = { + description = "Whether the access token should be added in the Authorization " .. 
+ "header as opposed to the X-Access-Token header.", + type = "boolean", + default = false + }, + set_id_token_header = { + description = "Whether the ID token should be added in the X-ID-Token header to " .. + "the request for downstream.", + type = "boolean", + default = true + }, + set_userinfo_header = { + description = "Whether the user info token should be added in the X-Userinfo " .. + "header to the request for downstream.", + type = "boolean", + default = true + }, + set_refresh_token_header = { + description = "Whether the refresh token should be added in the X-Refresh-Token " .. + "header to the request for downstream.", + type = "boolean", + default = false + }, + proxy_opts = { + description = "HTTP proxy server be used to access identity server.", + type = "object", + properties = { + http_proxy = { + type = "string", + description = "HTTP proxy like: http://proxy-server:80.", + }, + https_proxy = { + type = "string", + description = "HTTPS proxy like: http://proxy-server:80.", + }, + http_proxy_authorization = { + type = "string", + description = "Basic [base64 username:password].", + }, + https_proxy_authorization = { + type = "string", + description = "Basic [base64 username:password].", + }, + no_proxy = { + type = "string", + description = "Comma separated list of hosts that should not be proxied.", + } + }, + }, + authorization_params = { + description = "Extra authorization params to the authorize endpoint", + type = "object" + }, + client_rsa_private_key = { + description = "Client RSA private key used to sign JWT.", + type = "string" + }, + client_rsa_private_key_id = { + description = "Client RSA private key ID used to compute a signed JWT.", + type = "string" + }, + client_jwt_assertion_expires_in = { + description = "Life duration of the signed JWT in seconds.", + type = "integer", + default = 60 + }, + renew_access_token_on_expiry = { + description = "Whether to attempt silently renewing the access token.", + type = "boolean", + default = 
true + }, + access_token_expires_in = { + description = "Lifetime of the access token in seconds if expires_in is not present.", + type = "integer" + }, + refresh_session_interval = { + description = "Time interval to refresh user ID token without re-authentication.", + type = "integer" + }, + iat_slack = { + description = "Tolerance of clock skew in seconds with the iat claim in an ID token.", + type = "integer", + default = 120 + }, + accept_none_alg = { + description = "Set to true if the OpenID provider does not sign its ID token.", + type = "boolean", + default = false + }, + accept_unsupported_alg = { + description = "Ignore ID token signature to accept unsupported signature algorithm.", + type = "boolean", + default = true + }, + access_token_expires_leeway = { + description = "Expiration leeway in seconds for access token renewal.", + type = "integer", + default = 0 + }, + force_reauthorize = { + description = "Whether to execute the authorization flow when a token has been cached.", + type = "boolean", + default = false + }, + use_nonce = { + description = "Whether to include nonce parameter in authorization request.", + type = "boolean", + default = false + }, + revoke_tokens_on_logout = { + description = "Notify authorization server a previous token is no longer needed.", + type = "boolean", + default = false + }, + jwk_expires_in = { + description = "Expiration time for JWK cache in seconds.", + type = "integer", + default = 86400 + }, + jwt_verification_cache_ignore = { + description = "Whether to ignore cached verification and re-verify.", + type = "boolean", + default = false + }, + cache_segment = { + description = "Name of a cache segment to differentiate caches.", + type = "string" + }, + introspection_interval = { + description = "TTL of the cached and introspected access token in seconds.", + type = "integer", + default = 0 + }, + introspection_expiry_claim = { + description = "Name of the expiry claim that controls the cached access token 
TTL.", + type = "string" + }, + introspection_addon_headers = { + description = "Extra http headers in introspection", + type = "array", + minItems = 1, + items = { + type = "string", + pattern = "^[^:]+$" + } + }, + required_scopes = { + description = "List of scopes that are required to be granted to the access token", + type = "array", + items = { + type = "string" + } + } + }, + encrypt_fields = {"client_secret", "client_rsa_private_key"}, + required = {"client_id", "client_secret", "discovery"} +} + + +local _M = { + version = 0.2, + priority = 2599, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + if conf.ssl_verify == "no" then + -- we used to set 'ssl_verify' to "no" + conf.ssl_verify = false + end + + if not conf.bearer_only and not conf.session then + core.log.warn("when bearer_only = false, " .. + "you'd better complete the session configuration manually") + conf.session = { + -- generate a secret when bearer_only = false and no secret is configured + secret = ngx_encode_base64(random.bytes(32, true) or random.bytes(32)) + } + end + + local check = {"discovery", "introspection_endpoint", "redirect_uri", + "post_logout_redirect_uri", "proxy_opts.http_proxy", "proxy_opts.https_proxy"} + core.utils.check_https(check, conf, plugin_name) + core.utils.check_tls_bool({"ssl_verify"}, conf, plugin_name) + + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + return true +end + + +local function get_bearer_access_token(ctx) + -- Get Authorization header, maybe. + local auth_header = core.request.header(ctx, "Authorization") + if not auth_header then + -- No Authorization header, get X-Access-Token header, maybe. + local access_token_header = core.request.header(ctx, "X-Access-Token") + if not access_token_header then + -- No X-Access-Token header neither. + return false, nil, nil + end + + -- Return extracted header value. 
+ return true, access_token_header, nil + end + + -- Check format of Authorization header. + local res, err = ngx_re.split(auth_header, " ", nil, nil, 2) + + if not res then + -- No result was returned. + return false, nil, err + elseif #res < 2 then + -- Header doesn't split into enough tokens. + return false, nil, "Invalid Authorization header format." + end + + if string.lower(res[1]) == "bearer" then + -- Return extracted token. + return true, res[2], nil + end + + return false, nil, nil +end + + +local function introspect(ctx, conf) + -- Extract token, maybe. + local has_token, token, err = get_bearer_access_token(ctx) + + if err then + return ngx.HTTP_BAD_REQUEST, err, nil, nil + end + + if not has_token then + -- Could not find token. + + if conf.bearer_only then + -- Token strictly required in request. + ngx.header["WWW-Authenticate"] = 'Bearer realm="' .. conf.realm .. '"' + return ngx.HTTP_UNAUTHORIZED, "No bearer token found in request.", nil, nil + else + -- Return empty result. + return nil, nil, nil, nil + end + end + + if conf.public_key or conf.use_jwks then + local opts = {} + -- Validate token against public key or jwks document of the oidc provider. + -- TODO: In the called method, the openidc module will try to extract + -- the token by itself again -- from a request header or session cookie. + -- It is inefficient that we also need to extract it (just from headers) + -- so we can add it in the configured header. Find a way to use openidc + -- module's internal methods to extract the token. + local valid_issuers + if conf.claim_validator and conf.claim_validator.issuer then + valid_issuers = conf.claim_validator.issuer.valid_issuers + end + if not valid_issuers then + local discovery, discovery_err = openidc.get_discovery_doc(conf) + if discovery_err then + core.log.warn("OIDC access discovery url failed : ", discovery_err) + else + core.log.info("valid_issuers not provided explicitly," .. 
+ " using issuer from discovery doc: ", + discovery.issuer) + valid_issuers = {discovery.issuer} + end + end + if valid_issuers then + opts.valid_issuers = valid_issuers + end + local res, err = openidc.bearer_jwt_verify(conf, opts) + if err then + -- Error while validating or token invalid. + ngx.header["WWW-Authenticate"] = 'Bearer realm="' .. conf.realm .. + '", error="invalid_token", error_description="' .. err .. '"' + return ngx.HTTP_UNAUTHORIZED, err, nil, nil + end + + -- Token successfully validated. + local method = (conf.public_key and "public_key") or (conf.use_jwks and "jwks") + core.log.debug("token validate successfully by ", method) + return res, err, token, res + else + -- Validate token against introspection endpoint. + -- TODO: Same as above for public key validation. + if conf.introspection_addon_headers then + -- http_request_decorator option provided by lua-resty-openidc + conf.http_request_decorator = function(req) + local h = req.headers or {} + for _, name in ipairs(conf.introspection_addon_headers) do + local value = core.request.header(ctx, name) + if value then + h[name] = value + end + end + req.headers = h + return req + end + end + + local res, err = openidc.introspect(conf) + conf.http_request_decorator = nil + + if err then + ngx.header["WWW-Authenticate"] = 'Bearer realm="' .. conf.realm .. + '", error="invalid_token", error_description="' .. err .. '"' + return ngx.HTTP_UNAUTHORIZED, err, nil, nil + end + + -- Token successfully validated and response from the introspection + -- endpoint contains the userinfo. + core.log.debug("token validate successfully by introspection") + return res, err, token, res + end +end + + +local function add_access_token_header(ctx, conf, token) + if token then + -- Add Authorization or X-Access-Token header, respectively, if not already set. 
+ if conf.set_access_token_header then + if conf.access_token_in_authorization_header then + if not core.request.header(ctx, "Authorization") then + -- Add Authorization header. + core.request.set_header(ctx, "Authorization", "Bearer " .. token) + end + else + if not core.request.header(ctx, "X-Access-Token") then + -- Add X-Access-Token header. + core.request.set_header(ctx, "X-Access-Token", token) + end + end + end + end +end + +-- Function to split the scope string into a table +local function split_scopes_by_space(scope_string) + local scopes = {} + for scope in string.gmatch(scope_string, "%S+") do + scopes[scope] = true + end + return scopes +end + +-- Function to check if all required scopes are present +local function required_scopes_present(required_scopes, http_scopes) + for _, scope in ipairs(required_scopes) do + if not http_scopes[scope] then + return false + end + end + return true +end + +function _M.rewrite(plugin_conf, ctx) + local conf = core.table.clone(plugin_conf) + + -- Previously, we multiply conf.timeout before storing it in etcd. + -- If the timeout is too large, we should not multiply it again. + if not (conf.timeout >= 1000 and conf.timeout % 1000 == 0) then + conf.timeout = conf.timeout * 1000 + end + + local path = ctx.var.request_uri + + if not conf.redirect_uri then + -- NOTE: 'lua-resty-openidc' requires that 'redirect_uri' be + -- different from 'uri'. So default to append the + -- '.apisix/redirect' suffix if not configured. + local suffix = "/.apisix/redirect" + local uri = ctx.var.uri + if core.string.has_suffix(uri, suffix) then + -- This is the redirection response from the OIDC provider. + conf.redirect_uri = uri + else + if string.sub(uri, -1, -1) == "/" then + conf.redirect_uri = string.sub(uri, 1, -2) .. suffix + else + conf.redirect_uri = uri .. 
suffix + end + end + core.log.debug("auto set redirect_uri: ", conf.redirect_uri) + end + + if not conf.ssl_verify then + -- openidc use "no" to disable ssl verification + conf.ssl_verify = "no" + end + + if path == (conf.logout_path or "/logout") then + local discovery, discovery_err = openidc.get_discovery_doc(conf) + if discovery_err then + core.log.error("OIDC access discovery url failed : ", discovery_err) + return 503 + end + if conf.post_logout_redirect_uri and not discovery.end_session_endpoint then + -- If the end_session_endpoint field does not exist in the OpenID Provider Discovery + -- Metadata, the redirect_after_logout_uri field is used for redirection. + conf.redirect_after_logout_uri = conf.post_logout_redirect_uri + end + end + + local response, err, session, _ + + if conf.bearer_only or conf.introspection_endpoint or conf.public_key or conf.use_jwks then + -- An introspection endpoint or a public key has been configured. Try to + -- validate the access token from the request, if it is present in a + -- request header. Otherwise, return a nil response. See below for + -- handling of the case where the access token is stored in a session cookie. + local access_token, userinfo + response, err, access_token, userinfo = introspect(ctx, conf) + + if err then + -- Error while validating token or invalid token. + core.log.error("OIDC introspection failed: ", err) + return response + end + + if response then + if conf.required_scopes then + local http_scopes = response.scope and split_scopes_by_space(response.scope) or {} + local is_authorized = required_scopes_present(conf.required_scopes, http_scopes) + if not is_authorized then + core.log.error("OIDC introspection failed: ", "required scopes not present") + local error_response = { + error = "required scopes " .. concat(conf.required_scopes, ", ") .. 
+ " not present" + } + return 403, core.json.encode(error_response) + end + end + + -- jwt audience claim validator + local audience_claim = core.table.try_read_attr(conf, "claim_validator", + "audience", "claim") or "aud" + local audience_value = response[audience_claim] + if core.table.try_read_attr(conf, "claim_validator", "audience", "required") + and not audience_value then + core.log.error("OIDC introspection failed: required audience (", + audience_claim, ") not present") + local error_response = { error = "required audience claim not present" } + return 403, core.json.encode(error_response) + end + if core.table.try_read_attr(conf, "claim_validator", "audience", "match_with_client_id") + and audience_value ~= nil then + local error_response = { error = "mismatched audience" } + local matched = false + if type(audience_value) == "table" then + for _, v in ipairs(audience_value) do + if conf.client_id == v then + matched = true + end + end + if not matched then + core.log.error("OIDC introspection failed: ", + "audience list does not contain the client id") + return 403, core.json.encode(error_response) + end + elseif conf.client_id ~= audience_value then + core.log.error("OIDC introspection failed: ", + "audience does not match the client id") + return 403, core.json.encode(error_response) + end + end + + -- Add configured access token header, maybe. + add_access_token_header(ctx, conf, access_token) + + if userinfo and conf.set_userinfo_header then + -- Set X-Userinfo header to introspection endpoint response. + core.request.set_header(ctx, "X-Userinfo", + ngx_encode_base64(core.json.encode(userinfo))) + end + end + end + + if not response then + -- Either token validation via introspection endpoint or public key is + -- not configured, and/or token could not be extracted from the request. + + local unauth_action = conf.unauth_action + if unauth_action ~= "auth" then + unauth_action = "deny" + end + + -- Authenticate the request. 
This will validate the access token if it + -- is stored in a session cookie, and also renew the token if required. + -- If no token can be extracted, the response will redirect to the ID + -- provider's authorization endpoint to initiate the Relying Party flow. + -- This code path also handles when the ID provider then redirects to + -- the configured redirect URI after successful authentication. + response, err, _, session = openidc.authenticate(conf, nil, unauth_action, conf.session) + + if err then + if session then + session:close() + end + if err == "unauthorized request" then + if conf.unauth_action == "pass" then + return nil + end + return 401 + end + core.log.error("OIDC authentication failed: ", err) + return 500 + end + + if response then + -- If the openidc module has returned a response, it may contain, + -- respectively, the access token, the ID token, the refresh token, + -- and the userinfo. + -- Add respective headers to the request, if so configured. + + -- Add configured access token header, maybe. + add_access_token_header(ctx, conf, response.access_token) + + -- Add X-ID-Token header, maybe. + if response.id_token and conf.set_id_token_header then + local token = core.json.encode(response.id_token) + core.request.set_header(ctx, "X-ID-Token", ngx.encode_base64(token)) + end + + -- Add X-Userinfo header, maybe. + if response.user and conf.set_userinfo_header then + core.request.set_header(ctx, "X-Userinfo", + ngx_encode_base64(core.json.encode(response.user))) + end + + -- Add X-Refresh-Token header, maybe. 
+ if session.data.refresh_token and conf.set_refresh_token_header then + core.request.set_header(ctx, "X-Refresh-Token", session.data.refresh_token) + end + end + end + if session then + session:close() + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/opentelemetry.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/opentelemetry.lua new file mode 100644 index 0000000..d98ac44 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/opentelemetry.lua @@ -0,0 +1,426 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local plugin_name = "opentelemetry" +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local process = require("ngx.process") + +local always_off_sampler_new = require("opentelemetry.trace.sampling.always_off_sampler").new +local always_on_sampler_new = require("opentelemetry.trace.sampling.always_on_sampler").new +local parent_base_sampler_new = require("opentelemetry.trace.sampling.parent_base_sampler").new +local trace_id_ratio_sampler_new = + require("opentelemetry.trace.sampling.trace_id_ratio_sampler").new + +local exporter_client_new = require("opentelemetry.trace.exporter.http_client").new +local otlp_exporter_new = require("opentelemetry.trace.exporter.otlp").new +local batch_span_processor_new = require("opentelemetry.trace.batch_span_processor").new +local id_generator = require("opentelemetry.trace.id_generator") +local tracer_provider_new = require("opentelemetry.trace.tracer_provider").new + +local span_kind = require("opentelemetry.trace.span_kind") +local span_status = require("opentelemetry.trace.span_status") +local resource_new = require("opentelemetry.resource").new +local attr = require("opentelemetry.attribute") + +local context = require("opentelemetry.context").new() +local trace_context_propagator = + require("opentelemetry.trace.propagation.text_map.trace_context_propagator").new() + +local ngx = ngx +local ngx_var = ngx.var +local table = table +local type = type +local pairs = pairs +local ipairs = ipairs +local unpack = unpack +local string_format = string.format + +local lrucache = core.lrucache.new({ + type = 'plugin', count = 128, ttl = 24 * 60 * 60, +}) + +local asterisk = string.byte("*", 1) + +local metadata_schema = { + type = "object", + properties = { + trace_id_source = { + type = "string", + enum = {"x-request-id", "random"}, + description = "the source of trace id", + default = "random", + }, + resource = { + type = "object", + description = "additional resource", + additionalProperties = 
{{type = "boolean"}, {type = "number"}, {type = "string"}}, + }, + collector = { + type = "object", + description = "opentelemetry collector", + properties = { + address = {type = "string", description = "host:port", default = "127.0.0.1:4318"}, + request_timeout = {type = "integer", description = "second uint", default = 3}, + request_headers = { + type = "object", + description = "http headers", + additionalProperties = { + one_of = {{type = "boolean"},{type = "number"}, {type = "string"}}, + }, + } + }, + default = {address = "127.0.0.1:4318", request_timeout = 3} + }, + batch_span_processor = { + type = "object", + description = "batch span processor", + properties = { + drop_on_queue_full = { + type = "boolean", + description = "if true, drop span when queue is full," + .. " otherwise force process batches", + }, + max_queue_size = { + type = "integer", + description = "maximum queue size to buffer spans for delayed processing", + }, + batch_timeout = { + type = "number", + description = "maximum duration for constructing a batch", + }, + inactive_timeout = { + type = "number", + description = "maximum duration for processing batches", + }, + max_export_batch_size = { + type = "integer", + description = "maximum number of spans to process in a single batch", + } + }, + default = {}, + }, + set_ngx_var = { + type = "boolean", + description = "set nginx variables", + default = false, + }, + }, +} + +local schema = { + type = "object", + properties = { + sampler = { + type = "object", + properties = { + name = { + type = "string", + enum = {"always_on", "always_off", "trace_id_ratio", "parent_base"}, + title = "sampling strategy", + default = "always_off" + }, + options = { + type = "object", + properties = { + fraction = { + type = "number", title = "trace_id_ratio fraction", default = 0 + }, + root = { + type = "object", + title = "parent_base root sampler", + properties = { + name = { + type = "string", + enum = {"always_on", "always_off", "trace_id_ratio"}, + 
title = "sampling strategy", + default = "always_off" + }, + options = { + type = "object", + properties = { + fraction = { + type = "number", + title = "trace_id_ratio fraction parameter", + default = 0, + }, + }, + default = {fraction = 0} + } + }, + default = {name = "always_off", options = {fraction = 0}} + }, + }, + default = {fraction = 0, root = {name = "always_off"}} + } + }, + default = {name = "always_off", options = {fraction = 0, root = {name = "always_off"}}} + }, + additional_attributes = { + type = "array", + items = { + type = "string", + minLength = 1, + } + }, + additional_header_prefix_attributes = { + type = "array", + items = { + type = "string", + minLength = 1, + } + } + } +} + + +local _M = { + version = 0.1, + priority = 12009, + name = plugin_name, + schema = schema, + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + local ok, err = core.schema.check(metadata_schema, conf) + if not ok then + return ok, err + end + local check = {"collector.address"} + core.utils.check_https(check, conf, plugin_name) + return true + end + return core.schema.check(schema, conf) +end + + +local hostname +local sampler_factory + +function _M.init() + if process.type() ~= "worker" then + return + end + + sampler_factory = { + always_off = always_off_sampler_new, + always_on = always_on_sampler_new, + parent_base = parent_base_sampler_new, + trace_id_ratio = trace_id_ratio_sampler_new, + } + hostname = core.utils.gethostname() +end + + +local function create_tracer_obj(conf, plugin_info) + if plugin_info.trace_id_source == "x-request-id" then + id_generator.new_ids = function() + local trace_id = core.request.headers()["x-request-id"] or ngx_var.request_id + return trace_id, id_generator.new_span_id() + end + end + -- create exporter + local exporter = otlp_exporter_new(exporter_client_new(plugin_info.collector.address, + plugin_info.collector.request_timeout, + 
plugin_info.collector.request_headers)) + -- create span processor + local batch_span_processor = batch_span_processor_new(exporter, + plugin_info.batch_span_processor) + -- create sampler + local sampler + local sampler_name = conf.sampler.name + local sampler_options = conf.sampler.options + if sampler_name == "parent_base" then + local root_sampler + if sampler_options.root then + local name, fraction = sampler_options.root.name, sampler_options.root.options.fraction + root_sampler = sampler_factory[name](fraction) + else + root_sampler = always_off_sampler_new() + end + sampler = sampler_factory[sampler_name](root_sampler) + else + sampler = sampler_factory[sampler_name](sampler_options.fraction) + end + local resource_attrs = {attr.string("hostname", hostname)} + if plugin_info.resource then + if not plugin_info.resource["service.name"] then + table.insert(resource_attrs, attr.string("service.name", "APISIX")) + end + for k, v in pairs(plugin_info.resource) do + if type(v) == "string" then + table.insert(resource_attrs, attr.string(k, v)) + end + if type(v) == "number" then + table.insert(resource_attrs, attr.double(k, v)) + end + if type(v) == "boolean" then + table.insert(resource_attrs, attr.bool(k, v)) + end + end + end + -- create tracer provider + local tp = tracer_provider_new(batch_span_processor, { + resource = resource_new(unpack(resource_attrs)), + sampler = sampler, + }) + -- create tracer + return tp:tracer("opentelemetry-lua") +end + + +local function inject_attributes(attributes, wanted_attributes, source, with_prefix) + for _, key in ipairs(wanted_attributes) do + local is_key_a_match = #key >= 2 and key:byte(-1) == asterisk and with_prefix + + if is_key_a_match then + local prefix = key:sub(0, -2) + for possible_key, value in pairs(source) do + if core.string.has_prefix(possible_key, prefix) then + core.table.insert(attributes, attr.string(possible_key, value)) + end + end + else + local val = source[key] + if val then + 
core.table.insert(attributes, attr.string(key, val)) + end + end + end +end + + +function _M.rewrite(conf, api_ctx) + local metadata = plugin.plugin_metadata(plugin_name) + if metadata == nil then + core.log.warn("plugin_metadata is required for opentelemetry plugin to working properly") + return + end + core.log.info("metadata: ", core.json.delay_encode(metadata)) + local plugin_info = metadata.value + local vars = api_ctx.var + + local tracer, err = core.lrucache.plugin_ctx(lrucache, api_ctx, nil, + create_tracer_obj, conf, plugin_info) + if not tracer then + core.log.error("failed to fetch tracer object: ", err) + return + end + + local span_name = vars.method + + local attributes = { + attr.string("net.host.name", vars.host), + attr.string("http.method", vars.method), + attr.string("http.scheme", vars.scheme), + attr.string("http.target", vars.request_uri), + attr.string("http.user_agent", vars.http_user_agent), + } + + if api_ctx.curr_req_matched then + table.insert(attributes, attr.string("apisix.route_id", api_ctx.route_id)) + table.insert(attributes, attr.string("apisix.route_name", api_ctx.route_name)) + table.insert(attributes, attr.string("http.route", api_ctx.curr_req_matched._path)) + span_name = span_name .. " " .. 
api_ctx.curr_req_matched._path + end + + if api_ctx.service_id then + table.insert(attributes, attr.string("apisix.service_id", api_ctx.service_id)) + table.insert(attributes, attr.string("apisix.service_name", api_ctx.service_name)) + end + + if conf.additional_attributes then + inject_attributes(attributes, conf.additional_attributes, api_ctx.var, false) + end + + if conf.additional_header_prefix_attributes then + inject_attributes( + attributes, + conf.additional_header_prefix_attributes, + core.request.headers(api_ctx), + true + ) + end + + -- extract trace context from the headers of downstream HTTP request + local upstream_context = trace_context_propagator:extract(context, ngx.req) + + local ctx = tracer:start(upstream_context, span_name, { + kind = span_kind.server, + attributes = attributes, + }) + + if plugin_info.set_ngx_var then + local span_context = ctx:span():context() + ngx_var.opentelemetry_context_traceparent = string_format("00-%s-%s-%02x", + span_context.trace_id, + span_context.span_id, + span_context.trace_flags) + ngx_var.opentelemetry_trace_id = span_context.trace_id + ngx_var.opentelemetry_span_id = span_context.span_id + end + + api_ctx.otel_context_token = ctx:attach() + + -- inject trace context into the headers of upstream HTTP request + trace_context_propagator:inject(ctx, ngx.req) +end + + +function _M.delayed_body_filter(conf, api_ctx) + if api_ctx.otel_context_token and ngx.arg[2] then + local ctx = context:current() + ctx:detach(api_ctx.otel_context_token) + api_ctx.otel_context_token = nil + + -- get span from current context + local span = ctx:span() + local upstream_status = core.response.get_upstream_status(api_ctx) + if upstream_status and upstream_status >= 500 then + span:set_status(span_status.ERROR, + "upstream response status: " .. 
upstream_status) + end + + span:set_attributes(attr.int("http.status_code", upstream_status)) + + span:finish() + end +end + + +-- body_filter maybe not called because of empty http body response +-- so we need to check if the span has finished in log phase +function _M.log(conf, api_ctx) + if api_ctx.otel_context_token then + -- ctx:detach() is not necessary, because of ctx is stored in ngx.ctx + local upstream_status = core.response.get_upstream_status(api_ctx) + + -- get span from current context + local span = context:current():span() + if upstream_status and upstream_status >= 500 then + span:set_status(span_status.ERROR, + "upstream response status: " .. upstream_status) + end + + span:finish() + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/openwhisk.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/openwhisk.lua new file mode 100644 index 0000000..f139925 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/openwhisk.lua @@ -0,0 +1,143 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
--
-- Plugin: openwhisk -- serverless bridge that invokes an Apache OpenWhisk
-- action for the matched route and relays the action's result to the client.

local core = require("apisix.core")
local http = require("resty.http")
local ngx_encode_base64 = ngx.encode_base64
local tostring = tostring

-- OpenWhisk entity-name grammar (namespace / package / action)
local name_pattern = [[\A([\w]|[\w][\w@ .-]*[\w@.-]+)\z]]

local schema = {
    type = "object",
    properties = {
        api_host = {type = "string"},
        ssl_verify = {
            type = "boolean",
            default = true,
        },
        service_token = {type = "string"},
        namespace = {type = "string", maxLength = 256, pattern = name_pattern},
        package = {type = "string", maxLength = 256, pattern = name_pattern},
        action = {type = "string", maxLength = 256, pattern = name_pattern},
        result = {
            type = "boolean",
            default = true,
        },
        timeout = {
            type = "integer",
            minimum = 1,
            maximum = 60000,
            default = 3000,
            description = "timeout in milliseconds",
        },
        keepalive = {type = "boolean", default = true},
        keepalive_timeout = {type = "integer", minimum = 1000, default = 60000},
        keepalive_pool = {type = "integer", minimum = 1, default = 5}
    },
    required = {"api_host", "service_token", "namespace", "action"},
    encrypt_fields = {"service_token"}
}


local _M = {
    version = 0.1,
    priority = -1901,
    name = "openwhisk",
    schema = schema,
}


-- Validate the route configuration; also emit warnings for insecure
-- transport settings (plain-http api_host, disabled ssl_verify).
function _M.check_schema(conf)
    core.utils.check_https({"api_host"}, conf, _M.name)
    core.utils.check_tls_bool({"ssl_verify"}, conf, _M.name)

    local valid, schema_err = core.schema.check(schema, conf)
    if not valid then
        return false, schema_err
    end

    return true
end


-- access phase: synchronously invoke the configured action (blocking=true)
-- and translate its reply into the client response. Returns 503 on
-- transport or JSON-decode failure.
function _M.access(conf, ctx)
    local request_opts = {
        method = "POST",
        body = core.request.get_body(),
        query = {
            blocking = "true",
            result = tostring(conf.result),
            timeout = conf.timeout
        },
        headers = {
            ["Authorization"] = "Basic " .. ngx_encode_base64(conf.service_token),
            ["Content-Type"] = "application/json",
        },
        keepalive = conf.keepalive,
        ssl_verify = conf.ssl_verify
    }
    if conf.keepalive then
        request_opts.keepalive_timeout = conf.keepalive_timeout
        request_opts.keepalive_pool = conf.keepalive_pool
    end

    -- Build the OpenWhisk action endpoint; the package segment is optional.
    local pkg_segment = conf.package and conf.package .. "/" or ""
    local action_url = conf.api_host .. "/api/v1/namespaces/" .. conf.namespace ..
                       "/actions/" .. pkg_segment .. conf.action

    local httpc = http.new()
    httpc:set_timeout(conf.timeout)

    local res, request_err = httpc:request_uri(action_url, request_opts)
    if not res then
        core.log.error("failed to process openwhisk action, err: ", request_err)
        return 503
    end

    -- An empty body is relayed as-is with the upstream status.
    if res.body == nil then
        return res.status, res.body
    end

    -- parse OpenWhisk JSON response
    -- OpenWhisk supports two types of responses, the user can return only
    -- the response body, or set the status code and header.
    local result, decode_err = core.json.decode(res.body)
    if not result then
        core.log.error("failed to parse openwhisk response data: ", decode_err)
        return 503
    end

    -- setting response headers
    if result.headers ~= nil then
        core.response.set_header(result.headers)
    end

    return result.statusCode or res.status, result.body or res.body
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/prometheus.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/prometheus.lua
new file mode 100644
index 0000000..b154697
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/prometheus.lua
@@ -0,0 +1,58 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local exporter = require("apisix.plugins.prometheus.exporter") + + +local plugin_name = "prometheus" +local schema = { + type = "object", + properties = { + prefer_name = { + type = "boolean", + default = false + } + }, +} + + +local _M = { + version = 0.2, + priority = 500, + name = plugin_name, + log = exporter.http_log, + schema = schema, + run_policy = "prefer_route", +} + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + return true +end + + +function _M.api() + return exporter.get_api(true) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/prometheus/exporter.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/prometheus/exporter.lua new file mode 100644 index 0000000..d34ab87 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/prometheus/exporter.lua @@ -0,0 +1,569 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local base_prometheus = require("prometheus") +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local control = require("apisix.control.v1") +local ipairs = ipairs +local pairs = pairs +local ngx = ngx +local re_gmatch = ngx.re.gmatch +local ffi = require("ffi") +local C = ffi.C +local pcall = pcall +local select = select +local type = type +local prometheus +local prometheus_bkp +local router = require("apisix.router") +local get_routes = router.http_routes +local get_ssls = router.ssls +local get_services = require("apisix.http.service").services +local get_consumers = require("apisix.consumer").consumers +local get_upstreams = require("apisix.upstream").upstreams +local get_global_rules = require("apisix.global_rules").global_rules +local get_global_rules_prev_index = require("apisix.global_rules").get_pre_index +local clear_tab = core.table.clear +local get_stream_routes = router.stream_routes +local get_protos = require("apisix.plugins.grpc-transcode.proto").protos +local service_fetch = require("apisix.http.service").get +local latency_details = require("apisix.utils.log-util").latency_details_in_ms +local xrpc = require("apisix.stream.xrpc") +local unpack = unpack +local next = next + + +local ngx_capture +if ngx.config.subsystem == "http" then + ngx_capture = ngx.location.capture +end + + +local plugin_name = "prometheus" +local default_export_uri = "/apisix/prometheus/metrics" +-- Default set of latency buckets, 1ms to 60s: +local DEFAULT_BUCKETS = {1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000, 
30000, 60000} + +local metrics = {} + +local inner_tab_arr = {} + +local function gen_arr(...) + clear_tab(inner_tab_arr) + for i = 1, select('#', ...) do + inner_tab_arr[i] = select(i, ...) + end + + return inner_tab_arr +end + +local extra_labels_tbl = {} + +local function extra_labels(name, ctx) + clear_tab(extra_labels_tbl) + + local attr = plugin.plugin_attr("prometheus") + local metrics = attr.metrics + + if metrics and metrics[name] and metrics[name].extra_labels then + local labels = metrics[name].extra_labels + for _, kv in ipairs(labels) do + local val, v = next(kv) + if ctx then + val = ctx.var[v:sub(2)] + if val == nil then + val = "" + end + end + core.table.insert(extra_labels_tbl, val) + end + end + + return extra_labels_tbl +end + + +local _M = {} + + +local function init_stream_metrics() + metrics.stream_connection_total = prometheus:counter("stream_connection_total", + "Total number of connections handled per stream route in APISIX", + {"route"}) + + xrpc.init_metrics(prometheus) +end + + +function _M.http_init(prometheus_enabled_in_stream) + -- todo: support hot reload, we may need to update the lua-prometheus + -- library + if ngx.get_phase() ~= "init" and ngx.get_phase() ~= "init_worker" then + if prometheus_bkp then + prometheus = prometheus_bkp + end + return + end + + clear_tab(metrics) + + -- Newly added metrics should follow the naming best practices described in + -- https://prometheus.io/docs/practices/naming/#metric-names + -- For example, + -- 1. Add unit as the suffix + -- 2. Add `_total` as the suffix if the metric type is counter + -- 3. Use base unit + -- We keep the old metric names for the compatibility. 
+ + -- across all services + local metric_prefix = "apisix_" + local attr = plugin.plugin_attr("prometheus") + if attr and attr.metric_prefix then + metric_prefix = attr.metric_prefix + end + + local status_metrics_exptime = core.table.try_read_attr(attr, "metrics", + "http_status", "expire") + local latency_metrics_exptime = core.table.try_read_attr(attr, "metrics", + "http_latency", "expire") + local bandwidth_metrics_exptime = core.table.try_read_attr(attr, "metrics", + "bandwidth", "expire") + local upstream_status_exptime = core.table.try_read_attr(attr, "metrics", + "upstream_status", "expire") + + prometheus = base_prometheus.init("prometheus-metrics", metric_prefix) + + metrics.connections = prometheus:gauge("nginx_http_current_connections", + "Number of HTTP connections", + {"state"}) + + metrics.requests = prometheus:gauge("http_requests_total", + "The total number of client requests since APISIX started") + + metrics.etcd_reachable = prometheus:gauge("etcd_reachable", + "Config server etcd reachable from APISIX, 0 is unreachable") + + metrics.node_info = prometheus:gauge("node_info", + "Info of APISIX node", + {"hostname", "version"}) + + metrics.etcd_modify_indexes = prometheus:gauge("etcd_modify_indexes", + "Etcd modify index for APISIX keys", + {"key"}) + + metrics.shared_dict_capacity_bytes = prometheus:gauge("shared_dict_capacity_bytes", + "The capacity of each nginx shared DICT since APISIX start", + {"name"}) + + metrics.shared_dict_free_space_bytes = prometheus:gauge("shared_dict_free_space_bytes", + "The free space of each nginx shared DICT since APISIX start", + {"name"}) + + metrics.upstream_status = prometheus:gauge("upstream_status", + "Upstream status from health check", + {"name", "ip", "port"}, + upstream_status_exptime) + + -- per service + + -- The consumer label indicates the name of consumer corresponds to the + -- request to the route/service, it will be an empty string if there is + -- no consumer in request. 
+ metrics.status = prometheus:counter("http_status", + "HTTP status codes per service in APISIX", + {"code", "route", "matched_uri", "matched_host", "service", "consumer", "node", + unpack(extra_labels("http_status"))}, + status_metrics_exptime) + + local buckets = DEFAULT_BUCKETS + if attr and attr.default_buckets then + buckets = attr.default_buckets + end + + metrics.latency = prometheus:histogram("http_latency", + "HTTP request latency in milliseconds per service in APISIX", + {"type", "route", "service", "consumer", "node", unpack(extra_labels("http_latency"))}, + buckets, latency_metrics_exptime) + + metrics.bandwidth = prometheus:counter("bandwidth", + "Total bandwidth in bytes consumed per service in APISIX", + {"type", "route", "service", "consumer", "node", unpack(extra_labels("bandwidth"))}, + bandwidth_metrics_exptime) + + if prometheus_enabled_in_stream then + init_stream_metrics() + end +end + + +function _M.stream_init() + if ngx.get_phase() ~= "init" and ngx.get_phase() ~= "init_worker" then + return + end + + if not pcall(function() return C.ngx_meta_lua_ffi_shdict_udata_to_zone end) then + core.log.error("need to build APISIX-Runtime to support L4 metrics") + return + end + + clear_tab(metrics) + + local metric_prefix = "apisix_" + local attr = plugin.plugin_attr("prometheus") + if attr and attr.metric_prefix then + metric_prefix = attr.metric_prefix + end + + prometheus = base_prometheus.init("prometheus-metrics", metric_prefix) + + init_stream_metrics() +end + + +function _M.http_log(conf, ctx) + local vars = ctx.var + + local route_id = "" + local balancer_ip = ctx.balancer_ip or "" + local service_id = "" + local consumer_name = ctx.consumer_name or "" + + local matched_route = ctx.matched_route and ctx.matched_route.value + if matched_route then + route_id = matched_route.id + service_id = matched_route.service_id or "" + if conf.prefer_name == true then + route_id = matched_route.name or route_id + if service_id ~= "" then + local service = 
service_fetch(service_id) + service_id = service and service.value.name or service_id + end + end + end + + local matched_uri = "" + local matched_host = "" + if ctx.curr_req_matched then + matched_uri = ctx.curr_req_matched._path or "" + matched_host = ctx.curr_req_matched._host or "" + end + + metrics.status:inc(1, + gen_arr(vars.status, route_id, matched_uri, matched_host, + service_id, consumer_name, balancer_ip, + unpack(extra_labels("http_status", ctx)))) + + local latency, upstream_latency, apisix_latency = latency_details(ctx) + local latency_extra_label_values = extra_labels("http_latency", ctx) + + metrics.latency:observe(latency, + gen_arr("request", route_id, service_id, consumer_name, balancer_ip, + unpack(latency_extra_label_values))) + + if upstream_latency then + metrics.latency:observe(upstream_latency, + gen_arr("upstream", route_id, service_id, consumer_name, balancer_ip, + unpack(latency_extra_label_values))) + end + + metrics.latency:observe(apisix_latency, + gen_arr("apisix", route_id, service_id, consumer_name, balancer_ip, + unpack(latency_extra_label_values))) + + local bandwidth_extra_label_values = extra_labels("bandwidth", ctx) + + metrics.bandwidth:inc(vars.request_length, + gen_arr("ingress", route_id, service_id, consumer_name, balancer_ip, + unpack(bandwidth_extra_label_values))) + + metrics.bandwidth:inc(vars.bytes_sent, + gen_arr("egress", route_id, service_id, consumer_name, balancer_ip, + unpack(bandwidth_extra_label_values))) +end + + +function _M.stream_log(conf, ctx) + local route_id = "" + local matched_route = ctx.matched_route and ctx.matched_route.value + if matched_route then + route_id = matched_route.id + if conf.prefer_name == true then + route_id = matched_route.name or route_id + end + end + + metrics.stream_connection_total:inc(1, gen_arr(route_id)) +end + + +local ngx_status_items = {"active", "accepted", "handled", "total", + "reading", "writing", "waiting"} +local label_values = {} + +local function 
nginx_status() + local res = ngx_capture("/apisix/nginx_status") + if not res or res.status ~= 200 then + core.log.error("failed to fetch Nginx status") + return + end + + -- Active connections: 2 + -- server accepts handled requests + -- 26 26 84 + -- Reading: 0 Writing: 1 Waiting: 1 + + local iterator, err = re_gmatch(res.body, [[(\d+)]], "jmo") + if not iterator then + core.log.error("failed to re.gmatch Nginx status: ", err) + return + end + + for _, name in ipairs(ngx_status_items) do + local val = iterator() + if not val then + break + end + + if name == "total" then + metrics.requests:set(val[0]) + else + label_values[1] = name + metrics.connections:set(val[0], label_values) + end + end +end + + +local key_values = {} +local function set_modify_index(key, items, items_ver, global_max_index) + clear_tab(key_values) + local max_idx = 0 + if items_ver and items then + for _, item in ipairs(items) do + if type(item) == "table" then + local modify_index = item.orig_modifiedIndex or item.modifiedIndex + if modify_index > max_idx then + max_idx = modify_index + end + end + end + end + + key_values[1] = key + metrics.etcd_modify_indexes:set(max_idx, key_values) + + + global_max_index = max_idx > global_max_index and max_idx or global_max_index + + return global_max_index +end + + +local function etcd_modify_index() + clear_tab(key_values) + local global_max_idx = 0 + + -- routes + local routes, routes_ver = get_routes() + global_max_idx = set_modify_index("routes", routes, routes_ver, global_max_idx) + + -- services + local services, services_ver = get_services() + global_max_idx = set_modify_index("services", services, services_ver, global_max_idx) + + -- ssls + local ssls, ssls_ver = get_ssls() + global_max_idx = set_modify_index("ssls", ssls, ssls_ver, global_max_idx) + + -- consumers + local consumers, consumers_ver = get_consumers() + global_max_idx = set_modify_index("consumers", consumers, consumers_ver, global_max_idx) + + -- global_rules + local 
global_rules, global_rules_ver = get_global_rules() + if global_rules then + global_max_idx = set_modify_index("global_rules", global_rules, + global_rules_ver, global_max_idx) + + -- prev_index + key_values[1] = "prev_index" + local prev_index = get_global_rules_prev_index() + metrics.etcd_modify_indexes:set(prev_index, key_values) + + else + global_max_idx = set_modify_index("global_rules", nil, nil, global_max_idx) + end + + -- upstreams + local upstreams, upstreams_ver = get_upstreams() + global_max_idx = set_modify_index("upstreams", upstreams, upstreams_ver, global_max_idx) + + -- stream_routes + local stream_routes, stream_routes_ver = get_stream_routes() + global_max_idx = set_modify_index("stream_routes", stream_routes, + stream_routes_ver, global_max_idx) + + -- proto + local protos, protos_ver = get_protos() + global_max_idx = set_modify_index("protos", protos, protos_ver, global_max_idx) + + -- global max + key_values[1] = "max_modify_index" + metrics.etcd_modify_indexes:set(global_max_idx, key_values) + +end + + +local function shared_dict_status() + local name = {} + for shared_dict_name, shared_dict in pairs(ngx.shared) do + name[1] = shared_dict_name + metrics.shared_dict_capacity_bytes:set(shared_dict:capacity(), name) + metrics.shared_dict_free_space_bytes:set(shared_dict:free_space(), name) + end +end + + +local function collect(ctx, stream_only) + if not prometheus or not metrics then + core.log.error("prometheus: plugin is not initialized, please make sure ", + " 'prometheus_metrics' shared dict is present in nginx template") + return 500, {message = "An unexpected error occurred"} + end + + -- collect ngx.shared.DICT status + shared_dict_status() + + -- across all services + nginx_status() + + local config = core.config.new() + + -- config server status + local vars = ngx.var or {} + local hostname = vars.hostname or "" + local version = core.version.VERSION or "" + + -- we can't get etcd index in metric server if only stream subsystem is 
enabled + if config.type == "etcd" and not stream_only then + -- etcd modify index + etcd_modify_index() + + local version, err = config:server_version() + if version then + metrics.etcd_reachable:set(1) + + else + metrics.etcd_reachable:set(0) + core.log.error("prometheus: failed to reach config server while ", + "processing metrics endpoint: ", err) + end + + -- Because requesting any key from etcd will return the "X-Etcd-Index" header. + -- A non-existent key is preferred because it doesn't return too much data. + -- So use a phantom key to get the etcd index. + local res, _ = config:getkey("/phantomkey") + if res and res.headers then + clear_tab(key_values) + -- global max + key_values[1] = "x_etcd_index" + metrics.etcd_modify_indexes:set(res.headers["X-Etcd-Index"], key_values) + end + end + + metrics.node_info:set(1, gen_arr(hostname, version)) + + -- update upstream_status metrics + local stats = control.get_health_checkers() + for _, stat in ipairs(stats) do + for _, node in ipairs(stat.nodes) do + metrics.upstream_status:set( + (node.status == "healthy" or node.status == "mostly_healthy") and 1 or 0, + gen_arr(stat.name, node.ip, node.port) + ) + end + end + + core.response.set_header("content_type", "text/plain") + return 200, core.table.concat(prometheus:metric_data()) +end +_M.collect = collect + + +local function get_api(called_by_api_router) + local export_uri = default_export_uri + local attr = plugin.plugin_attr(plugin_name) + if attr and attr.export_uri then + export_uri = attr.export_uri + end + + local api = { + methods = {"GET"}, + uri = export_uri, + handler = collect + } + + if not called_by_api_router then + return api + end + + if attr.enable_export_server then + return {} + end + + return {api} +end +_M.get_api = get_api + + +function _M.export_metrics(stream_only) + if not prometheus then + core.response.exit(200, "{}") + end + local api = get_api(false) + local uri = ngx.var.uri + local method = ngx.req.get_method() + + if uri == api.uri and method ==
api.methods[1] then + local code, body = api.handler(nil, stream_only) + if code or body then + core.response.exit(code, body) + end + end + + return core.response.exit(404) +end + + +function _M.metric_data() + return prometheus:metric_data() +end + +function _M.get_prometheus() + return prometheus +end + + +function _M.destroy() + if prometheus ~= nil then + prometheus_bkp = core.table.deepcopy(prometheus) + prometheus = nil + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/disk_handler.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/disk_handler.lua new file mode 100644 index 0000000..70d3532 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/disk_handler.lua @@ -0,0 +1,102 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local os = os +local ngx_re = require("ngx.re") +local core = require("apisix.core") +local util = require("apisix.plugins.proxy-cache.util") + +local _M = {} + + +local function disk_cache_purge(conf, ctx) + local cache_zone_info = ngx_re.split(ctx.var.upstream_cache_zone_info, ",") + + local filename = util.generate_cache_filename(cache_zone_info[1], cache_zone_info[2], + ctx.var.upstream_cache_key) + + if util.file_exists(filename) then + os.remove(filename) + return nil + end + + return "Not found" +end + + +function _M.access(conf, ctx) + ctx.var.upstream_cache_zone = conf.cache_zone + + if ctx.var.request_method == "PURGE" then + local err = disk_cache_purge(conf, ctx) + if err ~= nil then + return 404 + end + + return 200 + end + + if conf.cache_bypass ~= nil then + local value = util.generate_complex_value(conf.cache_bypass, ctx) + ctx.var.upstream_cache_bypass = value + core.log.info("proxy-cache cache bypass value:", value) + end + + if not util.match_method(conf, ctx) then + ctx.var.upstream_cache_bypass = "1" + core.log.info("proxy-cache cache bypass method: ", ctx.var.request_method) + end +end + + +function _M.header_filter(conf, ctx) + local no_cache = "1" + + if util.match_method(conf, ctx) and util.match_status(conf, ctx) then + no_cache = "0" + end + + if conf.no_cache ~= nil then + local value = util.generate_complex_value(conf.no_cache, ctx) + core.log.info("proxy-cache no-cache value:", value) + + if value ~= nil and value ~= "" and value ~= "0" then + no_cache = "1" + end + end + + local upstream_hdr_cache_control + local upstream_hdr_expires + + if conf.hide_cache_headers == true then + upstream_hdr_cache_control = "" + upstream_hdr_expires = "" + else + upstream_hdr_cache_control = ctx.var.upstream_http_cache_control + upstream_hdr_expires = ctx.var.upstream_http_expires + end + + core.response.set_header("Cache-Control", upstream_hdr_cache_control, + "Expires", upstream_hdr_expires, + "Apisix-Cache-Status", 
ctx.var.upstream_cache_status) + + ctx.var.upstream_no_cache = no_cache + core.log.info("proxy-cache no cache:", no_cache) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/init.lua new file mode 100644 index 0000000..918f755 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/init.lua @@ -0,0 +1,198 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local memory_handler = require("apisix.plugins.proxy-cache.memory_handler") +local disk_handler = require("apisix.plugins.proxy-cache.disk_handler") +local util = require("apisix.plugins.proxy-cache.util") +local core = require("apisix.core") +local ipairs = ipairs + +local plugin_name = "proxy-cache" + +local STRATEGY_DISK = "disk" +local STRATEGY_MEMORY = "memory" +local DEFAULT_CACHE_ZONE = "disk_cache_one" + +local schema = { + type = "object", + properties = { + cache_zone = { + type = "string", + minLength = 1, + maxLength = 100, + default = DEFAULT_CACHE_ZONE, + }, + cache_strategy = { + type = "string", + enum = {STRATEGY_DISK, STRATEGY_MEMORY}, + default = STRATEGY_DISK, + }, + cache_key = { + type = "array", + minItems = 1, + items = { + description = "a key for caching", + type = "string", + pattern = [[(^[^\$].+$|^\$[0-9a-zA-Z_]+$)]], + }, + default = {"$host", "$request_uri"} + }, + cache_http_status = { + type = "array", + minItems = 1, + items = { + description = "http response status", + type = "integer", + minimum = 200, + maximum = 599, + }, + uniqueItems = true, + default = {200, 301, 404}, + }, + cache_method = { + type = "array", + minItems = 1, + items = { + description = "supported http method", + type = "string", + enum = {"GET", "POST", "HEAD"}, + }, + uniqueItems = true, + default = {"GET", "HEAD"}, + }, + hide_cache_headers = { + type = "boolean", + default = false, + }, + cache_control = { + type = "boolean", + default = false, + }, + cache_bypass = { + type = "array", + minItems = 1, + items = { + type = "string", + pattern = [[(^[^\$].+$|^\$[0-9a-zA-Z_]+$)]] + }, + }, + no_cache = { + type = "array", + minItems = 1, + items = { + type = "string", + pattern = [[(^[^\$].+$|^\$[0-9a-zA-Z_]+$)]] + }, + }, + cache_ttl = { + type = "integer", + minimum = 1, + default = 300, + }, + }, +} + + +local _M = { + version = 0.2, + priority = 1085, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + local ok, err 
= core.schema.check(schema, conf) + if not ok then + return false, err + end + + for _, key in ipairs(conf.cache_key) do + if key == "$request_method" then + return false, "cache_key variable " .. key .. " unsupported" + end + end + + local found = false + local local_conf = core.config.local_conf() + if local_conf.apisix.proxy_cache then + local err = "cache_zone " .. conf.cache_zone .. " not found" + for _, cache in ipairs(local_conf.apisix.proxy_cache.zones) do + -- cache_zone passed in plugin config matched one of the proxy_cache zones + if cache.name == conf.cache_zone then + -- check for the mismatch between cache_strategy and corresponding cache zone + if (conf.cache_strategy == STRATEGY_MEMORY and cache.disk_path) or + (conf.cache_strategy == STRATEGY_DISK and not cache.disk_path) then + err = "invalid or empty cache_zone for cache_strategy: "..conf.cache_strategy + else + found = true + end + break + end + end + + if found == false then + return false, err + end + end + + return true +end + + +function _M.access(conf, ctx) + core.log.info("proxy-cache plugin access phase, conf: ", core.json.delay_encode(conf)) + + local value = util.generate_complex_value(conf.cache_key, ctx) + ctx.var.upstream_cache_key = value + core.log.info("proxy-cache cache key value:", value) + + local handler + if conf.cache_strategy == STRATEGY_MEMORY then + handler = memory_handler + else + handler = disk_handler + end + + return handler.access(conf, ctx) +end + + +function _M.header_filter(conf, ctx) + core.log.info("proxy-cache plugin header filter phase, conf: ", core.json.delay_encode(conf)) + + local handler + if conf.cache_strategy == STRATEGY_MEMORY then + handler = memory_handler + else + handler = disk_handler + end + + handler.header_filter(conf, ctx) +end + + +function _M.body_filter(conf, ctx) + core.log.info("proxy-cache plugin body filter phase, conf: ", core.json.delay_encode(conf)) + + if conf.cache_strategy == STRATEGY_MEMORY then + 
memory_handler.body_filter(conf, ctx) + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/memory.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/memory.lua new file mode 100644 index 0000000..6d8d804 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/memory.lua @@ -0,0 +1,84 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local ngx = ngx +local ngx_shared = ngx.shared +local setmetatable = setmetatable +local core = require("apisix.core") + +local _M = {} +local mt = { __index = _M } + + +function _M.new(opts) + return setmetatable({ + dict = ngx_shared[opts.shdict_name], + }, mt) +end + + +function _M:set(key, obj, ttl) + if self.dict == nil then + return nil, "invalid cache_zone provided" + end + + local obj_json = core.json.encode(obj) + if not obj_json then + return nil, "could not encode object" + end + + local succ, err = self.dict:set(key, obj_json, ttl) + return succ and obj_json or nil, err +end + + +function _M:get(key) + if self.dict == nil then + return nil, "invalid cache_zone provided" + end + + -- If the key does not exist or has expired, then res_json will be nil. 
+ local res_json, err, stale = self.dict:get_stale(key) + if not res_json then + if not err then + return nil, "not found" + else + return nil, err + end + end + if stale then + return nil, "expired" + end + + local res_obj, err = core.json.decode(res_json) + if not res_obj then + return nil, err + end + + return res_obj, nil +end + + +function _M:purge(key) + if self.dict == nil then + return nil, "invalid cache_zone provided" + end + self.dict:delete(key) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/memory_handler.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/memory_handler.lua new file mode 100644 index 0000000..e41cb72 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/memory_handler.lua @@ -0,0 +1,332 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local memory_strategy = require("apisix.plugins.proxy-cache.memory").new +local util = require("apisix.plugins.proxy-cache.util") +local core = require("apisix.core") +local tab_new = require("table.new") +local ngx_re_gmatch = ngx.re.gmatch +local ngx_re_match = ngx.re.match +local parse_http_time = ngx.parse_http_time +local concat = table.concat +local lower = string.lower +local floor = math.floor +local tostring = tostring +local tonumber = tonumber +local ngx = ngx +local type = type +local pairs = pairs +local time = ngx.now +local max = math.max + +local CACHE_VERSION = 1 + +local _M = {} + +-- http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.5.1 +-- note content-length & apisix-cache-status are not strictly +-- hop-by-hop but we will be adjusting it here anyhow +local hop_by_hop_headers = { + ["connection"] = true, + ["keep-alive"] = true, + ["proxy-authenticate"] = true, + ["proxy-authorization"] = true, + ["te"] = true, + ["trailers"] = true, + ["transfer-encoding"] = true, + ["upgrade"] = true, + ["content-length"] = true, + ["apisix-cache-status"] = true, +} + + +local function include_cache_header(header) + local n_header = lower(header) + if n_header == "expires" or n_header == "cache-control" then + return true + end + + return false +end + + +local function overwritable_header(header) + local n_header = lower(header) + + return not hop_by_hop_headers[n_header] + and not ngx_re_match(n_header, "ratelimit-remaining") +end + + +-- The following format can accept: +-- Cache-Control: no-cache +-- Cache-Control: no-store +-- Cache-Control: max-age=3600 +-- Cache-Control: max-stale=3600 +-- Cache-Control: min-fresh=3600 +-- Cache-Control: private, max-age=600 +-- Cache-Control: public, max-age=31536000 +-- Refer to: https://www.holisticseo.digital/pagespeed/cache-control/ +local function parse_directive_header(h) + if not h then + return {} + end + + if type(h) == "table" then + h = concat(h, ", ") + end + + local t = {} + local res = 
tab_new(3, 0) + local iter = ngx_re_gmatch(h, "([^,]+)", "oj") + + local m = iter() + while m do + local _, err = ngx_re_match(m[0], [[^\s*([^=]+)(?:=(.+))?]], + "oj", nil, res) + if err then + core.log.error(err) + end + + -- store the directive token as a numeric value if it looks like a number; + -- otherwise, store the string value. for directives without token, we just + -- set the key to true + t[lower(res[1])] = tonumber(res[2]) or res[2] or true + + m = iter() + end + + return t +end + + +local function parse_resource_ttl(ctx, cc) + local max_age = cc["s-maxage"] or cc["max-age"] + + if not max_age then + local expires = ctx.var.upstream_http_expires + + -- if multiple Expires headers are present, last one wins + if type(expires) == "table" then + expires = expires[#expires] + end + + local exp_time = parse_http_time(tostring(expires)) + if exp_time then + max_age = exp_time - time() + end + end + + return max_age and max(max_age, 0) or 0 +end + + +local function cacheable_request(conf, ctx, cc) + if not util.match_method(conf, ctx) then + return false, "MISS" + end + + if conf.cache_bypass ~= nil then + local value = util.generate_complex_value(conf.cache_bypass, ctx) + core.log.info("proxy-cache cache bypass value:", value) + if value ~= nil and value ~= "" and value ~= "0" then + return false, "BYPASS" + end + end + + if conf.cache_control and (cc["no-store"] or cc["no-cache"]) then + return false, "BYPASS" + end + + return true, "" +end + + +local function cacheable_response(conf, ctx, cc) + if not util.match_status(conf, ctx) then + return false + end + + if conf.no_cache ~= nil then + local value = util.generate_complex_value(conf.no_cache, ctx) + core.log.info("proxy-cache no-cache value:", value) + + if value ~= nil and value ~= "" and value ~= "0" then + return false + end + end + + if conf.cache_control and (cc["private"] or cc["no-store"] or cc["no-cache"]) then + return false + end + + if conf.cache_control and parse_resource_ttl(ctx, cc) <= 0 
then + return false + end + + return true +end + + +function _M.access(conf, ctx) + local cc = parse_directive_header(ctx.var.http_cache_control) + + if ctx.var.request_method ~= "PURGE" then + local ret, msg = cacheable_request(conf, ctx, cc) + if not ret then + core.response.set_header("Apisix-Cache-Status", msg) + return + end + end + + if not ctx.cache then + ctx.cache = { + memory = memory_strategy({shdict_name = conf.cache_zone}), + hit = false, + ttl = 0, + } + end + + local res, err = ctx.cache.memory:get(ctx.var.upstream_cache_key) + + if ctx.var.request_method == "PURGE" then + if err == "not found" then + return 404 + end + ctx.cache.memory:purge(ctx.var.upstream_cache_key) + ctx.cache = nil + return 200 + end + + if err then + if err == "expired" then + core.response.set_header("Apisix-Cache-Status", "EXPIRED") + + elseif err ~= "not found" then + core.response.set_header("Apisix-Cache-Status", "MISS") + core.log.error("failed to get from cache, err: ", err) + + elseif conf.cache_control and cc["only-if-cached"] then + core.response.set_header("Apisix-Cache-Status", "MISS") + return 504 + + else + core.response.set_header("Apisix-Cache-Status", "MISS") + end + return + end + + if res.version ~= CACHE_VERSION then + core.log.warn("cache format mismatch, purging ", ctx.var.upstream_cache_key) + core.response.set_header("Apisix-Cache-Status", "BYPASS") + ctx.cache.memory:purge(ctx.var.upstream_cache_key) + return + end + + if conf.cache_control then + if cc["max-age"] and time() - res.timestamp > cc["max-age"] then + core.response.set_header("Apisix-Cache-Status", "STALE") + return + end + + if cc["max-stale"] and time() - res.timestamp - res.ttl > cc["max-stale"] then + core.response.set_header("Apisix-Cache-Status", "STALE") + return + end + + if cc["min-fresh"] and res.ttl - (time() - res.timestamp) < cc["min-fresh"] then + core.response.set_header("Apisix-Cache-Status", "STALE") + return + end + else + if time() - res.timestamp > res.ttl then + 
core.response.set_header("Apisix-Cache-Status", "STALE") + return + end + end + + ctx.cache.hit = true + + for key, value in pairs(res.headers) do + if conf.hide_cache_headers == true and include_cache_header(key) then + core.response.set_header(key, "") + elseif overwritable_header(key) then + core.response.set_header(key, value) + end + end + + core.response.set_header("Age", floor(time() - res.timestamp)) + core.response.set_header("Apisix-Cache-Status", "HIT") + + return res.status, res.body +end + + +function _M.header_filter(conf, ctx) + local cache = ctx.cache + if not cache or cache.hit then + return + end + + local res_headers = ngx.resp.get_headers(0, true) + + for key in pairs(res_headers) do + if conf.hide_cache_headers == true and include_cache_header(key) then + core.response.set_header(key, "") + end + end + + local cc = parse_directive_header(ctx.var.upstream_http_cache_control) + + if cacheable_response(conf, ctx, cc) then + cache.res_headers = res_headers + cache.ttl = conf.cache_control and parse_resource_ttl(ctx, cc) or conf.cache_ttl + else + ctx.cache = nil + end +end + + +function _M.body_filter(conf, ctx) + local cache = ctx.cache + if not cache or cache.hit then + return + end + + local res_body = core.response.hold_body_chunk(ctx, true) + if not res_body then + return + end + + local res = { + status = ngx.status, + body = res_body, + body_len = #res_body, + headers = cache.res_headers, + ttl = cache.ttl, + timestamp = time(), + version = CACHE_VERSION, + } + + local res, err = cache.memory:set(ctx.var.upstream_cache_key, res, cache.ttl) + if not res then + core.log.error("failed to set cache, err: ", err) + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/util.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/util.lua new file mode 100644 index 0000000..26c6e81 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-cache/util.lua @@ -0,0 
+1,102 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements.  See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License.  You may obtain a copy of the License at +-- +--     http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local ngx_re = require("ngx.re") +local tab_concat = table.concat +local string = string +local io_open = io.open +local io_close = io.close +local ngx = ngx +local ipairs = ipairs +local pairs = pairs +local tonumber = tonumber + +local _M = {} + +local tmp = {} +function _M.generate_complex_value(data, ctx) + core.table.clear(tmp) + + core.log.info("proxy-cache complex value: ", core.json.delay_encode(data)) + for i, value in ipairs(data) do + core.log.info("proxy-cache complex value index-", i, ": ", value) + + if string.byte(value, 1, 1) == string.byte('$') then + tmp[i] = ctx.var[string.sub(value, 2)] or "" + else + tmp[i] = value + end + end + + return tab_concat(tmp, "") +end + + +-- check whether the request method matches one of the user-defined methods. +function _M.match_method(conf, ctx) + for _, method in ipairs(conf.cache_method) do + if method == ctx.var.request_method then + return true + end + end + + return false +end + + +-- check whether the response status matches one of the user-defined statuses.
+function _M.match_status(conf, ctx) + for _, status in ipairs(conf.cache_http_status) do + if status == ngx.status then + return true + end + end + + return false +end + + +function _M.file_exists(name) + local f = io_open(name, "r") + if f ~= nil then + io_close(f) + return true + end + return false +end + + +function _M.generate_cache_filename(cache_path, cache_levels, cache_key) + local md5sum = ngx.md5(cache_key) + local levels = ngx_re.split(cache_levels, ":") + local filename = "" + + local index = #md5sum + for k, v in pairs(levels) do + local length = tonumber(v) + index = index - length + filename = filename .. md5sum:sub(index+1, index+length) .. "/" + end + if cache_path:sub(-1) ~= "/" then + cache_path = cache_path .. "/" + end + filename = cache_path .. filename .. md5sum + return filename +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-control.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-control.lua new file mode 100644 index 0000000..fc87e45 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-control.lua @@ -0,0 +1,65 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local require = require +local core = require("apisix.core") +local ok, apisix_ngx_client = pcall(require, "resty.apisix.client") + + +local schema = { + type = "object", + properties = { + request_buffering = { + type = "boolean", + default = true, + }, + }, +} + + +local plugin_name = "proxy-control" +local _M = { + version = 0.1, + priority = 21990, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +-- we want to control proxy behavior before auth, so put the code under rewrite method +function _M.rewrite(conf, ctx) + if not ok then + core.log.error("need to build APISIX-Runtime to support proxy control") + return 501 + end + + local request_buffering = conf.request_buffering + if request_buffering ~= nil then + local ok, err = apisix_ngx_client.set_proxy_request_buffering(request_buffering) + if not ok then + core.log.error("failed to set request_buffering: ", err) + return 503 + end + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-mirror.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-mirror.lua new file mode 100644 index 0000000..d6cede6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-mirror.lua @@ -0,0 +1,133 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local url = require("net.url") + +local math_random = math.random +local has_mod, apisix_ngx_client = pcall(require, "resty.apisix.client") + + +local plugin_name = "proxy-mirror" +local schema = { + type = "object", + properties = { + host = { + type = "string", + pattern = [=[^(http(s)?|grpc(s)?):\/\/([\da-zA-Z.-]+|\[[\da-fA-F:]+\])(:\d+)?$]=], + }, + path = { + type = "string", + pattern = [[^/[^?&]+$]], + }, + path_concat_mode = { + type = "string", + default = "replace", + enum = {"replace", "prefix"}, + description = "the concatenation mode for custom path" + }, + sample_ratio = { + type = "number", + minimum = 0.00001, + maximum = 1, + default = 1, + }, + }, + required = {"host"}, +} + +local _M = { + version = 0.1, + priority = 1010, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + return true +end + + +local function resolver_host(prop_host) + local url_decoded = url.parse(prop_host) + local decoded_host = url_decoded.host + if not core.utils.parse_ipv4(decoded_host) and not core.utils.parse_ipv6(decoded_host) then + local ip, err = core.resolver.parse_domain(decoded_host) + + if not ip then + core.log.error("dns resolver resolves domain: ", decoded_host," error: ", err, + " will continue to use the host: ", decoded_host) + return url_decoded.scheme, prop_host + end + + local host = url_decoded.scheme .. '://' .. ip .. + (url_decoded.port and ':' .. 
url_decoded.port or '') + core.log.info(prop_host, " is resolved to: ", host) + return url_decoded.scheme, host + end + return url_decoded.scheme, prop_host +end + + +local function enable_mirror(ctx, conf) + local uri = (ctx.var.upstream_uri and ctx.var.upstream_uri ~= "") and + ctx.var.upstream_uri or + ctx.var.uri .. ctx.var.is_args .. (ctx.var.args or '') + + if conf.path then + if conf.path_concat_mode == "prefix" then + uri = conf.path .. uri + else + uri = conf.path .. ctx.var.is_args .. (ctx.var.args or '') + end + end + + local _, mirror_host = resolver_host(conf.host) + ctx.var.upstream_mirror_host = mirror_host + ctx.var.upstream_mirror_uri = mirror_host .. uri + + if has_mod then + apisix_ngx_client.enable_mirror() + end +end + + +function _M.rewrite(conf, ctx) + core.log.info("proxy mirror plugin rewrite phase, conf: ", core.json.delay_encode(conf)) + + if conf.sample_ratio == 1 then + enable_mirror(ctx, conf) + ctx.enable_mirror = true + else + local val = math_random() + core.log.info("mirror request sample_ratio conf: ", conf.sample_ratio, + ", random value: ", val) + if val < conf.sample_ratio then + enable_mirror(ctx, conf) + ctx.enable_mirror = true + end + end + +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-rewrite.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-rewrite.lua new file mode 100644 index 0000000..21f44bc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/proxy-rewrite.lua @@ -0,0 +1,398 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local plugin_name = "proxy-rewrite" +local pairs = pairs +local ipairs = ipairs +local ngx = ngx +local type = type +local re_sub = ngx.re.sub +local re_match = ngx.re.match +local req_set_uri = ngx.req.set_uri +local sub_str = string.sub +local str_find = core.string.find + +local switch_map = {GET = ngx.HTTP_GET, POST = ngx.HTTP_POST, PUT = ngx.HTTP_PUT, + HEAD = ngx.HTTP_HEAD, DELETE = ngx.HTTP_DELETE, + OPTIONS = ngx.HTTP_OPTIONS, MKCOL = ngx.HTTP_MKCOL, + COPY = ngx.HTTP_COPY, MOVE = ngx.HTTP_MOVE, + PROPFIND = ngx.HTTP_PROPFIND, LOCK = ngx.HTTP_LOCK, + UNLOCK = ngx.HTTP_UNLOCK, PATCH = ngx.HTTP_PATCH, + TRACE = ngx.HTTP_TRACE, + } +local schema_method_enum = {} +for key in pairs(switch_map) do + core.table.insert(schema_method_enum, key) +end + +local lrucache = core.lrucache.new({ + type = "plugin", +}) + +core.ctx.register_var("proxy_rewrite_regex_uri_captures", function(ctx) + return ctx.proxy_rewrite_regex_uri_captures +end) + +local schema = { + type = "object", + properties = { + uri = { + description = "new uri for upstream", + type = "string", + minLength = 1, + maxLength = 4096, + pattern = [[^\/.*]], + }, + method = { + description = "proxy route method", + type = "string", + enum = schema_method_enum + }, + regex_uri = { + description = "new uri that substitute from client uri " .. 
+ "for upstream, lower priority than uri property", + type = "array", + minItems = 2, + items = { + description = "regex uri", + type = "string", + } + }, + host = { + description = "new host for upstream", + type = "string", + pattern = [[^[0-9a-zA-Z-.]+(:\d{1,5})?$]], + }, + headers = { + description = "new headers for request", + oneOf = { + { + type = "object", + minProperties = 1, + additionalProperties = false, + properties = { + add = { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + { type = "string" }, + { type = "number" } + } + } + }, + }, + set = { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + { type = "string" }, + { type = "number" }, + } + } + }, + }, + remove = { + type = "array", + minItems = 1, + items = { + type = "string", + -- "Referer" + pattern = "^[^:]+$" + } + }, + }, + }, + { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + { type = "string" }, + { type = "number" } + } + } + }, + } + }, + + }, + use_real_request_uri_unsafe = { + description = "use real_request_uri instead, THIS IS VERY UNSAFE.", + type = "boolean", + default = false, + }, + }, + minProperties = 1, +} + + +local _M = { + version = 0.1, + priority = 1008, + name = plugin_name, + schema = schema, +} + +local function is_new_headers_conf(headers) + return (headers.add and type(headers.add) == "table") or + (headers.set and type(headers.set) == "table") or + (headers.remove and type(headers.remove) == "table") +end + +local function check_set_headers(headers) + for field, value in pairs(headers) do + if type(field) ~= 'string' then + return false, 'invalid type as header field' + end + + if type(value) ~= 'string' and type(value) ~= 'number' then + return false, 'invalid type as header value' + end + + if #field == 0 then + return false, 'invalid field length in header' + end + + core.log.info("header field: ", field) + if not 
core.utils.validate_header_field(field) then + return false, 'invalid field character in header' + end + if not core.utils.validate_header_value(value) then + return false, 'invalid value character in header' + end + end + + return true +end + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + if conf.regex_uri and #conf.regex_uri > 0 then + if (#conf.regex_uri % 2 ~= 0) then + return false, "The length of regex_uri should be an even number" + end + for i = 1, #conf.regex_uri, 2 do + local _, _, err = re_sub("/fake_uri", conf.regex_uri[i], + conf.regex_uri[i + 1], "jo") + if err then + return false, "invalid regex_uri(" .. conf.regex_uri[i] .. + ", " .. conf.regex_uri[i + 1] .. "): " .. err + end + end + end + + -- check headers + if not conf.headers then + return true + end + + if conf.headers then + if not is_new_headers_conf(conf.headers) then + ok, err = check_set_headers(conf.headers) + if not ok then + return false, err + end + end + end + + return true +end + + +do + local upstream_vars = { + host = "upstream_host", + upgrade = "upstream_upgrade", + connection = "upstream_connection", + } + local upstream_names = {} + for name, _ in pairs(upstream_vars) do + core.table.insert(upstream_names, name) + end + + local function create_header_operation(hdr_conf) + local set = {} + local add = {} + + if is_new_headers_conf(hdr_conf) then + if hdr_conf.add then + for field, value in pairs(hdr_conf.add) do + core.table.insert_tail(add, field, value) + end + end + if hdr_conf.set then + for field, value in pairs(hdr_conf.set) do + core.table.insert_tail(set, field, value) + end + end + + else + for field, value in pairs(hdr_conf) do + core.table.insert_tail(set, field, value) + end + end + + return { + add = add, + set = set, + remove = hdr_conf.remove or {}, + } + end + + + local function escape_separator(s) + return re_sub(s, [[\?]], "%3F", "jo") + end + + +function _M.rewrite(conf, ctx) + 
for _, name in ipairs(upstream_names) do + if conf[name] then + ctx.var[upstream_vars[name]] = conf[name] + end + end + + local upstream_uri = ctx.var.uri + local separator_escaped = false + if conf.use_real_request_uri_unsafe then + upstream_uri = ctx.var.real_request_uri + end + + if conf.uri ~= nil then + separator_escaped = true + upstream_uri = core.utils.resolve_var(conf.uri, ctx.var, escape_separator) + + elseif conf.regex_uri ~= nil then + if not str_find(upstream_uri, "?") then + separator_escaped = true + end + + local error_msg + for i = 1, #conf.regex_uri, 2 do + local captures, err = re_match(upstream_uri, conf.regex_uri[i], "jo") + if err then + error_msg = "failed to match the uri " .. ctx.var.uri .. + " (" .. conf.regex_uri[i] .. ") " .. " : " .. err + break + end + + if captures then + ctx.proxy_rewrite_regex_uri_captures = captures + + local uri, _, err = re_sub(upstream_uri, + conf.regex_uri[i], conf.regex_uri[i + 1], "jo") + if uri then + upstream_uri = uri + else + error_msg = "failed to substitute the uri " .. ngx.var.uri .. + " (" .. conf.regex_uri[i] .. ") with " .. + conf.regex_uri[i + 1] .. " : " .. err + end + + break + end + end + + if error_msg ~= nil then + core.log.error(error_msg) + return 500, { error_msg = error_msg } + end + end + + if not conf.use_real_request_uri_unsafe then + local index + if separator_escaped then + index = str_find(upstream_uri, "?") + end + + if index then + upstream_uri = core.utils.uri_safe_encode(sub_str(upstream_uri, 1, index - 1)) .. + sub_str(upstream_uri, index) + else + -- The '?' may come from client request '%3f' when we use ngx.var.uri directly or + -- via regex_uri + upstream_uri = core.utils.uri_safe_encode(upstream_uri) + end + + req_set_uri(upstream_uri) + + if ctx.var.is_args == "?" then + if index then + ctx.var.upstream_uri = upstream_uri .. "&" .. (ctx.var.args or "") + else + ctx.var.upstream_uri = upstream_uri .. "?" .. 
(ctx.var.args or "") + end + else + ctx.var.upstream_uri = upstream_uri + end + else + ctx.var.upstream_uri = upstream_uri + end + + if conf.headers then + local hdr_op, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, + create_header_operation, conf.headers) + if not hdr_op then + core.log.error("failed to create header operation: ", err) + return + end + + local field_cnt = #hdr_op.add + for i = 1, field_cnt, 2 do + local val = core.utils.resolve_var_with_captures(hdr_op.add[i + 1], + ctx.proxy_rewrite_regex_uri_captures) + val = core.utils.resolve_var(val, ctx.var) + -- A nil or empty table value will cause add_header function to throw an error. + if val then + local header = hdr_op.add[i] + core.request.add_header(ctx, header, val) + end + end + + local field_cnt = #hdr_op.set + for i = 1, field_cnt, 2 do + local val = core.utils.resolve_var_with_captures(hdr_op.set[i + 1], + ctx.proxy_rewrite_regex_uri_captures) + val = core.utils.resolve_var(val, ctx.var) + core.request.set_header(ctx, hdr_op.set[i], val) + end + + local field_cnt = #hdr_op.remove + for i = 1, field_cnt do + core.request.set_header(ctx, hdr_op.remove[i], nil) + end + + end + + if conf.method then + ngx.req.set_method(switch_map[conf.method]) + end +end + +end -- do + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/public-api.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/public-api.lua new file mode 100644 index 0000000..ad3f9dc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/public-api.lua @@ -0,0 +1,55 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local router = require("apisix.router") + +local schema = { + type = "object", + properties = { + uri = {type = "string"}, + }, +} + + +local _M = { + version = 0.1, + priority = 501, + name = "public-api", + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +function _M.access(conf, ctx) + -- overwrite the uri in the ctx when the user has set the target uri + ctx.var.uri = conf.uri or ctx.var.uri + + -- perform route matching + if router.api.match(ctx) then + return + end + + return 404 +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/real-ip.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/real-ip.lua new file mode 100644 index 0000000..2121996 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/real-ip.lua @@ -0,0 +1,185 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local ngx_re_split = require("ngx.re").split +local is_apisix_or, client = pcall(require, "resty.apisix.client") +local str_byte = string.byte +local str_sub = string.sub +local ipairs = ipairs +local type = type + +local lrucache = core.lrucache.new({ + type = "plugin", +}) + +local schema = { + type = "object", + properties = { + trusted_addresses = { + type = "array", + items = {anyOf = core.schema.ip_def}, + minItems = 1 + }, + source = { + type = "string", + minLength = 1 + }, + recursive = { + type = "boolean", + default = false + } + }, + required = {"source"}, +} + + +local plugin_name = "real-ip" + + +local _M = { + version = 0.1, + priority = 23000, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + if conf.trusted_addresses then + for _, cidr in ipairs(conf.trusted_addresses) do + if not core.ip.validate_cidr_or_ip(cidr) then + return false, "invalid ip address: " .. 
cidr + end + end + end + return true +end + + +local function addr_match(conf, ctx, addr) + local matcher, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, + core.ip.create_ip_matcher, conf.trusted_addresses) + if not matcher then + core.log.error("failed to create ip matcher: ", err) + return false + end + + return matcher:match(addr) +end + + +local function get_addr(conf, ctx) + if conf.source == "http_x_forwarded_for" then + -- use the last address from X-Forwarded-For header + -- after core.request.header function changed + -- we need to get original header value by using core.request.headers + local addrs = core.request.headers(ctx)["X-Forwarded-For"] + if not addrs then + return nil + end + + if type(addrs) == "table" then + addrs = addrs[#addrs] + end + + local idx = core.string.rfind_char(addrs, ",") + if not idx then + return addrs + end + + if conf.recursive and conf.trusted_addresses then + local split_addrs = ngx_re_split(addrs, ",\\s*", "jo") + for i = #split_addrs, 2, -1 do + if not addr_match(conf, ctx, split_addrs[i]) then + return split_addrs[i] + end + end + + return split_addrs[1] + end + + for i = idx + 1, #addrs do + if str_byte(addrs, i) == str_byte(" ") then + idx = idx + 1 + else + break + end + end + + return str_sub(addrs, idx + 1) + end + return ctx.var[conf.source] +end + + +function _M.rewrite(conf, ctx) + if not is_apisix_or then + core.log.error("need to build APISIX-Runtime to support setting real ip") + return 501 + end + + if conf.trusted_addresses then + local remote_addr = ctx.var.remote_addr + if not addr_match(conf, ctx, remote_addr) then + return + end + end + + local addr = get_addr(conf, ctx) + if not addr then + core.log.warn("missing real address") + return + end + + local ip, port = core.utils.parse_addr(addr) + if not ip or (not core.utils.parse_ipv4(ip) and not core.utils.parse_ipv6(ip)) then + core.log.warn("bad address: ", addr) + return + end + + if str_byte(ip, 1, 1) == str_byte("[") then + -- For IPv6, the 
`set_real_ip` accepts '::1' but not '[::1]' + ip = str_sub(ip, 2, #ip - 1) + end + + if port ~= nil and (port < 1 or port > 65535) then + core.log.warn("bad port: ", port) + return + end + + core.log.info("set real ip: ", ip, ", port: ", port) + + local ok, err = client.set_real_ip(ip, port) + if not ok then + core.log.error("failed to set real ip: ", err) + return + end + + -- flush cached vars in APISIX + ctx.var.remote_addr = nil + ctx.var.remote_port = nil + ctx.var.realip_remote_addr = nil + ctx.var.realip_remote_port = nil +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/redirect.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/redirect.lua new file mode 100644 index 0000000..421007d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/redirect.lua @@ -0,0 +1,264 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local tab_insert = table.insert +local tab_concat = table.concat +local string_format = string.format +local re_gmatch = ngx.re.gmatch +local re_sub = ngx.re.sub +local ipairs = ipairs +local ngx = ngx +local str_find = core.string.find +local str_sub = string.sub +local type = type +local math_random = math.random + +local lrucache = core.lrucache.new({ + ttl = 300, count = 100 +}) + + +local reg = [[(\\\$[0-9a-zA-Z_]+)|]] -- \$host + .. [[\$\{([0-9a-zA-Z_]+)\}|]] -- ${host} + .. [[\$([0-9a-zA-Z_]+)|]] -- $host + .. [[(\$|[^$\\]+)]] -- $ or others +local schema = { + type = "object", + properties = { + ret_code = {type = "integer", minimum = 200, default = 302}, + uri = {type = "string", minLength = 2, pattern = reg}, + regex_uri = { + description = "params for generating new uri that substitute from client uri, " .. + "first param is regular expression, the second one is uri template", + type = "array", + maxItems = 2, + minItems = 2, + items = { + description = "regex uri", + type = "string", + } + }, + http_to_https = {type = "boolean"}, + encode_uri = {type = "boolean", default = false}, + append_query_string = {type = "boolean", default = false}, + }, + oneOf = { + {required = {"uri"}}, + {required = {"regex_uri"}}, + {required = {"http_to_https"}} + } +} + + +local plugin_name = "redirect" + +local _M = { + version = 0.1, + priority = 900, + name = plugin_name, + schema = schema, +} + + +local function parse_uri(uri) + local iterator, err = re_gmatch(uri, reg, "jiox") + if not iterator then + return nil, err + end + + local t = {} + while true do + local m, err = iterator() + if err then + return nil, err + end + + if not m then + break + end + + tab_insert(t, m) + end + + return t +end + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + + if not ok then + return false, err + end + + if conf.regex_uri and #conf.regex_uri > 0 then + local _, 
_, err = re_sub("/fake_uri", conf.regex_uri[1], + conf.regex_uri[2], "jo") + if err then + local msg = string_format("invalid regex_uri (%s, %s), err:%s", + conf.regex_uri[1], conf.regex_uri[2], err) + return false, msg + end + end + + if conf.http_to_https and conf.append_query_string then + return false, "only one of `http_to_https` and `append_query_string` can be configured." + end + + return true +end + + + local tmp = {} +local function concat_new_uri(uri, ctx) + local passed_uri_segs, err = lrucache(uri, nil, parse_uri, uri) + if not passed_uri_segs then + return nil, err + end + + core.table.clear(tmp) + + for _, uri_segs in ipairs(passed_uri_segs) do + local pat1 = uri_segs[1] -- \$host + local pat2 = uri_segs[2] -- ${host} + local pat3 = uri_segs[3] -- $host + local pat4 = uri_segs[4] -- $ or others + core.log.info("parsed uri segs: ", core.json.delay_encode(uri_segs)) + + if pat2 or pat3 then + tab_insert(tmp, ctx.var[pat2 or pat3]) + else + tab_insert(tmp, pat1 or pat4) + end + end + + return tab_concat(tmp, "") +end + +local function get_port(attr) + local port + if attr then + port = attr.https_port + end + + if port then + return port + end + + local local_conf = core.config.local_conf() + local ssl = core.table.try_read_attr(local_conf, "apisix", "ssl") + if not ssl or not ssl["enable"] then + return port + end + + local ports = ssl["listen"] + if ports and #ports > 0 then + local idx = math_random(1, #ports) + port = ports[idx] + if type(port) == "table" then + port = port.port + end + end + + return port +end + +function _M.rewrite(conf, ctx) + core.log.info("plugin rewrite phase, conf: ", core.json.delay_encode(conf)) + + local ret_code = conf.ret_code + + local attr = plugin.plugin_attr(plugin_name) + local ret_port = get_port(attr) + + local uri = conf.uri + local regex_uri = conf.regex_uri + + local proxy_proto = core.request.header(ctx, "X-Forwarded-Proto") + local _scheme = proxy_proto or core.request.get_scheme(ctx) + if conf.http_to_https 
and _scheme == "http" then + if ret_port == nil or ret_port == 443 or ret_port <= 0 or ret_port > 65535 then + uri = "https://$host$request_uri" + else + uri = "https://$host:" .. ret_port .. "$request_uri" + end + + local method_name = ngx.req.get_method() + if method_name == "GET" or method_name == "HEAD" then + ret_code = 301 + else + -- https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308 + ret_code = 308 + end + end + + if ret_code then + local new_uri + if uri then + local err + new_uri, err = concat_new_uri(uri, ctx) + if not new_uri then + core.log.error("failed to generate new uri by: " .. uri .. err) + return 500 + end + elseif regex_uri then + local n, err + new_uri, n, err = re_sub(ctx.var.uri, regex_uri[1], + regex_uri[2], "jo") + if not new_uri then + local msg = string_format("failed to substitute the uri:%s (%s) with %s, error:%s", + ctx.var.uri, regex_uri[1], regex_uri[2], err) + core.log.error(msg) + return 500 + end + + if n < 1 then + return + end + end + + if not new_uri then + return + end + + local index = str_find(new_uri, "?") + if conf.encode_uri then + if index then + new_uri = core.utils.uri_safe_encode(str_sub(new_uri, 1, index-1)) .. + str_sub(new_uri, index) + else + new_uri = core.utils.uri_safe_encode(new_uri) + end + end + + if conf.append_query_string and ctx.var.is_args == "?" then + if index then + new_uri = new_uri .. "&" .. (ctx.var.args or "") + else + new_uri = new_uri .. "?" .. 
(ctx.var.args or "") + end + end + + core.response.set_header("Location", new_uri) + return ret_code + end + +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/referer-restriction.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/referer-restriction.lua new file mode 100644 index 0000000..85e8ea3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/referer-restriction.lua @@ -0,0 +1,141 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local ipairs = ipairs +local core = require("apisix.core") +local http = require "resty.http" +local lrucache = core.lrucache.new({ + ttl = 300, count = 512 +}) + + +local schema = { + type = "object", + properties = { + bypass_missing = { + type = "boolean", + default = false, + }, + whitelist = { + type = "array", + items = core.schema.host_def, + minItems = 1, + }, + blacklist = { + type = "array", + items = core.schema.host_def, + minItems = 1, + }, + message = { + type = "string", + minLength = 1, + maxLength = 1024, + default = "Your referer host is not allowed", + }, + }, + oneOf = { + {required = {"whitelist"}}, + {required = {"blacklist"}}, + }, +} + + +local plugin_name = "referer-restriction" + + +local _M = { + version = 0.1, + priority = 2990, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +local function match_host(matcher, host) + if matcher.map[host] then + return true + end + for _, h in ipairs(matcher.suffixes) do + if core.string.has_suffix(host, h) then + return true + end + end + return false +end + + +local function create_host_matcher(hosts) + local hosts_suffix = {} + local hosts_map = {} + + for _, h in ipairs(hosts) do + if h:byte(1) == 42 then -- start with '*' + core.table.insert(hosts_suffix, h:sub(2)) + else + hosts_map[h] = true + end + end + + return { + suffixes = hosts_suffix, + map = hosts_map, + } +end + + +function _M.access(conf, ctx) + local block = false + local referer = ctx.var.http_referer + if referer then + -- parse_uri doesn't support IPv6 literal, it is OK since we only + -- expect hostname in the whitelist. 
+ -- See https://github.com/ledgetech/lua-resty-http/pull/104 + local uri = http.parse_uri(nil, referer) + if not uri then + -- malformed Referer + referer = nil + else + -- take host part only + referer = uri[2] + end + end + + + if not referer then + block = not conf.bypass_missing + + elseif conf.whitelist then + local matcher = lrucache(conf.whitelist, nil, + create_host_matcher, conf.whitelist) + block = not match_host(matcher, referer) + elseif conf.blacklist then + local matcher = lrucache(conf.blacklist, nil, + create_host_matcher, conf.blacklist) + block = match_host(matcher, referer) + end + + if block then + return 403, { message = conf.message } + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/request-id.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/request-id.lua new file mode 100644 index 0000000..dac3162 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/request-id.lua @@ -0,0 +1,120 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local ngx = ngx +local core = require("apisix.core") +local uuid = require("resty.jit-uuid") +local nanoid = require("nanoid") +local math_random = math.random +local str_byte = string.byte +local ffi = require "ffi" + +local plugin_name = "request-id" + +local schema = { + type = "object", + properties = { + header_name = {type = "string", default = "X-Request-Id"}, + include_in_response = {type = "boolean", default = true}, + algorithm = { + type = "string", + enum = {"uuid", "nanoid", "range_id"}, + default = "uuid" + }, + range_id = { + type = "object", + properties = { + length = { + type = "integer", + minimum = 6, + default = 16 + }, + char_set = { + type = "string", + -- The Length is set to 6 just avoid too short length, it may repeat + minLength = 6, + default = "abcdefghijklmnopqrstuvwxyzABCDEFGHIGKLMNOPQRSTUVWXYZ0123456789" + } + }, + default = {} + } + } +} + +local _M = { + version = 0.1, + priority = 12015, + name = plugin_name, + schema = schema +} + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + +-- generate range_id +local function get_range_id(range_id) + local res = ffi.new("unsigned char[?]", range_id.length) + for i = 0, range_id.length - 1 do + res[i] = str_byte(range_id.char_set, math_random(#range_id.char_set)) + end + return ffi.string(res, range_id.length) +end + +local function get_request_id(conf) + if conf.algorithm == "uuid" then + return uuid() + end + if conf.algorithm == "nanoid" then + return nanoid.safe_simple() + end + + if conf.algorithm == "range_id" then + return get_range_id(conf.range_id) + end + + return uuid() +end + + +function _M.rewrite(conf, ctx) + local headers = ngx.req.get_headers() + local uuid_val + if not headers[conf.header_name] then + uuid_val = get_request_id(conf) + core.request.set_header(ctx, conf.header_name, uuid_val) + else + uuid_val = headers[conf.header_name] + end + + if conf.include_in_response then + ctx["request-id-" .. 
conf.header_name] = uuid_val + end +end + +function _M.header_filter(conf, ctx) + if not conf.include_in_response then + return + end + + local headers = ngx.resp.get_headers() + if not headers[conf.header_name] then + core.response.set_header(conf.header_name, ctx["request-id-" .. conf.header_name]) + end +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/request-validation.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/request-validation.lua new file mode 100644 index 0000000..0e6d36d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/request-validation.lua @@ -0,0 +1,120 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
function _M.rewrite(conf, ctx)
    -- Validate the request headers and/or body against the user-supplied
    -- JSON schemas. On any failure, reject with conf.rejected_code and
    -- either conf.rejected_msg or the raw validation error.
    local headers = core.request.headers(ctx)

    if conf.header_schema then
        local ok, err = core.schema.check(conf.header_schema, headers)
        if not ok then
            -- fix: add the ": " separator so the log reads
            -- "req schema validation failed: <err>" like the body branch,
            -- instead of fusing the message and the error together
            core.log.error("req schema validation failed: ", err)
            return conf.rejected_code, conf.rejected_msg or err
        end
    end

    if conf.body_schema then
        local req_body
        local body, err = core.request.get_body()
        if not body then
            if err then
                core.log.error("failed to get body: ", err)
            end
            return conf.rejected_code, conf.rejected_msg
        end

        local body_is_json = true
        if headers["content-type"] == "application/x-www-form-urlencoded" then
            -- use 0 to avoid truncated result and keep the behavior as the
            -- same as other platforms
            req_body, err = ngx.decode_args(body, 0)
            body_is_json = false
        else -- JSON as default
            req_body, err = core.json.decode(body)
        end

        if not req_body then
            core.log.error('failed to decode the req body: ', err)
            return conf.rejected_code, conf.rejected_msg or err
        end

        local ok, err = core.schema.check(conf.body_schema, req_body)
        if not ok then
            core.log.error("req schema validation failed: ", err)
            return conf.rejected_code, conf.rejected_msg or err
        end

        if body_is_json then
            -- ensure the JSON we check is the JSON we pass to the upstream,
            -- see https://bishopfox.com/blog/json-interoperability-vulnerabilities
            req_body = core.json.encode(req_body)
            ngx.req.set_body_data(req_body)
        end
    end
end
+-- +local core = require("apisix.core") +local expr = require("resty.expr.v1") +local re_compile = require("resty.core.regex").re_match_compile +local plugin_name = "response-rewrite" +local ngx = ngx +local ngx_header = ngx.header +local re_match = ngx.re.match +local re_sub = ngx.re.sub +local re_gsub = ngx.re.gsub +local pairs = pairs +local ipairs = ipairs +local type = type +local pcall = pcall +local content_decode = require("apisix.utils.content-decode") + + +local lrucache = core.lrucache.new({ + type = "plugin", +}) + +local schema = { + type = "object", + properties = { + headers = { + description = "new headers for response", + anyOf = { + { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + {type = "string"}, + {type = "number"}, + } + } + }, + }, + { + properties = { + add = { + type = "array", + minItems = 1, + items = { + type = "string", + -- "Set-Cookie: =; Max-Age=" + pattern = "^[^:]+:[^:]*[^/]$" + } + }, + set = { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + {type = "string"}, + {type = "number"}, + } + } + }, + }, + remove = { + type = "array", + minItems = 1, + items = { + type = "string", + -- "Set-Cookie" + pattern = "^[^:]+$" + } + }, + }, + } + } + }, + body = { + description = "new body for response", + type = "string", + }, + body_base64 = { + description = "whether new body for response need base64 decode before return", + type = "boolean", + default = false, + }, + status_code = { + description = "new status code for response", + type = "integer", + minimum = 200, + maximum = 598, + }, + vars = { + type = "array", + }, + filters = { + description = "a group of filters that modify response body" .. 
-- Validate a plain field->value header map used by the legacy "set only"
-- configuration form: every key must be a non-empty string and every
-- value must be a string or a number.
local function check_set_headers(headers)
    for name, val in pairs(headers) do
        if type(name) ~= 'string' then
            return false, 'invalid type as header field'
        end

        local val_type = type(val)
        if val_type ~= 'string' and val_type ~= 'number' then
            return false, 'invalid type as header value'
        end

        if name == '' then
            return false, 'invalid field length in header'
        end
    end

    return true
end
err = check_set_headers(conf.headers) + if not ok then + return false, err + end + end + end + + if conf.body_base64 then + if not conf.body or #conf.body == 0 then + return false, 'invalid base64 content' + end + local body = ngx.decode_base64(conf.body) + if not body then + return false, 'invalid base64 content' + end + end + + if conf.vars then + local ok, err = expr.new(conf.vars) + if not ok then + return false, "failed to validate the 'vars' expression: " .. err + end + end + + if conf.filters then + for _, filter in ipairs(conf.filters) do + local ok, err = pcall(re_compile, filter.regex, filter.options) + if not ok then + return false, "regex \"" .. filter.regex .. + "\" validation failed: " .. err + end + end + end + + return true +end + + +do + +function _M.body_filter(conf, ctx) + if not ctx.response_rewrite_matched then + return + end + + if conf.filters then + + local body = core.response.hold_body_chunk(ctx) + if not body then + return + end + + local err + if ctx.response_encoding ~= nil then + local decoder = content_decode.dispatch_decoder(ctx.response_encoding) + if not decoder then + core.log.error("filters may not work as expected ", + "due to unsupported compression encoding type: ", + ctx.response_encoding) + return + end + body, err = decoder(body) + if err ~= nil then + core.log.error("filters may not work as expected: ", err) + return + end + end + + for _, filter in ipairs(conf.filters) do + if filter.scope == "once" then + body, _, err = re_sub(body, filter.regex, filter.replace, filter.options) + else + body, _, err = re_gsub(body, filter.regex, filter.replace, filter.options) + end + if err ~= nil then + core.log.error("regex \"" .. filter.regex .. "\" substitutes failed:" .. 
err) + end + end + + ngx.arg[1] = body + return + end + + if conf.body then + ngx.arg[2] = true + if conf.body_base64 then + ngx.arg[1] = ngx.decode_base64(conf.body) + else + ngx.arg[1] = conf.body + end + end +end + + +local function create_header_operation(hdr_conf) + local set = {} + local add = {} + if is_new_headers_conf(hdr_conf) then + if hdr_conf.add then + for _, value in ipairs(hdr_conf.add) do + local m, err = re_match(value, [[^([^:\s]+)\s*:\s*([^:]+)$]], "jo") + if not m then + return nil, err + end + core.table.insert_tail(add, m[1], m[2]) + end + end + + if hdr_conf.set then + for field, value in pairs(hdr_conf.set) do + --reform header from object into array, so can avoid use pairs, which is NYI + core.table.insert_tail(set, field, value) + end + end + + else + for field, value in pairs(hdr_conf) do + core.table.insert_tail(set, field, value) + end + end + + return { + add = add, + set = set, + remove = hdr_conf.remove or {}, + } +end + + +function _M.header_filter(conf, ctx) + ctx.response_rewrite_matched = vars_matched(conf, ctx) + if not ctx.response_rewrite_matched then + return + end + + if conf.status_code then + ngx.status = conf.status_code + end + + -- if filters have no any match, response body won't be modified. 
+ if conf.filters or conf.body then + local response_encoding = ngx_header["Content-Encoding"] + core.response.clear_header_as_body_modified() + ctx.response_encoding = response_encoding + end + + if not conf.headers then + return + end + + local hdr_op, err = core.lrucache.plugin_ctx(lrucache, ctx, nil, + create_header_operation, conf.headers) + if not hdr_op then + core.log.error("failed to create header operation: ", err) + return + end + + local field_cnt = #hdr_op.add + for i = 1, field_cnt, 2 do + local val = core.utils.resolve_var(hdr_op.add[i+1], ctx.var) + core.response.add_header(hdr_op.add[i], val) + end + + local field_cnt = #hdr_op.set + for i = 1, field_cnt, 2 do + local val = core.utils.resolve_var(hdr_op.set[i+1], ctx.var) + core.response.set_header(hdr_op.set[i], val) + end + + local field_cnt = #hdr_op.remove + for i = 1, field_cnt do + core.response.set_header(hdr_op.remove[i], nil) + end +end + +end -- do + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/rocketmq-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/rocketmq-logger.lua new file mode 100644 index 0000000..2f0cd5b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/rocketmq-logger.lua @@ -0,0 +1,191 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local log_util = require("apisix.utils.log-util") +local producer = require ("resty.rocketmq.producer") +local acl_rpchook = require("resty.rocketmq.acl_rpchook") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") + +local type = type +local plugin_name = "rocketmq-logger" +local batch_processor_manager = bp_manager_mod.new("rocketmq logger") + +local lrucache = core.lrucache.new({ + type = "plugin", +}) + +local schema = { + type = "object", + properties = { + meta_format = { + type = "string", + default = "default", + enum = {"default", "origin"}, + }, + nameserver_list = { + type = "array", + minItems = 1, + items = { + type = "string" + } + }, + topic = {type = "string"}, + key = {type = "string"}, + tag = {type = "string"}, + log_format = {type = "object"}, + timeout = {type = "integer", minimum = 1, default = 3}, + use_tls = {type = "boolean", default = false}, + access_key = {type = "string", default = ""}, + secret_key = {type = "string", default = ""}, + include_req_body = {type = "boolean", default = false}, + include_req_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + include_resp_body = {type = "boolean", default = false}, + include_resp_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + }, + encrypt_fields = {"secret_key"}, + required = {"nameserver_list", "topic"} +} + +local metadata_schema = { + type = "object", + properties = { + log_format = { + type = "object" + } + }, +} + +local _M = { + version = 0.1, + priority = 402, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + local ok, 
-- Deliver one (possibly batched) log payload to the configured rocketmq
-- topic through an existing producer instance.
-- Returns true on success, or false plus a descriptive error message.
local function send_rocketmq_data(conf, log_message, prod)
    local send_result, send_err = prod:send(conf.topic, log_message, conf.tag, conf.key)
    if not send_result then
        local detail = "failed to send data to rocketmq topic: " .. send_err ..
                       ", nameserver_list: " .. core.json.encode(conf.nameserver_list)
        return false, detail
    end

    core.log.info("queue: ", send_result.sendResult.messageQueue.queueId)
    return true
end
err + end + core.log.info("rocketmq nameserver_list[1] port ", + prod.client.nameservers[1].port) + -- Generate a function to be executed by the batch processor + local func = function(entries, batch_max_size) + local data, err + if batch_max_size == 1 then + data = entries[1] + if type(data) ~= "string" then + data, err = core.json.encode(data) -- encode as single {} + end + else + data, err = core.json.encode(entries) -- encode as array [{}] + end + + if not data then + return false, 'error occurred while encoding the data: ' .. err + end + + core.log.info("send data to rocketmq: ", data) + return send_rocketmq_data(conf, data, prod) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/server-info.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/server-info.lua new file mode 100644 index 0000000..441b2ae --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/server-info.lua @@ -0,0 +1,316 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
-- Return the process's boot time, persisting it in the shared dict so
-- every worker reports the same value. Falls back to this module's load
-- time whenever the shared dict cannot be read or written.
local function get_boot_time()
    local cached, get_err = internal_status:get("server_info:boot_time")
    if get_err ~= nil then
        core.log.error("failed to get boot_time from shdict: ", get_err)
        return load_time
    end

    if cached ~= nil then
        return cached
    end

    -- first caller: record our own load time for everyone else
    local _, set_err = internal_status:set("server_info:boot_time", load_time)
    if set_err ~= nil then
        core.log.error("failed to save boot_time to shdict: ", set_err)
    end

    return load_time
end
+ + return server_info +end + + +local function get_server_info() + local info, err = get() + if not info then + core.log.error("failed to get server_info: ", err) + return 500 + end + + return 200, info +end + + +local function set(key, value, ttl) + local res_new, err = core.etcd.set(key, value, ttl) + if not res_new then + core.log.error("failed to set server_info: ", err) + return nil, err + end + + if not res_new.body.lease_id then + core.log.error("failed to get lease_id: ", err) + return nil, err + end + + lease_id = res_new.body.lease_id + + -- set or update lease_id + local ok, err = internal_status:set("lease_id", lease_id) + if not ok then + core.log.error("failed to set lease_id to shdict: ", err) + return nil, err + end + + return true +end + + +local function report(premature, report_ttl) + if premature then + return + end + + -- get apisix node info + local server_info, err = get() + if not server_info then + core.log.error("failed to get server_info: ", err) + return + end + + if server_info.etcd_version == "unknown" then + local res, err = core.etcd.server_version() + if not res then + core.log.error("failed to fetch etcd version: ", err) + return + + elseif type(res.body) ~= "table" then + core.log.error("failed to fetch etcd version: bad version info") + return + + else + if res.body.etcdcluster == "" then + server_info.etcd_version = res.body.etcdserver + else + server_info.etcd_version = res.body.etcdcluster + end + end + end + + -- get inside etcd data, if not exist, create it + local key = "/data_plane/server_info/" .. 
server_info.id + local res, err = core.etcd.get(key) + if not res or (res.status ~= 200 and res.status ~= 404) then + core.log.error("failed to get server_info from etcd: ", err) + return + end + + if not res.body.node then + local ok, err = set(key, server_info, report_ttl) + if not ok then + core.log.error("failed to set server_info to etcd: ", err) + return + end + + return + end + + local ok = core.table.deep_eq(server_info, res.body.node.value) + -- not equal, update it + if not ok then + local ok, err = set(key, server_info, report_ttl) + if not ok then + core.log.error("failed to set server_info to etcd: ", err) + return + end + + return + end + + -- get lease_id from ngx dict + lease_id, err = internal_status:get("lease_id") + if not lease_id then + core.log.error("failed to get lease_id from shdict: ", err) + return + end + + -- call keepalive + local res, err = core.etcd.keepalive(lease_id) + if not res then + core.log.error("send heartbeat failed: ", err) + return + end + + local data, err = core.json.encode(server_info) + if not data then + core.log.error("failed to encode server_info: ", err) + return + end + + local ok, err = internal_status:set("server_info", data) + if not ok then + core.log.error("failed to encode and save server info: ", err) + return + end +end + + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if not ok then + return false, err + end + + return true +end + + +function _M.control_api() + return { + { + methods = {"GET"}, + uris ={"/v1/server_info"}, + handler = get_server_info, + } + } +end + + +function _M.init() + core.log.warn("The server-info plugin is deprecated and will be removed in a future release.") + if core.config ~= require("apisix.core.config_etcd") then + -- we don't need to report server info if etcd is not in use. 
+ return + end + + + local local_conf = core.config.local_conf() + local deployment_role = core.table.try_read_attr( + local_conf, "deployment", "role") + if deployment_role == "data_plane" then + -- data_plane should not write to etcd + return + end + + local attr = plugin.plugin_attr(plugin_name) + local ok, err = core.schema.check(attr_schema, attr) + if not ok then + core.log.error("failed to check plugin_attr: ", err) + return + end + + local report_ttl = attr and attr.report_ttl or default_report_ttl + local start_at = ngx_time() + + local fn = function() + local now = ngx_time() + -- If ttl remaining time is less than half, then flush the ttl + if now - start_at >= (report_ttl / 2) then + start_at = now + report(nil, report_ttl) + end + end + + if ngx_worker_id() == 0 then + local ok, err = ngx_timer_at(0, report, report_ttl) + if not ok then + core.log.error("failed to create initial timer to report server info: ", err) + return + end + end + + timers.register_timer("plugin#server-info", fn, true) + + core.log.info("timer update the server info ttl, current ttl: ", report_ttl) +end + + +function _M.destroy() + timers.unregister_timer("plugin#server-info", true) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/serverless-post-function.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/serverless-post-function.lua new file mode 100644 index 0000000..cd3a3f9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/serverless-post-function.lua @@ -0,0 +1,17 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
local serverless_plugin_factory = require("apisix.plugins.serverless.init")

-- priority -2000: the post-function plugin runs after nearly every
-- other plugin within a phase
return serverless_plugin_factory("serverless-post-function", -2000)
local serverless_plugin_factory = require("apisix.plugins.serverless.init")

-- priority 10000: the pre-function plugin runs before nearly every
-- other plugin within a phase
return serverless_plugin_factory("serverless-pre-function", 10000)
type(request_processor) + end + + local schema = { + type = "object", + properties = { + function_uri = {type = "string"}, + authorization = authz_schema, + timeout = {type = "integer", minimum = 100, default = 3000}, + ssl_verify = {type = "boolean", default = true}, + keepalive = {type = "boolean", default = true}, + keepalive_timeout = {type = "integer", minimum = 1000, default = 60000}, + keepalive_pool = {type = "integer", minimum = 1, default = 5} + }, + required = {"function_uri"} + } + + local _M = { + version = version, + priority = priority, + name = plugin_name, + schema = schema, + metadata_schema = metadata_schema + } + + function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + return core.schema.check(schema, conf) + end + + function _M.access(conf, ctx) + local uri_args = core.request.get_uri_args(ctx) + local headers = core.request.headers(ctx) or {} + + local req_body, err = core.request.get_body() + + if err then + core.log.error("error while reading request body: ", err) + return 400 + end + + -- forward the url path came through the matched uri + local url_decoded = url.parse(conf.function_uri) + local path = url_decoded.path or "/" + + if ctx.curr_req_matched and ctx.curr_req_matched[":ext"] then + local end_path = ctx.curr_req_matched[":ext"] + + if path:byte(-1) == string.byte("/") or end_path:byte(1) == string.byte("/") then + path = path .. end_path + else + path = path .. "/" .. 
end_path + end + end + + + headers["host"] = url_decoded.host + local params = { + method = ngx.req.get_method(), + body = req_body, + query = uri_args, + headers = headers, + path = path, + keepalive = conf.keepalive, + ssl_verify = conf.ssl_verify + } + + -- Keepalive options + if conf.keepalive then + params.keepalive_timeout = conf.keepalive_timeout + params.keepalive_pool = conf.keepalive_pool + end + + -- modify request info (if required) + request_processor(conf, ctx, params) + + local httpc = http.new() + httpc:set_timeout(conf.timeout) + + local res + res, err = httpc:request_uri(conf.function_uri, params) + + if not res then + core.log.error("failed to process ", plugin_name, ", err: ", err) + return 503 + end + + -- According to RFC7540 https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2, + -- endpoint must not generate any connection specific headers for HTTP/2 requests. + local response_headers = res.headers + if ngx.var.http2 then + response_headers["Connection"] = nil + response_headers["Keep-Alive"] = nil + response_headers["Proxy-Connection"] = nil + response_headers["Upgrade"] = nil + response_headers["Transfer-Encoding"] = nil + end + + -- setting response headers + core.response.set_header(response_headers) + + return res.status, res.body + end + + return _M +end diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/serverless/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/serverless/init.lua new file mode 100644 index 0000000..6ed8c96 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/serverless/init.lua @@ -0,0 +1,124 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local ipairs = ipairs +local pcall = pcall +local loadstring = loadstring +local require = require +local type = type + + +local phases = { + "rewrite", "access", "header_filter", "body_filter", + "log", "before_proxy" +} + + +return function(plugin_name, priority) + local core = require("apisix.core") + + + local lrucache = core.lrucache.new({ + type = "plugin", + }) + + local schema = { + type = "object", + properties = { + phase = { + type = "string", + default = "access", + enum = phases, + }, + functions = { + type = "array", + items = {type = "string"}, + minItems = 1 + }, + }, + required = {"functions"} + } + + local _M = { + version = 0.1, + priority = priority, + name = plugin_name, + schema = schema, + } + + local function load_funcs(functions) + local funcs = core.table.new(#functions, 0) + + local index = 1 + for _, func_str in ipairs(functions) do + local _, func = pcall(loadstring(func_str)) + funcs[index] = func + index = index + 1 + end + + return funcs + end + + local function call_funcs(phase, conf, ctx) + if phase ~= conf.phase then + return + end + + local functions = core.lrucache.plugin_ctx(lrucache, ctx, nil, + load_funcs, conf.functions) + + for _, func in ipairs(functions) do + local code, body = func(conf, ctx) + if code or body then + return code, body + end + end + end + + function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + if 
not ok then + return false, err + end + + local functions = conf.functions + for _, func_str in ipairs(functions) do + local func, err = loadstring(func_str) + if err then + return false, 'failed to loadstring: ' .. err + end + + local ok, ret = pcall(func) + if not ok then + return false, 'pcall error: ' .. ret + end + if type(ret) ~= 'function' then + return false, 'only accept Lua function,' + .. ' the input code type is ' .. type(ret) + end + end + + return true + end + + for _, phase in ipairs(phases) do + _M[phase] = function (conf, ctx) + return call_funcs(phase, conf, ctx) + end + end + + return _M +end diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/skywalking-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/skywalking-logger.lua new file mode 100644 index 0000000..8a7e309 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/skywalking-logger.lua @@ -0,0 +1,194 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local log_util = require("apisix.utils.log-util") +local core = require("apisix.core") +local http = require("resty.http") +local url = require("net.url") + +local base64 = require("ngx.base64") +local ngx_re = require("ngx.re") + +local ngx = ngx +local tostring = tostring +local tonumber = tonumber + +local plugin_name = "skywalking-logger" +local batch_processor_manager = bp_manager_mod.new("skywalking logger") +local schema = { + type = "object", + properties = { + endpoint_addr = core.schema.uri_def, + service_name = {type = "string", default = "APISIX"}, + service_instance_name = {type = "string", default = "APISIX Instance Name"}, + log_format = {type = "object"}, + timeout = {type = "integer", minimum = 1, default = 3}, + include_req_body = {type = "boolean", default = false}, + include_req_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + include_resp_body = { type = "boolean", default = false }, + include_resp_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + }, + required = {"endpoint_addr"}, +} + + +local metadata_schema = { + type = "object", + properties = { + log_format = { + type = "object" + } + }, +} + + +local _M = { + version = 0.1, + priority = 408, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + local check = {"endpoint_addr"} + core.utils.check_https(check, conf, plugin_name) + return core.schema.check(schema, conf) +end + + +local function send_http_data(conf, log_message) + local err_msg + local res = true + local url_decoded = url.parse(conf.endpoint_addr) + local host = url_decoded.host + local port = url_decoded.port + + core.log.info("sending a batch logs to ", 
conf.endpoint_addr) + + local httpc = http.new() + httpc:set_timeout(conf.timeout * 1000) + local ok, err = httpc:connect(host, port) + + if not ok then + return false, "failed to connect to host[" .. host .. "] port[" + .. tostring(port) .. "] " .. err + end + + local httpc_res, httpc_err = httpc:request({ + method = "POST", + path = "/v3/logs", + body = log_message, + headers = { + ["Host"] = url_decoded.host, + ["Content-Type"] = "application/json", + } + }) + + if not httpc_res then + return false, "error while sending data to [" .. host .. "] port[" + .. tostring(port) .. "] " .. httpc_err + end + + -- some error occurred in the server + if httpc_res.status >= 400 then + res = false + err_msg = "server returned status code[" .. httpc_res.status .. "] host[" + .. host .. "] port[" .. tostring(port) .. "] " + .. "body[" .. httpc_res:read_body() .. "]" + end + + return res, err_msg +end + + +function _M.body_filter(conf, ctx) + log_util.collect_body(conf, ctx) +end + + +function _M.log(conf, ctx) + local log_body = log_util.get_log_entry(plugin_name, conf, ctx) + local trace_context + local sw_header = ngx.req.get_headers()["sw8"] + if sw_header then + -- 1-TRACEID-SEGMENTID-SPANID-PARENT_SERVICE-PARENT_INSTANCE-PARENT_ENDPOINT-IPPORT + local ids = ngx_re.split(sw_header, '-') + if #ids == 8 then + trace_context = { + traceId = base64.decode_base64url(ids[2]), + traceSegmentId = base64.decode_base64url(ids[3]), + spanId = tonumber(ids[4]) + } + else + core.log.warn("failed to parse trace_context header: ", sw_header) + end + end + + local service_instance_name = conf.service_instance_name + if service_instance_name == "$hostname" then + service_instance_name = core.utils.gethostname() + end + + local entry = { + traceContext = trace_context, + body = { + json = { + json = core.json.encode(log_body, true) + } + }, + service = conf.service_name, + serviceInstance = service_instance_name, + endpoint = ctx.var.uri, + } + + if batch_processor_manager:add_entry(conf, 
entry) then + return + end + + -- Generate a function to be executed by the batch processor + local func = function(entries, batch_max_size) + local data, err = core.json.encode(entries) + if not data then + return false, 'error occurred while encoding the data: ' .. err + end + + return send_http_data(conf, data) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/skywalking.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/skywalking.lua new file mode 100644 index 0000000..2ef435b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/skywalking.lua @@ -0,0 +1,158 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local require = require +local core = require("apisix.core") +local plugin = require("apisix.plugin") +local process = require("ngx.process") +local sw_tracer = require("skywalking.tracer") +local Span = require("skywalking.span") +local ngx = ngx +local math = math + +local plugin_name = "skywalking" +local attr_schema = { + type = "object", + properties = { + service_name = { + type = "string", + description = "service name for skywalking", + default = "APISIX", + }, + service_instance_name = { + type = "string", + description = "User Service Instance Name", + default = "APISIX Instance Name", + }, + endpoint_addr = { + type = "string", + default = "http://127.0.0.1:12800", + }, + report_interval = { + type = "integer", + }, + }, +} + +local schema = { + type = "object", + properties = { + sample_ratio = { + type = "number", + minimum = 0.00001, + maximum = 1, + default = 1 + } + }, +} + + +local _M = { + version = 0.1, + priority = 12010, + name = plugin_name, + schema = schema, + attr_schema = attr_schema, + run_policy = "prefer_route", +} + + +function _M.check_schema(conf) + local check = {"endpoint_addr"} + core.utils.check_https(check, conf, plugin_name) + return core.schema.check(schema, conf) +end + + +function _M.rewrite(conf, ctx) + core.log.debug("rewrite phase of skywalking plugin") + ctx.skywalking_sample = false + if conf.sample_ratio == 1 or math.random() < conf.sample_ratio then + ctx.skywalking_sample = true + sw_tracer:start("upstream service") + core.log.info("tracer start") + return + end + + core.log.info("miss sampling, ignore") +end + + +function _M.delayed_body_filter(conf, ctx) + if ctx.skywalking_sample and ngx.arg[2] then + Span.setComponentId(ngx.ctx.exitSpan, 6002) + Span.setComponentId(ngx.ctx.entrySpan, 6002) + sw_tracer:finish() + core.log.info("tracer finish") + end +end + + +function _M.log(conf, ctx) + if ctx.skywalking_sample then + sw_tracer:prepareForReport() + core.log.info("tracer prepare for report") + end +end + + 
+function _M.init() + if process.type() ~= "worker" then + return + end + + local local_plugin_info = plugin.plugin_attr(plugin_name) + local_plugin_info = local_plugin_info and core.table.clone(local_plugin_info) or {} + local ok, err = core.schema.check(attr_schema, local_plugin_info) + if not ok then + core.log.error("failed to check the plugin_attr[", plugin_name, "]", + ": ", err) + return + end + + core.log.info("plugin attribute: ", + core.json.delay_encode(local_plugin_info)) + + -- TODO: maybe need to fetch them from plugin-metadata + local metadata_shdict = ngx.shared.tracing_buffer + + if local_plugin_info.service_instance_name == "$hostname" then + local_plugin_info.service_instance_name = core.utils.gethostname() + end + + metadata_shdict:set('serviceName', local_plugin_info.service_name) + metadata_shdict:set('serviceInstanceName', local_plugin_info.service_instance_name) + + local sk_cli = require("skywalking.client") + if local_plugin_info.report_interval then + sk_cli.backendTimerDelay = local_plugin_info.report_interval + end + + sk_cli:startBackendTimer(local_plugin_info.endpoint_addr) +end + + +function _M.destroy() + if process.type() ~= "worker" then + return + end + + local sk_cli = require("skywalking.client") + sk_cli:destroyBackendTimer() +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/sls-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/sls-logger.lua new file mode 100644 index 0000000..819f084 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/sls-logger.lua @@ -0,0 +1,197 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") + + +local plugin_name = "sls-logger" +local ngx = ngx +local rf5424 = require("apisix.utils.rfc5424") +local tcp = ngx.socket.tcp +local tostring = tostring +local ipairs = ipairs +local table = table + + +local batch_processor_manager = bp_manager_mod.new(plugin_name) +local schema = { + type = "object", + properties = { + include_req_body = {type = "boolean", default = false}, + include_req_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + include_resp_body = { type = "boolean", default = false }, + include_resp_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + timeout = {type = "integer", minimum = 1, default= 5000}, + log_format = {type = "object"}, + host = {type = "string"}, + port = {type = "integer"}, + project = {type = "string"}, + logstore = {type = "string"}, + access_key_id = {type = "string"}, + access_key_secret = {type ="string"} + }, + encrypt_fields = {"access_key_secret"}, + required = {"host", "port", "project", "logstore", "access_key_id", "access_key_secret"} +} + +local metadata_schema = { + type = "object", + properties = { + log_format = { + type = "object" + } + }, +} + +local _M = { + version = 0.1, + priority = 406, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + +function _M.check_schema(conf,schema_type) + if 
schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + return core.schema.check(schema, conf) +end + +local function send_tcp_data(route_conf, log_message) + local err_msg + local res = true + local sock, soc_err = tcp() + local can_close + + if not sock then + return false, "failed to init the socket" .. soc_err + end + + sock:settimeout(route_conf.timeout) + local ok, err = sock:connect(route_conf.host, route_conf.port) + if not ok then + return false, "failed to connect to TCP server: host[" .. route_conf.host + .. "] port[" .. tostring(route_conf.port) .. "] err: " .. err + end + + ok, err = sock:sslhandshake(true, nil, false) + if not ok then + return false, "failed to perform TLS handshake to TCP server: host[" + .. route_conf.host .. "] port[" .. tostring(route_conf.port) + .. "] err: " .. err + end + + core.log.debug("sls logger send data ", log_message) + ok, err = sock:send(log_message) + if not ok then + res = false + can_close = true + err_msg = "failed to send data to TCP server: host[" .. route_conf.host + .. "] port[" .. tostring(route_conf.port) .. "] err: " .. 
err + else + ok, err = sock:setkeepalive(120 * 1000, 20) + if not ok then + can_close = true + core.log.warn("failed to set socket keepalive: host[", route_conf.host, + "] port[", tostring(route_conf.port), "] err: ", err) + end + end + + if can_close then + ok, err = sock:close() + if not ok then + core.log.warn("failed to close the TCP connection, host[", + route_conf.host, "] port[", route_conf.port, "] ", err) + end + end + + return res, err_msg +end + +local function combine_syslog(entries) + local items = {} + for _, entry in ipairs(entries) do + table.insert(items, entry.data) + core.log.info("buffered logs:", entry.data) + end + + return table.concat(items) +end + +_M.combine_syslog = combine_syslog + +local function handle_log(entries) + local data = combine_syslog(entries) + if not data then + return true + end + + return send_tcp_data(entries[1].route_conf, data) +end + + +function _M.body_filter(conf, ctx) + log_util.collect_body(conf, ctx) +end + + +-- log phase in APISIX +function _M.log(conf, ctx) + local entry = log_util.get_log_entry(plugin_name, conf, ctx) + local json_str, err = core.json.encode(entry) + if not json_str then + core.log.error('error occurred while encoding the data: ', err) + return + end + + local structured_data = { + {name = "project", value = conf.project}, + {name = "logstore", value = conf.logstore}, + {name = "access-key-id", value = conf.access_key_id}, + {name = "access-key-secret", value = conf.access_key_secret}, + } + local rf5424_data = rf5424.encode("SYSLOG", "INFO", ctx.var.host, "apisix", + ctx.var.pid, json_str, structured_data) + core.log.info("collect_data:" .. 
rf5424_data) + local process_context = { + data = rf5424_data, + route_conf = conf + } + + if batch_processor_manager:add_entry(conf, process_context) then + return + end + + batch_processor_manager:add_entry_to_new_processor(conf, process_context, ctx, handle_log) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/splunk-hec-logging.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/splunk-hec-logging.lua new file mode 100644 index 0000000..c93b273 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/splunk-hec-logging.lua @@ -0,0 +1,186 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local core = require("apisix.core") +local ngx = ngx +local ngx_now = ngx.now +local http = require("resty.http") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local table_insert = core.table.insert +local table_concat = core.table.concat +local ipairs = ipairs + + +local DEFAULT_SPLUNK_HEC_ENTRY_SOURCE = "apache-apisix-splunk-hec-logging" +local DEFAULT_SPLUNK_HEC_ENTRY_TYPE = "_json" + + +local plugin_name = "splunk-hec-logging" +local batch_processor_manager = bp_manager_mod.new(plugin_name) + + +local schema = { + type = "object", + properties = { + endpoint = { + type = "object", + properties = { + uri = core.schema.uri_def, + token = { + type = "string", + }, + channel = { + type = "string", + }, + timeout = { + type = "integer", + minimum = 1, + default = 10 + }, + keepalive_timeout = { + type = "integer", + minimum = 1000, + default = 60000, + description = "keepalive timeout in milliseconds", + } + }, + required = { "uri", "token" } + }, + ssl_verify = { + type = "boolean", + default = true + }, + log_format = {type = "object"}, + }, + required = { "endpoint" }, +} + +local metadata_schema = { + type = "object", + properties = { + log_format = { + type = "object" + } + }, +} + +local _M = { + version = 0.1, + priority = 409, + name = plugin_name, + metadata_schema = metadata_schema, + schema = batch_processor_manager:wrap_schema(schema), +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + return core.schema.check(schema, conf) +end + + +local function get_logger_entry(conf, ctx) + local entry, customized = log_util.get_log_entry(plugin_name, conf, ctx) + local splunk_entry = { + time = ngx_now(), + source = DEFAULT_SPLUNK_HEC_ENTRY_SOURCE, + sourcetype = DEFAULT_SPLUNK_HEC_ENTRY_TYPE, + } + + if not customized then + splunk_entry.host = entry.server.hostname + 
splunk_entry.event = { + request_url = entry.request.url, + request_method = entry.request.method, + request_headers = entry.request.headers, + request_query = entry.request.querystring, + request_size = entry.request.size, + response_headers = entry.response.headers, + response_status = entry.response.status, + response_size = entry.response.size, + latency = entry.latency, + upstream = entry.upstream, + } + else + splunk_entry.host = core.utils.gethostname() + splunk_entry.event = entry + end + + return splunk_entry +end + + +local function send_to_splunk(conf, entries) + local request_headers = {} + request_headers["Content-Type"] = "application/json" + request_headers["Authorization"] = "Splunk " .. conf.endpoint.token + if conf.endpoint.channel then + request_headers["X-Splunk-Request-Channel"] = conf.endpoint.channel + end + + local http_new = http.new() + http_new:set_timeout(conf.endpoint.timeout * 1000) + local t = {} + for _, e in ipairs(entries) do + table_insert(t, core.json.encode(e)) + end + + local res, err = http_new:request_uri(conf.endpoint.uri, { + ssl_verify = conf.ssl_verify, + method = "POST", + body = table_concat(t), + headers = request_headers, + keepalive_timeout = conf.endpoint.keepalive_timeout + }) + + if not res then + return false, "failed to write log to splunk, " .. err + end + + if res.status ~= 200 then + local body = core.json.decode(res.body) + if not body then + return false, "failed to send splunk, http status code: " .. res.status + else + return false, "failed to send splunk, " .. 
body.text + end + end + + return true +end + + +function _M.log(conf, ctx) + local entry = get_logger_entry(conf, ctx) + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + local process = function(entries) + return send_to_splunk(conf, entries) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, process) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/syslog.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/syslog.lua new file mode 100644 index 0000000..1f35395 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/syslog.lua @@ -0,0 +1,99 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local core = require("apisix.core") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local syslog = require("apisix.plugins.syslog.init") +local plugin_name = "syslog" + +local batch_processor_manager = bp_manager_mod.new("sys logger") +local schema = { + type = "object", + properties = { + host = {type = "string"}, + port = {type = "integer"}, + flush_limit = {type = "integer", minimum = 1, default = 4096}, + drop_limit = {type = "integer", default = 1048576}, + timeout = {type = "integer", minimum = 1, default = 3000}, + sock_type = {type = "string", default = "tcp", enum = {"tcp", "udp"}}, + pool_size = {type = "integer", minimum = 5, default = 5}, + tls = {type = "boolean", default = false}, + log_format = {type = "object"}, + include_req_body = {type = "boolean", default = false}, + include_req_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + include_resp_body = { type = "boolean", default = false }, + include_resp_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + }, + required = {"host", "port"} +} + + +local schema = batch_processor_manager:wrap_schema(schema) + +local metadata_schema = { + type = "object", + properties = { + log_format = { + type = "object" + } + }, +} + +local _M = { + version = 0.1, + priority = 401, + name = plugin_name, + schema = schema, + metadata_schema = metadata_schema, + flush_syslog = syslog.flush_syslog, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + core.utils.check_tls_bool({"tls"}, conf, plugin_name) + return core.schema.check(schema, conf) +end + + +function _M.body_filter(conf, ctx) + log_util.collect_body(conf, ctx) +end + + +function _M.log(conf, ctx) + local entry = log_util.get_log_entry(plugin_name, conf, ctx) + syslog.push_entry(conf, ctx, entry) 
+end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/syslog/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/syslog/init.lua new file mode 100644 index 0000000..8a3d90e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/syslog/init.lua @@ -0,0 +1,112 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local core = require("apisix.core") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local logger_socket = require("resty.logger.socket") +local rfc5424 = require("apisix.utils.rfc5424") +local ipairs = ipairs +local table_insert = core.table.insert +local table_concat = core.table.concat + +local batch_processor_manager = bp_manager_mod.new("sys logger") + +local lrucache = core.lrucache.new({ + ttl = 300, count = 512, serial_creating = true, +}) + +local _M = {} + +function _M.flush_syslog(logger) + local ok, err = logger:flush(logger) + if not ok then + core.log.error("failed to flush message:", err) + end + + return ok +end + + +local function send_syslog_data(conf, log_message, api_ctx) + local err_msg + local res = true + + core.log.info("sending a batch logs to ", conf.host, ":", conf.port) + + -- fetch it from lrucache + local logger, err = core.lrucache.plugin_ctx( + lrucache, api_ctx, nil, logger_socket.new, logger_socket, { + host = conf.host, + port = conf.port, + flush_limit = conf.flush_limit, + drop_limit = conf.drop_limit, + timeout = conf.timeout, + sock_type = conf.sock_type, + pool_size = conf.pool_size, + tls = conf.tls, + } + ) + + if not logger then + res = false + err_msg = "failed when initiating the sys logger processor".. err + end + + -- reuse the logger object + local ok, err = logger:log(log_message) + + if not ok then + res = false + err_msg = "failed to log message" .. err + end + + return res, err_msg +end + + +-- called in log phase of APISIX +function _M.push_entry(conf, ctx, entry) + local json_str, err = core.json.encode(entry) + if not json_str then + core.log.error('error occurred while encoding the data: ', err) + return + end + + local rfc5424_data = rfc5424.encode("SYSLOG", "INFO", ctx.var.host, + "apisix", ctx.var.pid, json_str) + core.log.info("collect_data:" .. 
rfc5424_data) + if batch_processor_manager:add_entry(conf, rfc5424_data) then + return + end + + -- Generate a function to be executed by the batch processor + local cp_ctx = core.table.clone(ctx) + local func = function(entries) + local items = {} + for _, e in ipairs(entries) do + table_insert(items, e) + core.log.debug("buffered logs:", e) + end + + return send_syslog_data(conf, table_concat(items), cp_ctx) + end + + batch_processor_manager:add_entry_to_new_processor(conf, rfc5424_data, ctx, func) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/tcp-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/tcp-logger.lua new file mode 100644 index 0000000..7482fe5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/tcp-logger.lua @@ -0,0 +1,161 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local plugin_name = "tcp-logger" +local tostring = tostring +local ngx = ngx +local tcp = ngx.socket.tcp + + +local batch_processor_manager = bp_manager_mod.new("tcp logger") +local schema = { + type = "object", + properties = { + host = {type = "string"}, + port = {type = "integer", minimum = 0}, + tls = {type = "boolean", default = false}, + tls_options = {type = "string"}, + timeout = {type = "integer", minimum = 1, default= 1000}, + log_format = {type = "object"}, + include_req_body = {type = "boolean", default = false}, + include_req_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + include_resp_body = { type = "boolean", default = false }, + include_resp_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + }, + required = {"host", "port"} +} + +local metadata_schema = { + type = "object", + properties = { + log_format = { + type = "object" + } + }, +} + +local _M = { + version = 0.1, + priority = 405, + name = plugin_name, + metadata_schema = metadata_schema, + schema = batch_processor_manager:wrap_schema(schema), +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + core.utils.check_tls_bool({"tls"}, conf, plugin_name) + return core.schema.check(schema, conf) +end + + +local function send_tcp_data(conf, log_message) + local err_msg + local res = true + local sock, soc_err = tcp() + + if not sock then + return false, "failed to init the socket" .. 
soc_err + end + + sock:settimeout(conf.timeout) + + core.log.info("sending a batch logs to ", conf.host, ":", conf.port) + core.log.info("sending log_message: ", log_message) + + local ok, err = sock:connect(conf.host, conf.port) + if not ok then + return false, "failed to connect to TCP server: host[" .. conf.host + .. "] port[" .. tostring(conf.port) .. "] err: " .. err + end + + if conf.tls then + ok, err = sock:sslhandshake(true, conf.tls_options, false) + if not ok then + return false, "failed to perform TLS handshake to TCP server: host[" + .. conf.host .. "] port[" .. tostring(conf.port) .. "] err: " .. err + end + end + + ok, err = sock:send(log_message) + if not ok then + res = false + err_msg = "failed to send data to TCP server: host[" .. conf.host + .. "] port[" .. tostring(conf.port) .. "] err: " .. err + end + + ok, err = sock:close() + if not ok then + core.log.error("failed to close the TCP connection, host[", + conf.host, "] port[", conf.port, "] ", err) + end + + return res, err_msg +end + + +function _M.body_filter(conf, ctx) + log_util.collect_body(conf, ctx) +end + + +function _M.log(conf, ctx) + local entry = log_util.get_log_entry(plugin_name, conf, ctx) + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + -- Generate a function to be executed by the batch processor + local func = function(entries, batch_max_size) + local data, err + if batch_max_size == 1 then + data, err = core.json.encode(entries[1]) -- encode as single {} + else + data, err = core.json.encode(entries) -- encode as array [{}] + end + + if not data then + core.log.error('error occurred while encoding the data: ', err) + end + + return send_tcp_data(conf, data) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/tencent-cloud-cls.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/tencent-cloud-cls.lua new file mode 100644 
index 0000000..38fe565 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/tencent-cloud-cls.lua @@ -0,0 +1,146 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local core = require("apisix.core") +local log_util = require("apisix.utils.log-util") +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local cls_sdk = require("apisix.plugins.tencent-cloud-cls.cls-sdk") +local math = math +local pairs = pairs + + +local plugin_name = "tencent-cloud-cls" +local batch_processor_manager = bp_manager_mod.new(plugin_name) +local schema = { + type = "object", + properties = { + cls_host = { type = "string" }, + cls_topic = { type = "string" }, + secret_id = { type = "string" }, + secret_key = { type = "string" }, + sample_ratio = { + type = "number", + minimum = 0.00001, + maximum = 1, + default = 1 + }, + include_req_body = { type = "boolean", default = false }, + include_req_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + include_resp_body = { type = "boolean", default = false }, + include_resp_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + global_tag = { type = "object" }, + log_format = {type = 
"object"}, + }, + encrypt_fields = {"secret_key"}, + required = { "cls_host", "cls_topic", "secret_id", "secret_key" } +} + + +local metadata_schema = { + type = "object", + properties = { + log_format = { + type = "object" + } + }, +} + + +local _M = { + version = 0.1, + priority = 397, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, err + end + return log_util.check_log_schema(conf) +end + + +function _M.access(conf, ctx) + ctx.cls_sample = false + if conf.sample_ratio == 1 or math.random() < conf.sample_ratio then + core.log.debug("cls sampled") + ctx.cls_sample = true + return + end +end + + +function _M.body_filter(conf, ctx) + if ctx.cls_sample then + log_util.collect_body(conf, ctx) + end +end + + +function _M.log(conf, ctx) + -- sample if set + if not ctx.cls_sample then + core.log.debug("cls not sampled, skip log") + return + end + + local entry = log_util.get_log_entry(plugin_name, conf, ctx) + + if conf.global_tag then + for k, v in pairs(conf.global_tag) do + entry[k] = v + end + end + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + local process = function(entries) + local sdk, err = cls_sdk.new(conf.cls_host, conf.cls_topic, conf.secret_id, conf.secret_key) + if err then + core.log.error("init sdk failed err:", err) + return false, err + end + return sdk:send_to_cls(entries) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, process) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/tencent-cloud-cls/cls-sdk.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/tencent-cloud-cls/cls-sdk.lua new file mode 100644 index 0000000..650d4ab --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/apisix/plugins/tencent-cloud-cls/cls-sdk.lua @@ -0,0 +1,329 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local pb = require "pb" +local protoc = require("protoc").new() +local http = require("resty.http") +local socket = require("socket") +local str_util = require("resty.string") +local core = require("apisix.core") +local core_gethostname = require("apisix.core.utils").gethostname +local json = core.json +local json_encode = json.encode +local ngx = ngx +local ngx_time = ngx.time +local ngx_now = ngx.now +local ngx_sha1_bin = ngx.sha1_bin +local ngx_hmac_sha1 = ngx.hmac_sha1 +local fmt = string.format +local table = table +local concat_tab = table.concat +local clear_tab = table.clear +local new_tab = table.new +local insert_tab = table.insert +local ipairs = ipairs +local pairs = pairs +local type = type +local tostring = tostring +local setmetatable = setmetatable +local pcall = pcall +local unpack = unpack + +-- api doc https://www.tencentcloud.com/document/product/614/16873 +local MAX_SINGLE_VALUE_SIZE = 1 * 1024 * 1024 +local MAX_LOG_GROUP_VALUE_SIZE = 5 * 1024 * 1024 -- 5MB + +local cls_api_path = "/structuredlog" +local auth_expire_time = 60 +local 
cls_conn_timeout = 1000 +local cls_read_timeout = 10000 +local cls_send_timeout = 10000 + +local headers_cache = {} +local params_cache = { + ssl_verify = false, + headers = headers_cache, +} + + +local function get_ip(hostname) + local _, resolved = socket.dns.toip(hostname) + local ip_list = {} + if not resolved.ip then + -- DNS parsing failure + local err = resolved + core.log.error("resolve ip failed, hostname: " .. hostname .. ", error: " .. err) + return nil, err + else + for _, v in ipairs(resolved.ip) do + insert_tab(ip_list, v) + end + end + return ip_list +end + +local host_ip +local log_group_list = {} +local log_group_list_pb = { + logGroupList = log_group_list, +} + + +local function sha1(msg) + return str_util.to_hex(ngx_sha1_bin(msg)) +end + + +local function sha1_hmac(key, msg) + return str_util.to_hex(ngx_hmac_sha1(key, msg)) +end + + +-- sign algorithm https://cloud.tencent.com/document/product/614/12445 +local function sign(secret_id, secret_key) + local method = "post" + local format_params = "" + local format_headers = "" + local sign_algorithm = "sha1" + local http_request_info = fmt("%s\n%s\n%s\n%s\n", + method, cls_api_path, format_params, format_headers) + local cur_time = ngx_time() + local sign_time = fmt("%d;%d", cur_time, cur_time + auth_expire_time) + local string_to_sign = fmt("%s\n%s\n%s\n", sign_algorithm, sign_time, sha1(http_request_info)) + + local sign_key = sha1_hmac(secret_key, sign_time) + local signature = sha1_hmac(sign_key, string_to_sign) + + local arr = { + "q-sign-algorithm=sha1", + "q-ak=" .. secret_id, + "q-sign-time=" .. sign_time, + "q-key-time=" .. sign_time, + "q-header-list=", + "q-url-param-list=", + "q-signature=" .. 
signature, + } + + return concat_tab(arr, '&') +end + + +-- normalized log data for CLS API +local function normalize_log(log) + local normalized_log = {} + local log_size = 4 -- empty obj alignment + for k, v in pairs(log) do + local v_type = type(v) + local field = { key = k, value = "" } + if v_type == "string" then + field["value"] = v + elseif v_type == "number" then + field["value"] = tostring(v) + elseif v_type == "table" then + field["value"] = json_encode(v) + else + field["value"] = tostring(v) + core.log.warn("unexpected type " .. v_type .. " for field " .. k) + end + if #field.value > MAX_SINGLE_VALUE_SIZE then + core.log.warn(field.key, " value size over ", MAX_SINGLE_VALUE_SIZE, " , truncated") + field.value = field.value:sub(1, MAX_SINGLE_VALUE_SIZE) + end + insert_tab(normalized_log, field) + log_size = log_size + #field.key + #field.value + end + return normalized_log, log_size +end + + +local _M = { version = 0.1 } +local mt = { __index = _M } + +local pb_state +local function init_pb_state() + local old_pb_state = pb.state(nil) + protoc.reload() + local cls_sdk_protoc = protoc.new() + -- proto file in https://www.tencentcloud.com/document/product/614/42787 + local ok, err = pcall(cls_sdk_protoc.load, cls_sdk_protoc, [[ +package cls; + +message Log +{ + message Content + { + required string key = 1; // Key of each field group + required string value = 2; // Value of each field group + } + required int64 time = 1; // Unix timestamp + repeated Content contents = 2; // Multiple key-value pairs in one log +} + +message LogTag +{ + required string key = 1; + required string value = 2; +} + +message LogGroup +{ + repeated Log logs = 1; // Log array consisting of multiple logs + optional string contextFlow = 2; // This parameter does not take effect currently + optional string filename = 3; // Log filename + optional string source = 4; // Log source, which is generally the machine IP + repeated LogTag logTags = 5; +} + +message LogGroupList +{ + repeated 
LogGroup logGroupList = 1; // Log group list +} + ]], "tencent-cloud-cls/cls.proto") + if not ok then + cls_sdk_protoc:reset() + pb.state(old_pb_state) + return "failed to load cls.proto: ".. err + end + pb_state = pb.state(old_pb_state) +end + + +function _M.new(host, topic, secret_id, secret_key) + if not pb_state then + local err = init_pb_state() + if err then + return nil, err + end + end + local self = { + host = host, + topic = topic, + secret_id = secret_id, + secret_key = secret_key, + } + return setmetatable(self, mt) +end + + +local function do_request_uri(uri, params) + local client = http:new() + client:set_timeouts(cls_conn_timeout, cls_send_timeout, cls_read_timeout) + local res, err = client:request_uri(uri, params) + client:close() + return res, err +end + + +function _M.send_cls_request(self, pb_obj) + -- recovery of stored pb_store + local old_pb_state = pb.state(pb_state) + local ok, pb_data = pcall(pb.encode, "cls.LogGroupList", pb_obj) + pb_state = pb.state(old_pb_state) + if not ok or not pb_data then + core.log.error("failed to encode LogGroupList, err: ", pb_data) + return false, pb_data + end + + clear_tab(headers_cache) + headers_cache["Host"] = self.host + headers_cache["Content-Type"] = "application/x-protobuf" + headers_cache["Authorization"] = sign(self.secret_id, self.secret_key, cls_api_path) + + -- TODO: support lz4/zstd compress + params_cache.method = "POST" + params_cache.body = pb_data + + local cls_url = "http://" .. self.host .. cls_api_path .. "?topic_id=" .. 
self.topic + core.log.debug("CLS request URL: ", cls_url) + + local res, err = do_request_uri(cls_url, params_cache) + if not res then + return false, err + end + + if res.status ~= 200 then + err = fmt("got wrong status: %s, headers: %s, body, %s", + res.status, json.encode(res.headers), res.body) + -- 413, 404, 401, 403 are not retryable + if res.status == 413 or res.status == 404 or res.status == 401 or res.status == 403 then + core.log.error(err, ", not retryable") + return true + end + + return false, err + end + + core.log.debug("CLS report success") + return true +end + + +function _M.send_to_cls(self, logs) + clear_tab(log_group_list) + local now = ngx_now() * 1000 + + local total_size = 0 + local format_logs = new_tab(#logs, 0) + -- sums of all value in all LogGroup should be no more than 5MB + -- so send whenever size exceed max size + local group_list_start = 1 + + if not host_ip then + local host_ip_list, err = get_ip(core_gethostname()) + if not host_ip_list then + return false, err + end + host_ip = tostring(unpack(host_ip_list)) + end + + for i = 1, #logs, 1 do + local contents, log_size = normalize_log(logs[i]) + if log_size > MAX_LOG_GROUP_VALUE_SIZE then + core.log.error("size of log is over 5MB, dropped") + goto continue + end + total_size = total_size + log_size + if total_size > MAX_LOG_GROUP_VALUE_SIZE then + insert_tab(log_group_list, { + logs = format_logs, + source = host_ip, + }) + local ok, err = self:send_cls_request(log_group_list_pb) + if not ok then + return false, err, group_list_start + end + group_list_start = i + format_logs = new_tab(#logs - i, 0) + total_size = 0 + clear_tab(log_group_list) + end + insert_tab(format_logs, { + time = now, + contents = contents, + }) + :: continue :: + end + + insert_tab(log_group_list, { + logs = format_logs, + source = host_ip, + }) + local ok, err = self:send_cls_request(log_group_list_pb) + return ok, err, group_list_start +end + +return _M diff --git 
a/CloudronPackages/APISIX/apisix-source/apisix/plugins/traffic-split.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/traffic-split.lua new file mode 100644 index 0000000..da6014e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/traffic-split.lua @@ -0,0 +1,305 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local upstream = require("apisix.upstream") +local schema_def = require("apisix.schema_def") +local roundrobin = require("resty.roundrobin") +local ipmatcher = require("resty.ipmatcher") +local expr = require("resty.expr.v1") +local pairs = pairs +local ipairs = ipairs +local type = type +local table_insert = table.insert +local tostring = tostring + +local lrucache = core.lrucache.new({ + ttl = 0, count = 512 +}) + + +local vars_schema = { + type = "array", +} + + +local match_schema = { + type = "array", + items = { + type = "object", + properties = { + vars = vars_schema + } + }, +} + + +local upstreams_schema = { + type = "array", + items = { + type = "object", + properties = { + upstream_id = schema_def.id_schema, + upstream = schema_def.upstream, + weight = { + description = "used to split traffic between different" .. 
+ "upstreams for plugin configuration", + type = "integer", + default = 1, + minimum = 0 + } + } + }, + -- When the upstream configuration of the plugin is missing, + -- the upstream of `route` is used by default. + default = { + { + weight = 1 + } + }, + minItems = 1, + maxItems = 20 +} + + +local schema = { + type = "object", + properties = { + rules = { + type = "array", + items = { + type = "object", + properties = { + match = match_schema, + weighted_upstreams = upstreams_schema + }, + } + } + }, +} + +local plugin_name = "traffic-split" + +local _M = { + version = 0.1, + priority = 966, + name = plugin_name, + schema = schema +} + +function _M.check_schema(conf) + local ok, err = core.schema.check(schema, conf) + + if not ok then + return false, err + end + + if conf.rules then + for _, rule in ipairs(conf.rules) do + if rule.match then + for _, m in ipairs(rule.match) do + local ok, err = expr.new(m.vars) + if not ok then + return false, "failed to validate the 'vars' expression: " .. 
err + end + end + end + end + end + + return true +end + + +local function parse_domain_for_node(node) + local host = node.domain or node.host + if not ipmatcher.parse_ipv4(host) + and not ipmatcher.parse_ipv6(host) + then + node.domain = host + + local ip, err = core.resolver.parse_domain(host) + if ip then + node.host = ip + end + + if err then + core.log.error("dns resolver domain: ", host, " error: ", err) + end + end +end + + +local function set_upstream(upstream_info, ctx) + local nodes = upstream_info.nodes + local new_nodes = {} + if core.table.isarray(nodes) then + for _, node in ipairs(nodes) do + parse_domain_for_node(node) + table_insert(new_nodes, node) + end + else + for addr, weight in pairs(nodes) do + local node = {} + local port, host + host, port = core.utils.parse_addr(addr) + node.host = host + parse_domain_for_node(node) + node.port = port + node.weight = weight + table_insert(new_nodes, node) + end + end + + local up_conf = { + name = upstream_info.name, + type = upstream_info.type, + hash_on = upstream_info.hash_on, + pass_host = upstream_info.pass_host, + upstream_host = upstream_info.upstream_host, + key = upstream_info.key, + nodes = new_nodes, + timeout = upstream_info.timeout, + scheme = upstream_info.scheme + } + + local ok, err = upstream.check_schema(up_conf) + if not ok then + core.log.error("failed to validate generated upstream: ", err) + return 500, err + end + + local matched_route = ctx.matched_route + up_conf.parent = matched_route + local upstream_key = up_conf.type .. "#route_" .. + matched_route.value.id .. "_" .. upstream_info.vid + if upstream_info.node_tid then + upstream_key = upstream_key .. "_" .. 
upstream_info.node_tid + end + core.log.info("upstream_key: ", upstream_key) + upstream.set(ctx, upstream_key, ctx.conf_version, up_conf) + if upstream_info.scheme == "https" then + upstream.set_scheme(ctx, up_conf) + end + return +end + + +local function new_rr_obj(weighted_upstreams) + local server_list = {} + for i, upstream_obj in ipairs(weighted_upstreams) do + if upstream_obj.upstream_id then + server_list[upstream_obj.upstream_id] = upstream_obj.weight + elseif upstream_obj.upstream then + -- Add a virtual id field to uniquely identify the upstream key. + upstream_obj.upstream.vid = i + -- Get the table id of the nodes as part of the upstream_key, + -- avoid upstream_key duplicate because vid is the same in the loop + -- when multiple rules with multiple weighted_upstreams under each rule. + -- see https://github.com/apache/apisix/issues/5276 + local node_tid = tostring(upstream_obj.upstream.nodes):sub(#"table: " + 1) + upstream_obj.upstream.node_tid = node_tid + server_list[upstream_obj.upstream] = upstream_obj.weight + else + -- If the upstream object has only the weight value, it means + -- that the upstream weight value on the default route has been reached. + -- Mark empty upstream services in the plugin. + server_list["plugin#upstream#is#empty"] = upstream_obj.weight + + end + end + + return roundrobin:new(server_list) +end + + +function _M.access(conf, ctx) + if not conf or not conf.rules then + return + end + + local weighted_upstreams + local match_passed = true + + for _, rule in ipairs(conf.rules) do + -- check if all upstream_ids are valid + if rule.weighted_upstreams then + for _, wupstream in ipairs(rule.weighted_upstreams) do + local ups_id = wupstream.upstream_id + if ups_id then + local ups = upstream.get_by_id(ups_id) + if not ups then + return 500, "failed to fetch upstream info by " + .. "upstream id: " .. 
ups_id + end + end + end + end + + if not rule.match then + match_passed = true + weighted_upstreams = rule.weighted_upstreams + break + end + + for _, single_match in ipairs(rule.match) do + local expr, err = expr.new(single_match.vars) + if err then + core.log.error("vars expression does not match: ", err) + return 500, err + end + + match_passed = expr:eval(ctx.var) + if match_passed then + break + end + end + + if match_passed then + weighted_upstreams = rule.weighted_upstreams + break + end + end + + core.log.info("match_passed: ", match_passed) + + if not match_passed then + return + end + + local rr_up, err = lrucache(weighted_upstreams, nil, new_rr_obj, weighted_upstreams) + if not rr_up then + core.log.error("lrucache roundrobin failed: ", err) + return 500 + end + + local upstream = rr_up:find() + if upstream and type(upstream) == "table" then + core.log.info("upstream: ", core.json.encode(upstream)) + return set_upstream(upstream, ctx) + elseif upstream and upstream ~= "plugin#upstream#is#empty" then + ctx.upstream_id = upstream + core.log.info("upstream_id: ", upstream) + return + end + + ctx.upstream_id = nil + core.log.info("route_up: ", upstream) + return +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/ua-restriction.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ua-restriction.lua new file mode 100644 index 0000000..bf28685 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/ua-restriction.lua @@ -0,0 +1,178 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
local ipairs = ipairs
local core = require("apisix.core")
local re_compile = require("resty.core.regex").re_match_compile
local stringx = require('pl.stringx')
local type = type
local str_strip = stringx.strip
local re_find = ngx.re.find

-- Per-worker caches of match results, keyed by the UA string and versioned by
-- the rule table, so a repeated User-Agent is matched against the regexes once.
local lrucache_allow = core.lrucache.new({ ttl = 300, count = 4096 })
local lrucache_deny = core.lrucache.new({ ttl = 300, count = 4096 })

local schema = {
    type = "object",
    properties = {
        bypass_missing = {
            type = "boolean",
            default = false,
        },
        allowlist = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
                minLength = 1,
            }
        },
        denylist = {
            type = "array",
            minItems = 1,
            items = {
                type = "string",
                minLength = 1,
            }
        },
        message = {
            type = "string",
            minLength = 1,
            maxLength = 1024,
            default = "Not allowed"
        },
    },
    oneOf = {
        {required = {"allowlist"}},
        {required = {"denylist"}}
    }
}

local plugin_name = "ua-restriction"

local _M = {
    version = 0.1,
    priority = 2999,
    name = plugin_name,
    schema = schema,
}

-- Return true when the UA (or, for multi-valued User-Agent headers, ANY one
-- of the values) matches at least one allowlist regex.
local function check_with_allow_list(user_agents, allowlist)
    local check = function (user_agent)
        user_agent = str_strip(user_agent)

        for _, rule in ipairs(allowlist) do
            if re_find(user_agent, rule, "jo") then
                return true
            end
        end
        return false
    end

    if type(user_agents) == "table" then
        for _, v in ipairs(user_agents) do
            if lrucache_allow(v, allowlist, check, v) then
                return true
            end
        end
        return false
    else
        return lrucache_allow(user_agents, allowlist, check, user_agents)
    end
end


-- Return true when the UA is NOT matched by any denylist regex (i.e. the
-- request may pass). For multi-valued User-Agent headers, the request is
-- rejected as soon as ANY value is denied.
local function check_with_deny_list(user_agents, denylist)
    -- check(ua) -> true when ua is clean, false when a denylist rule matches
    local check = function (user_agent)
        user_agent = str_strip(user_agent)

        for _, rule in ipairs(denylist) do
            if re_find(user_agent, rule, "jo") then
                return false
            end
        end
        return true
    end

    if type(user_agents) == "table" then
        -- BUG FIX: the previous code did `if lrucache_deny(...) then return
        -- false`, i.e. it rejected when the check said the UA was CLEAN and
        -- passed when a UA was denied — inverted relative to the
        -- single-string branch below, where a truthy check result means pass.
        for _, v in ipairs(user_agents) do
            if not lrucache_deny(v, denylist, check, v) then
                return false
            end
        end
        return true
    else
        return lrucache_deny(user_agents, denylist, check, user_agents)
    end
end


-- Validate the plugin configuration and pre-compile every regex so that a
-- bad pattern is reported at config time rather than per request.
function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)

    if not ok then
        return false, err
    end

    if conf.allowlist then
        for _, re_rule in ipairs(conf.allowlist) do
            ok, err = re_compile(re_rule, "j")
            if not ok then
                return false, err
            end
        end
    end

    if conf.denylist then
        for _, re_rule in ipairs(conf.denylist) do
            ok, err = re_compile(re_rule, "j")
            if not ok then
                return false, err
            end
        end
    end

    return true
end


-- Reject (403 + conf.message) any request whose User-Agent fails the
-- configured allowlist/denylist. A missing UA is rejected unless
-- bypass_missing is set.
function _M.access(conf, ctx)
    -- after core.request.header function changed
    -- we need to get original header value by using core.request.headers
    local user_agent = core.request.headers(ctx)["User-Agent"]

    if not user_agent then
        if conf.bypass_missing then
            return
        else
            return 403, { message = conf.message }
        end
    end

    local is_passed

    if conf.allowlist then
        is_passed = check_with_allow_list(user_agent, conf.allowlist)
    else
        is_passed = check_with_deny_list(user_agent, conf.denylist)
    end

    if not is_passed then
        return 403, { message = conf.message }
    end
end

return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/udp-logger.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/udp-logger.lua
new file mode 100644
index 0000000..75e8bba
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/udp-logger.lua
@@ -0,0 +1,145 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor
license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
local core = require("apisix.core")
local log_util = require("apisix.utils.log-util")
local bp_manager_mod = require("apisix.utils.batch-processor-manager")
local plugin_name = "udp-logger"
local tostring = tostring
local ngx = ngx
local udp = ngx.socket.udp


local batch_processor_manager = bp_manager_mod.new("udp logger")

-- Plugin configuration: where to ship logs (host/port), the send timeout in
-- seconds, an optional per-route log_format, and the usual request/response
-- body collection switches shared with the other logger plugins.
local schema = {
    type = "object",
    properties = {
        host = {type = "string"},
        port = {type = "integer", minimum = 0},
        timeout = {type = "integer", minimum = 1, default = 3},
        log_format = {type = "object"},
        include_req_body = {type = "boolean", default = false},
        include_req_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
        include_resp_body = { type = "boolean", default = false },
        include_resp_body_expr = {
            type = "array",
            minItems = 1,
            items = {
                type = "array"
            }
        },
    },
    required = {"host", "port"}
}

local metadata_schema = {
    type = "object",
    properties = {
        log_format = {
            type = "object"
        }
    },
}

local _M = {
    version = 0.1,
    priority = 400,
    name = plugin_name,
    metadata_schema = metadata_schema,
    schema = batch_processor_manager:wrap_schema(schema),
}


-- Validate either the plugin metadata or the per-route configuration,
-- depending on schema_type.
function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_METADATA then
        return core.schema.check(metadata_schema, conf)
    end

    return core.schema.check(schema, conf)
end


-- Ship one serialized batch to the UDP endpoint. Returns true on success,
-- or false plus an error message; the socket is always closed.
local function send_udp_data(conf, log_message)
    local sock = udp()
    sock:settimeout(conf.timeout * 1000)    -- conf.timeout is in seconds

    core.log.info("sending a batch logs to ", conf.host, ":", conf.port)
    core.log.info("sending log_message: ", log_message)

    local ok, err = sock:setpeername(conf.host, conf.port)
    if not ok then
        return false, "failed to connect to UDP server: host[" .. conf.host
                      .. "] port[" .. tostring(conf.port) .. "] err: " .. err
    end

    local sent, send_err = sock:send(log_message)

    -- Release the socket whether or not the send succeeded.
    local closed, close_err = sock:close()
    if not closed then
        core.log.error("failed to close the UDP connection, host[",
                       conf.host, "] port[", conf.port, "] ", close_err)
    end

    if not sent then
        return false, "failed to send data to UDP server: host[" .. conf.host
                      .. "] port[" .. tostring(conf.port) .. "] err:" .. send_err
    end

    return true, nil
end


function _M.body_filter(conf, ctx)
    log_util.collect_body(conf, ctx)
end


-- Build a log entry for this request and hand it to the shared batch
-- processor; the flush callback serializes the batch and sends it over UDP.
function _M.log(conf, ctx)
    local entry = log_util.get_log_entry(plugin_name, conf, ctx)

    if batch_processor_manager:add_entry(conf, entry) then
        return
    end

    -- Flush callback executed by the batch processor.
    local flush = function(entries, batch_max_size)
        local payload, err
        if batch_max_size == 1 then
            payload, err = core.json.encode(entries[1]) -- single object {}
        else
            payload, err = core.json.encode(entries)    -- array [{}, ...]
        end

        if not payload then
            return false, 'error occurred while encoding the data: ' .. err
        end

        return send_udp_data(conf, payload)
    end

    batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, flush)
end

return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/uri-blocker.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/uri-blocker.lua
new file mode 100644
index 0000000..4612532
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/uri-blocker.lua
@@ -0,0 +1,108 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
local core = require("apisix.core")
local re_compile = require("resty.core.regex").re_match_compile
local re_find = ngx.re.find
local ipairs = ipairs

-- Plugin configuration: a unique list of PCRE patterns to block, the status
-- code (and optional message body) returned on a match, and a flag that makes
-- all patterns case-insensitive.
local schema = {
    type = "object",
    properties = {
        block_rules = {
            type = "array",
            items = {
                type = "string",
                minLength = 1,
                maxLength = 4096,
            },
            uniqueItems = true
        },
        rejected_code = {
            type = "integer",
            minimum = 200,
            default = 403
        },
        rejected_msg = {
            type = "string",
            minLength = 1
        },
        case_insensitive = {
            type = "boolean",
            default = false
        },
    },
    required = {"block_rules"},
}


local plugin_name = "uri-blocker"

local _M = {
    version = 0.1,
    priority = 2900,
    name = plugin_name,
    schema = schema,
}


-- Validate the configuration and pre-compile each rule so an invalid regex
-- is rejected at config time instead of failing per request.
function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    for _, pattern in ipairs(conf.block_rules) do
        local compiled, compile_err = re_compile(pattern, "j")
        if not compiled then
            return false, compile_err
        end
    end

    return true
end


-- Reject any request whose URI matches one of the block rules; otherwise
-- fall through and let the request continue.
function _M.rewrite(conf, ctx)
    core.log.info("uri: ", ctx.var.request_uri)
    core.log.info("block uri rules: ", conf.block_rules_concat)

    if not conf.block_rules_concat then
        -- Lazily build a single alternation regex out of every rule and
        -- cache it on the conf object for subsequent requests.
        local joined = core.table.concat(conf.block_rules, "|")
        if conf.case_insensitive then
            joined = "(?i)" .. joined
        end
        conf.block_rules_concat = joined
        core.log.info("concat block_rules: ", conf.block_rules_concat)
    end

    if re_find(ctx.var.request_uri, conf.block_rules_concat, "jo") then
        if conf.rejected_msg then
            return conf.rejected_code, { error_msg = conf.rejected_msg }
        end
        return conf.rejected_code
    end
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/plugins/wolf-rbac.lua b/CloudronPackages/APISIX/apisix-source/apisix/plugins/wolf-rbac.lua
new file mode 100644
index 0000000..22a90c2
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/plugins/wolf-rbac.lua
@@ -0,0 +1,492 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
--
-- wolf-rbac: authentication/authorization plugin backed by an external
-- wolf-server (https://github.com/iGeeky/wolf). Requests carry a composite
-- "rbac token" (version#appid#wolf_token); each request is checked against
-- wolf-server's /wolf/rbac/access_check endpoint. The plugin also exposes
-- login / change-password / user-info proxy APIs under /apisix/plugin/wolf-rbac/.
--

local core = require("apisix.core")
local consumer = require("apisix.consumer")
local json = require("apisix.core.json")
local sleep = core.sleep
local ngx_re = require("ngx.re")
local http = require("resty.http")
local ngx = ngx
local rawget = rawget
local rawset = rawset
local setmetatable = setmetatable
local type = type
local string = string
local req_read_body = ngx.req.read_body
local req_get_body_data = ngx.req.get_body_data

local plugin_name = "wolf-rbac"


local schema = {
    type = "object",
    properties = {
        appid = {
            type = "string",
            default = "unset"
        },
        server = {
            type = "string",
            default = "http://127.0.0.1:12180"
        },
        header_prefix = {
            type = "string",
            default = "X-"
        },
    }
}

local _M = {
    version = 0.1,
    priority = 2555,
    type = 'auth',
    name = plugin_name,
    schema = schema,
}


-- Composite token format: "V1#<appid>#<wolf_token>".
local token_version = 'V1'
local function create_rbac_token(appid, wolf_token)
    return token_version .. "#" .. appid .. "#" .. wolf_token
end

-- Build an error payload table; extra fields may be merged in via init_values.
local function fail_response(message, init_values)
    local response = init_values or {}
    response.message = message
    return response
end

-- Build a success payload table; same shape as fail_response.
local function success_response(message, init_values)
    local response = init_values or {}
    response.message = message
    return response
end

-- Split "V1#appid#wolf_token" into its parts. The split limit of 3 keeps any
-- '#' characters inside the wolf token intact.
-- Returns {appid=..., wolf_token=...} or nil plus an error string.
local function parse_rbac_token(rbac_token)
    local res, err = ngx_re.split(rbac_token, "#", nil, nil, 3)
    if not res then
        return nil, err
    end

    if #res ~= 3 or res[1] ~= token_version then
        return nil, 'invalid rbac token: version'
    end
    local appid = res[2]
    local wolf_token = res[3]

    return {appid = appid, wolf_token = wolf_token}
end

-- A table with case-insensitive key lookup: writes record both the original
-- key and its lowercase form; reads go through the lowercase shadow table.
local function new_headers()
    local t = {}
    local lt = {}
    local _mt = {
        __index = function(t, k)
            return rawget(lt, string.lower(k))
        end,
        __newindex = function(t, k, v)
            rawset(t, k, v)
            rawset(lt, string.lower(k), v)
        end,
    }
    return setmetatable(t, _mt)
end

-- Perform one HTTP request to wolf-server. timeout in ms.
-- Note: ssl_verify is disabled — wolf-server is expected to live on a
-- trusted internal network.
local function http_req(method, uri, body, myheaders, timeout)
    if not myheaders then
        myheaders = new_headers()
    end

    local httpc = http.new()
    if timeout then
        httpc:set_timeout(timeout)
    end

    local res, err = httpc:request_uri(uri, {
        method = method,
        headers = myheaders,
        body = body,
        ssl_verify = false
    })

    if not res then
        core.log.error("FAIL REQUEST [ ",core.json.delay_encode(
            {method = method, uri = uri, body = body, headers = myheaders}),
            " ] failed! res is nil, err:", err)
        return nil, err
    end

    return res
end

local function http_get(uri, myheaders, timeout)
    return http_req("GET", uri, nil, myheaders, timeout)
end


-- Schema check for the consumer-side conf; also warns when the wolf-server
-- URL is plain http (check_https).
function _M.check_schema(conf)
    local check = {"server"}
    core.utils.check_https(check, conf, plugin_name)
    core.log.info("input conf: ", core.json.delay_encode(conf))

    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    return true
end


-- Look for the rbac token, in priority order: query arg, Authorization
-- header, X-Rbac-Token header, then the x-rbac-token cookie.
local function fetch_rbac_token(ctx)
    if ctx.var.arg_rbac_token then
        return ngx.unescape_uri(ctx.var.arg_rbac_token)
    end

    if ctx.var.http_authorization then
        return ctx.var.http_authorization
    end

    if ctx.var.http_x_rbac_token then
        return ctx.var.http_x_rbac_token
    end

    return ctx.var['cookie_x-rbac-token']
end


-- Ask wolf-server whether (appid, action, resName, client_ip) is permitted.
-- Retries up to 3 times on 5xx, with a 100ms pause between attempts.
-- Always returns a table {status=..., err=..., userInfo=...?}.
local function check_url_permission(server, appid, action, resName, client_ip, wolf_token)
    local retry_max = 3
    local errmsg
    local userInfo
    local res
    local err
    local access_check_url = server .. "/wolf/rbac/access_check"
    local headers = new_headers()
    headers["x-rbac-token"] = wolf_token
    headers["Content-Type"] = "application/json; charset=utf-8"
    local args = { appID = appid, resName = resName, action = action, clientIP = client_ip}
    local url = access_check_url .. "?" .. ngx.encode_args(args)
    local timeout = 1000 * 10

    for i = 1, retry_max do
        -- TODO: read apisix info.
        res, err = http_get(url, headers, timeout)
        if err then
            break
        else
            core.log.info("check permission request:", url, ", status:", res.status,
                          ",body:", core.json.delay_encode(res.body))
            if res.status < 500 then
                break
            else
                core.log.info("request [curl -v ", url, "] failed! status:", res.status)
                if i < retry_max then
                    sleep(0.1)
                end
            end
        end
    end

    if err then
        core.log.error("fail request: ", url, ", err:", err)
        return {
            status = 500,
            err = "request to wolf-server failed, err:" .. err
        }
    end

    if res.status ~= 200 and res.status >= 500 then
        return {
            status = 500,
            err = 'request to wolf-server failed, status:' .. res.status
        }
    end

    local body, err = json.decode(res.body)
    if not body then
        errmsg = 'check permission failed! parse response json failed!'
        core.log.error( "json.decode(", res.body, ") failed! err:", err)
        return {status = res.status, err = errmsg}
    else
        if body.data then
            userInfo = body.data.userInfo
        end
        errmsg = body.reason
        return {status = res.status, err = errmsg, userInfo = userInfo}
    end
end


-- Main auth hook: extract the token, resolve the consumer by appid, check
-- permission against wolf-server, and on success attach user info headers
-- (both to the response and the upstream request) and the consumer.
function _M.rewrite(conf, ctx)
    local url = ctx.var.uri
    local action = ctx.var.request_method
    local client_ip = ctx.var.http_x_real_ip or core.request.get_ip(ctx)
    local perm_item = {action = action, url = url, clientIP = client_ip}
    core.log.info("hit wolf-rbac rewrite")

    local rbac_token = fetch_rbac_token(ctx)
    if rbac_token == nil then
        core.log.info("no permission to access ",
                      core.json.delay_encode(perm_item), ", need login!")
        return 401, fail_response("Missing rbac token in request")
    end

    local tokenInfo, err = parse_rbac_token(rbac_token)
    core.log.info("token info: ", core.json.delay_encode(tokenInfo),
                  ", err: ", err)
    if err then
        return 401, fail_response('invalid rbac token: parse failed')
    end

    local appid = tokenInfo.appid
    local wolf_token = tokenInfo.wolf_token
    perm_item.appid = appid
    perm_item.wolf_token = wolf_token

    local consumer_conf = consumer.plugin(plugin_name)
    if not consumer_conf then
        return 401, fail_response("Missing related consumer")
    end

    local consumers = consumer.consumers_kv(plugin_name, consumer_conf, "appid")

    core.log.info("------ consumers: ", core.json.delay_encode(consumers))
    local cur_consumer = consumers[appid]
    if not cur_consumer then
        core.log.error("consumer [", appid, "] not found")
        return 401, fail_response("Invalid appid in rbac token")
    end
    core.log.info("consumer: ", core.json.delay_encode(cur_consumer))
    local server = cur_consumer.auth_conf.server

    local res = check_url_permission(server, appid, action, url,
                                     client_ip, wolf_token)
    core.log.info(" check_url_permission(", core.json.delay_encode(perm_item),
                  ") res: ",core.json.delay_encode(res))

    -- Headers are set even when permission is later denied, so the caller can
    -- still learn who the token belongs to.
    local username = nil
    local nickname = nil
    if type(res.userInfo) == 'table' then
        local userInfo = res.userInfo
        ctx.userInfo = userInfo
        local userId = userInfo.id
        username = userInfo.username
        nickname = userInfo.nickname or userInfo.username
        local prefix = cur_consumer.auth_conf.header_prefix or ''
        -- nickname may contain non-ASCII characters, hence the uri-escape
        core.response.set_header(prefix .. "UserId", userId)
        core.response.set_header(prefix .. "Username", username)
        core.response.set_header(prefix .. "Nickname", ngx.escape_uri(nickname))
        core.request.set_header(ctx, prefix .. "UserId", userId)
        core.request.set_header(ctx, prefix .. "Username", username)
        core.request.set_header(ctx, prefix .. "Nickname", ngx.escape_uri(nickname))
    end

    if res.status ~= 200 then
        -- no permission.
        core.log.error(" check_url_permission(",
                       core.json.delay_encode(perm_item),
                       ") failed, res: ",core.json.delay_encode(res))
        return res.status, fail_response(res.err, { username = username, nickname = nickname })
    end
    consumer.attach_consumer(ctx, cur_consumer, consumer_conf)
    core.log.info("wolf-rbac check permission passed")
end

-- Read the request body as either JSON (when the content type says so) or
-- form-encoded POST args. Returns nil when the JSON body fails to decode.
local function get_args()
    local ctx = ngx.ctx.api_ctx
    local args, err
    req_read_body()
    if string.find(ctx.var.http_content_type or "","application/json",
                   1, true) then
        local req_body = req_get_body_data()
        args, err = json.decode(req_body)
        if not args then
            core.log.error("json.decode(", req_body, ") failed! ", err)
        end
    else
        args = core.request.get_post_args(ctx)
    end

    return args
end

-- Resolve a consumer by appid, or terminate the request (500/400) via
-- core.response.exit — callers after this point can assume a valid consumer.
local function get_consumer(appid)
    local consumer_conf = consumer.plugin(plugin_name)
    if not consumer_conf then
        core.response.exit(500)
    end

    local consumers = consumer.consumers_kv(plugin_name, consumer_conf, "appid")

    core.log.info("------ consumers: ", core.json.delay_encode(consumers))
    local consumer = consumers[appid]
    if not consumer then
        core.log.info("request appid [", appid, "] not found")
        core.response.exit(400,
            fail_response("appid not found")
        )
    end
    return consumer
end

-- Proxy a JSON request to wolf-server and decode the response. Any transport
-- or protocol failure terminates the request via core.response.exit; on
-- success the decoded body (with body.ok == true) is returned.
local function request_to_wolf_server(method, uri, headers, body)
    headers["Content-Type"] = "application/json; charset=utf-8"
    local timeout = 1000 * 5
    local request_debug = core.json.delay_encode(
        {
            method = method, uri = uri, body = body,
            headers = headers,timeout = timeout
        }
    )

    core.log.info("request [", request_debug, "] ....")
    local res, err = http_req(method, uri, core.json.encode(body), headers, timeout)
    if not res then
        core.log.error("request [", request_debug, "] failed! err: ", err)
        return core.response.exit(500,
            fail_response("request to wolf-server failed!")
        )
    end
    core.log.info("request [", request_debug, "] status: ", res.status,
                  ", body: ", res.body)

    if res.status ~= 200 then
        core.log.error("request [", request_debug, "] failed! status: ",
                       res.status)
        return core.response.exit(500,
            fail_response("request to wolf-server failed!")
        )
    end
    local body, err = json.decode(res.body)
    if not body then
        core.log.error("request [", request_debug, "] failed! err:", err)
        return core.response.exit(500, fail_response("request to wolf-server failed!"))
    end
    if not body.ok then
        -- NOTE: a logical failure from wolf-server is reported with HTTP 200
        -- and an error payload, matching the server's own convention
        core.log.error("request [", request_debug, "] failed! response body:",
                       core.json.delay_encode(body))
        return core.response.exit(200, fail_response("request to wolf-server failed!"))
    end

    core.log.info("request [", request_debug, "] success! response body:",
                  core.json.delay_encode(body))
    return body
end

-- Public API: POST /apisix/plugin/wolf-rbac/login
-- Exchanges appid + credentials for a composite rbac token.
local function wolf_rbac_login()
    local args = get_args()
    if not args then
        return core.response.exit(400, fail_response("invalid request"))
    end
    if not args.appid then
        return core.response.exit(400, fail_response("appid is missing"))
    end

    local appid = args.appid
    local consumer = get_consumer(appid)
    core.log.info("consumer: ", core.json.delay_encode(consumer))

    local uri = consumer.auth_conf.server .. '/wolf/rbac/login.rest'
    local headers = new_headers()
    local body = request_to_wolf_server('POST', uri, headers, args)

    local userInfo = body.data.userInfo
    local wolf_token = body.data.token

    local rbac_token = create_rbac_token(appid, wolf_token)
    core.response.exit(200, success_response(nil, {rbac_token = rbac_token, user_info = userInfo}))
end

-- Fetch and parse the rbac token for the management APIs below, terminating
-- the request with 401 on any failure.
local function get_wolf_token(ctx)
    core.log.info("hit wolf-rbac change_password api")
    local rbac_token = fetch_rbac_token(ctx)
    if rbac_token == nil then
        local url = ctx.var.uri
        local action = ctx.var.request_method
        local client_ip = core.request.get_ip(ctx)
        local perm_item = {action = action, url = url, clientIP = client_ip}
        core.log.info("no permission to access ",
                      core.json.delay_encode(perm_item), ", need login!")
        return core.response.exit(401, fail_response("Missing rbac token in request"))
    end

    local tokenInfo, err = parse_rbac_token(rbac_token)
    core.log.info("token info: ", core.json.delay_encode(tokenInfo),
                  ", err: ", err)
    if err then
        return core.response.exit(401, fail_response('invalid rbac token: parse failed'))
    end
    return tokenInfo
end

-- Public API: PUT /apisix/plugin/wolf-rbac/change_pwd
local function wolf_rbac_change_pwd()
    local args = get_args()

    local ctx = ngx.ctx.api_ctx
    local tokenInfo = get_wolf_token(ctx)
    local appid = tokenInfo.appid
    local wolf_token = tokenInfo.wolf_token
    local consumer = get_consumer(appid)
    core.log.info("consumer: ", core.json.delay_encode(consumer))

    local uri = consumer.auth_conf.server .. '/wolf/rbac/change_pwd'
    local headers = new_headers()
    headers['x-rbac-token'] = wolf_token
    request_to_wolf_server('POST', uri, headers, args)
    core.response.exit(200, success_response('success to change password', { }))
end

-- Public API: GET /apisix/plugin/wolf-rbac/user_info
local function wolf_rbac_user_info()
    local ctx = ngx.ctx.api_ctx
    local tokenInfo = get_wolf_token(ctx)
    local appid = tokenInfo.appid
    local wolf_token = tokenInfo.wolf_token
    local consumer = get_consumer(appid)
    core.log.info("consumer: ", core.json.delay_encode(consumer))

    local uri = consumer.auth_conf.server .. '/wolf/rbac/user_info'
    local headers = new_headers()
    headers['x-rbac-token'] = wolf_token
    local body = request_to_wolf_server('GET', uri, headers, {})
    local userInfo = body.data.userInfo
    core.response.exit(200, success_response(nil, {user_info = userInfo}))
end

-- Register the plugin's public (non-proxied) API endpoints.
function _M.api()
    return {
        {
            methods = {"POST"},
            uri = "/apisix/plugin/wolf-rbac/login",
            handler = wolf_rbac_login,
        },
        {
            methods = {"PUT"},
            uri = "/apisix/plugin/wolf-rbac/change_pwd",
            handler = wolf_rbac_change_pwd,
        },
        {
            methods = {"GET"},
            uri = "/apisix/plugin/wolf-rbac/user_info",
            handler = wolf_rbac_user_info,
        },
    }
end

return _M
-- You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- workflow: evaluate a list of rules; the first rule whose `case` expression
-- matches (or which has no case) triggers its action. Only the built-in
-- "return" action is provided here; other plugins can register more via
-- _M.register().
--
local core = require("apisix.core")
local expr = require("resty.expr.v1")
local ipairs = ipairs

local schema = {
    type = "object",
    properties = {
        rules = {
            type = "array",
            items = {
                type = "object",
                properties = {
                    case = {
                        type = "array",
                        items = {
                            anyOf = {
                                {
                                    type = "array",
                                },
                                {
                                    type = "string",
                                },
                            }
                        },
                        minItems = 1,
                    },
                    actions = {
                        type = "array",
                        items = {
                            type = "array",
                            minItems = 1
                        }
                    }
                },
                required = {"actions"}
            }
        }
    },
    required = {"rules"}
}

local plugin_name = "workflow"

local _M = {
    version = 0.1,
    priority = 1006,
    name = plugin_name,
    schema = schema
}


local return_schema = {
    type = "object",
    properties = {
        code = {
            type = "integer",
            minimum = 100,
            maximum = 599
        }
    },
    required = {"code"}
}


-- Conf check for the built-in "return" action (requires a valid HTTP code).
local function check_return_schema(conf)
    local ok, err = core.schema.check(return_schema, conf)
    if not ok then
        return false, err
    end
    return true
end


-- Built-in "return" action: short-circuit the request with the configured
-- status code and a fixed error body.
local function exit(conf)
    return conf.code, {error_msg = "rejected by workflow"}
end



local support_action = {
    ["return"] = {
        handler = exit,
        check_schema = check_return_schema,
    }
}


-- Let other plugins register additional workflow actions by name.
-- handler(conf, ctx) performs the action; check_schema(conf, plugin_name)
-- validates its conf at configuration time.
function _M.register(plugin_name, handler, check_schema)
    support_action[plugin_name] = {
        handler = handler,
        check_schema = check_schema
    }
end

-- Validate the whole conf: the top-level schema, each rule's `case`
-- expression, and each action's own schema.
function _M.check_schema(conf)
    local ok, err = core.schema.check(schema, conf)
    if not ok then
        return false, err
    end

    for idx, rule in ipairs(conf.rules) do
        if rule.case then
            local ok, err = expr.new(rule.case)
            if not ok then
                return false, "failed to validate the 'case' expression: " .. err
            end
        end

        local actions = rule.actions
        for _, action in ipairs(actions) do

            if not support_action[action[1]] then
                return false, "unsupported action: " .. action[1]
            end

            -- use the action's idx as an identifier to isolate between confs
            action[2]["_vid"] = idx
            local ok, err = support_action[action[1]].check_schema(action[2], plugin_name)
            if not ok then
                return false, "failed to validate the '" .. action[1] .. "' action: " .. err
            end
        end
    end

    return true
end


-- Run the rules in order; the first match wins and its (single) action's
-- return value becomes the plugin's return value.
function _M.access(conf, ctx)
    for _, rule in ipairs(conf.rules) do
        local match_result = true
        if rule.case then
            -- named `case_expr` so the `resty.expr.v1` module local is not
            -- shadowed (the original `local expr, _ = expr.new(...)` was)
            local case_expr = expr.new(rule.case)
            match_result = case_expr:eval(ctx.var)
        end
        if match_result then
            -- only one action is currently supported
            local action = rule.actions[1]
            return support_action[action[1]].handler(action[2], ctx)
        end
    end
end


return _M
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- zipkin: distributed-tracing plugin. Creates an "apisix.request" root span
-- per request, honors incoming B3 propagation headers (both the single `b3`
-- header and the multi X-B3-* form), samples per conf.sample_ratio, and
-- reports spans to the configured Zipkin endpoint.
--
local core = require("apisix.core")
local new_tracer = require("opentracing.tracer").new
local zipkin_codec = require("apisix.plugins.zipkin.codec")
local new_random_sampler = require("apisix.plugins.zipkin.random_sampler").new
local new_reporter = require("apisix.plugins.zipkin.reporter").new
local ngx = ngx
local ngx_var = ngx.var
local ngx_re = require("ngx.re")
local pairs = pairs
local tonumber = tonumber
local to_hex = require "resty.string".to_hex

local plugin_name = "zipkin"
local ZIPKIN_SPAN_VER_1 = 1
local ZIPKIN_SPAN_VER_2 = 2
local plugin = require("apisix.plugin")
local string_format = string.format


-- one cached tracer per (server_addr .. server_port) key, see _M.rewrite
local lrucache = core.lrucache.new({
    type = "plugin",
})

local schema = {
    type = "object",
    properties = {
        endpoint = {type = "string"},
        sample_ratio = {type = "number", minimum = 0.00001, maximum = 1},
        service_name = {
            type = "string",
            description = "service name for zipkin reporter",
            default = "APISIX",
        },
        server_addr = {
            type = "string",
            description = "default is $server_addr, you can specify your external ip address",
            pattern = "^[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}$"
        },
        span_version = {
            enum = {ZIPKIN_SPAN_VER_1, ZIPKIN_SPAN_VER_2},
            default = ZIPKIN_SPAN_VER_2,
        },
    },
    required = {"endpoint", "sample_ratio"}
}


local _M = {
    version = 0.1,
    priority = 12011,
    name = plugin_name,
    schema = schema,
}


function _M.check_schema(conf)
    local check = {"endpoint"}
    core.utils.check_https(check, conf, plugin_name)
    return core.schema.check(schema, conf)
end

local plugin_info = plugin.plugin_attr(plugin_name) or {}


-- Build a tracer (reporter + sampler + b3 codecs) for one conf instance.
local function create_tracer(conf,ctx)
    conf.route_id = ctx.route_id
    local reporter = new_reporter(conf)
    reporter:init_processor()
    local tracer = new_tracer(reporter, new_random_sampler(conf))
    tracer:register_injector("http_headers", zipkin_codec.new_injector())
    tracer:register_extractor("http_headers", zipkin_codec.new_extractor())
    return tracer
end


-- Parse the single-header b3 format: traceid-spanid[-sampled[-parentspanid]].
-- Returns (err, trace_id, span_id, sampled, parent_span_id); err is nil on
-- success. A bare "0" means "deny sampling".
local function parse_b3(b3)
    -- See https://github.com/openzipkin/b3-propagation#single-header
    if b3 == "0" then
        return nil, nil, nil, "0", nil
    end

    local pieces, err = ngx_re.split(b3, "-", nil, nil, 4)
    if not pieces then
        return err
    end
    if not pieces[1] then
        return "missing trace_id"
    end
    if not pieces[2] then
        return "missing span_id"
    end
    return nil, pieces[1], pieces[2], pieces[3], pieces[4]
end

-- Inject the current span's propagation headers into the upstream request.
-- Sampled requests propagate the proxy span; unsampled ones the request span.
local function inject_header(ctx)
    local opentracing = ctx.opentracing
    local tracer = opentracing.tracer
    local outgoing_headers = {}

    local span = opentracing.request_span
    if ctx.opentracing_sample then
        span = opentracing.proxy_span
    end
    tracer:inject(span, "http_headers", outgoing_headers)

    for k, v in pairs(outgoing_headers) do
        core.request.set_header(ctx, k, v)
    end
end

-- Start the root span: resolve the tracer from cache, extract incoming B3
-- context, decide sampling, and (depending on span_version) open the first
-- child span.
function _M.rewrite(plugin_conf, ctx)
    local conf = core.table.clone(plugin_conf)
    -- once the server started, server_addr and server_port won't change, so we can cache it.
    conf.server_port = tonumber(ctx.var['server_port'])

    if not conf.server_addr or conf.server_addr == '' then
        conf.server_addr = ctx.var["server_addr"]
    end

    local tracer = core.lrucache.plugin_ctx(lrucache, ctx, conf.server_addr .. conf.server_port,
                                            create_tracer, conf, ctx)

    local headers = core.request.headers(ctx)
    local per_req_sample_ratio

    -- X-B3-Flags: if it equals '1' then it overrides sampling policy
    -- We still want to warn on invalid sampled header, so do this after the above
    local debug = headers["x-b3-flags"]
    if debug == "1" then
        per_req_sample_ratio = 1
    end

    local trace_id, request_span_id, sampled, parent_span_id
    local b3 = headers["b3"]
    if b3 then
        -- don't pass b3 header by default
        -- TODO: add an option like 'single_b3_header' so we can adapt to the upstream
        -- which doesn't support b3 header without always breaking down the header
        core.request.set_header(ctx, "b3", nil)

        local err
        err, trace_id, request_span_id, sampled, parent_span_id = parse_b3(b3)

        if err then
            core.log.error("invalid b3 header: ", b3, ", ignored: ", err)
            return 400
        end

        -- "d" is the b3 debug flag: force-sample and propagate X-B3-Flags
        if sampled == "d" then
            core.request.set_header(ctx, "x-b3-flags", "1")
            sampled = "1"
        end
    else
        -- X-B3-Sampled: if the client decided to sample this request, we do too.
        sampled = headers["x-b3-sampled"]
        trace_id = headers["x-b3-traceid"]
        parent_span_id = headers["x-b3-parentspanid"]
        request_span_id = headers["x-b3-spanid"]
    end

    -- pooled table, released again in _M.log
    local zipkin_ctx = core.tablepool.fetch("zipkin_ctx", 0, 3)
    zipkin_ctx.trace_id = trace_id
    zipkin_ctx.parent_span_id = parent_span_id
    zipkin_ctx.request_span_id = request_span_id
    ctx.zipkin = zipkin_ctx

    local wire_context = tracer:extract("http_headers", ctx)

    local start_timestamp = ngx.req.start_time()
    local request_span = tracer:start_span("apisix.request", {
        child_of = wire_context,
        start_timestamp = start_timestamp,
        tags = {
            component = "apisix",
            ["span.kind"] = "server",
            ["http.method"] = ctx.var.request_method,
            ["http.url"] = ctx.var.request_uri,
            -- TODO: support ipv6
            ["peer.ipv4"] = core.request.get_remote_client_ip(ctx),
            ["peer.port"] = core.request.get_remote_client_port(ctx),
        }
    })

    ctx.opentracing = {
        tracer = tracer,
        wire_context = wire_context,
        request_span = request_span,
    }

    -- Process sampled
    if sampled == "1" or sampled == "true" then
        per_req_sample_ratio = 1
    elseif sampled == "0" or sampled == "false" then
        per_req_sample_ratio = 0
    end

    ctx.opentracing_sample = tracer.sampler:sample(per_req_sample_ratio or conf.sample_ratio)
    -- the decision travels as baggage so the codec/reporter can see it
    if not ctx.opentracing_sample then
        request_span:set_baggage_item("x-b3-sampled","0")
    else
        request_span:set_baggage_item("x-b3-sampled","1")
    end

    -- optionally expose trace identifiers as nginx variables (for access logs)
    if plugin_info.set_ngx_var then
        local span_context = request_span:context()
        ngx_var.zipkin_context_traceparent = string_format("00-%s-%s-%02x",
                                                to_hex(span_context.trace_id),
                                                to_hex(span_context.span_id),
                                                span_context:get_baggage_item("x-b3-sampled"))
        ngx_var.zipkin_trace_id = span_context.trace_id
        ngx_var.zipkin_span_id = span_context.span_id
    end

    if not ctx.opentracing_sample then
        return
    end

    local request_span = ctx.opentracing.request_span
    if conf.span_version == ZIPKIN_SPAN_VER_1 then
        -- v1: emit a distinct span per phase, starting with "apisix.rewrite"
        ctx.opentracing.rewrite_span = request_span:start_child_span("apisix.rewrite",
                                                                     start_timestamp)
        ctx.REWRITE_END_TIME = tracer:time()
        ctx.opentracing.rewrite_span:finish(ctx.REWRITE_END_TIME)
    else
        -- v2: one "apisix.proxy" span covers the whole proxy phase
        ctx.opentracing.proxy_span = request_span:start_child_span("apisix.proxy",
                                                                   start_timestamp)
    end
end

-- v1 only: close the access span and open the proxy span; always inject
-- propagation headers toward the upstream.
function _M.access(conf, ctx)
    local opentracing = ctx.opentracing
    local tracer = opentracing.tracer

    if conf.span_version == ZIPKIN_SPAN_VER_1 then
        opentracing.access_span = opentracing.request_span:start_child_span(
            "apisix.access", ctx.REWRITE_END_TIME)

        ctx.ACCESS_END_TIME = tracer:time()
        opentracing.access_span:finish(ctx.ACCESS_END_TIME)

        opentracing.proxy_span = opentracing.request_span:start_child_span(
            "apisix.proxy", ctx.ACCESS_END_TIME)
    end

    -- send headers to upstream
    inject_header(ctx)
end


-- On first response bytes: v1 opens a body_filter span; v2 finishes the
-- proxy span and opens the response span.
function _M.header_filter(conf, ctx)
    if not ctx.opentracing_sample then
        return
    end

    local opentracing = ctx.opentracing
    local end_time = opentracing.tracer:time()

    if conf.span_version == ZIPKIN_SPAN_VER_1 then
        if opentracing.proxy_span then
            opentracing.body_filter_span = opentracing.proxy_span:start_child_span(
                "apisix.body_filter", end_time)
        end
    else
        opentracing.proxy_span:finish(end_time)
        opentracing.response_span = opentracing.request_span:start_child_span(
            "apisix.response_span", end_time)
    end
end


-- Log phase: release the pooled zipkin ctx, close any open spans, tag the
-- root span with the upstream status, and finish it (which reports it).
function _M.log(conf, ctx)
    if ctx.zipkin then
        core.tablepool.release("zipkin_ctx", ctx.zipkin)
        ctx.zipkin = nil
    end

    if not ctx.opentracing_sample then
        return
    end

    local opentracing = ctx.opentracing

    local log_end_time = opentracing.tracer:time()

    if conf.span_version == ZIPKIN_SPAN_VER_1 then
        if opentracing.body_filter_span then
            opentracing.body_filter_span:finish(log_end_time)
        end
        if opentracing.proxy_span then
            opentracing.proxy_span:finish(log_end_time)
        end
    elseif opentracing.response_span then
        opentracing.response_span:finish(log_end_time)
    end

    local upstream_status = core.response.get_upstream_status(ctx)
    opentracing.request_span:set_tag("http.status_code", upstream_status)

    opentracing.request_span:finish(log_end_time)
end

return _M
--
-- zipkin.codec: extractor/injector pair for B3 http-header propagation.
-- The extractor builds an opentracing span context from the (already header-
-- parsed) ids stashed in ctx.zipkin; the injector writes X-B3-* headers plus
-- uberctx-* baggage headers for outgoing requests.
--
local core = require("apisix.core")
local to_hex = require "resty.string".to_hex
local new_span_context = require("opentracing.span_context").new
local ngx = ngx
local string = string
local pairs = pairs
local tonumber = tonumber

-- Convert one hex byte pair ("ff") to its raw character.
local function hex_to_char(c)
    return string.char(tonumber(c, 16))
end

-- Decode a hex string to raw bytes (ids travel as hex on the wire but the
-- span context stores them raw).
local function from_hex(str)
    if str ~= nil then -- allow nil to pass through
        str = str:gsub("%x%x", hex_to_char)
    end
    return str
end

-- Returns a function(ctx) -> span_context|nil. Ids are validated as hex of
-- the right length (trace id: 16 or 32 chars; span ids: 16); any invalid id
-- causes extraction to be abandoned entirely.
local function new_extractor()
    return function(ctx)
        local had_invalid_id = false

        local zipkin_ctx = ctx.zipkin
        local trace_id = zipkin_ctx.trace_id
        local parent_span_id = zipkin_ctx.parent_span_id
        local request_span_id = zipkin_ctx.request_span_id

        -- Validate trace id
        if trace_id and
            ((#trace_id ~= 16 and #trace_id ~= 32) or trace_id:match("%X")) then
            core.log.warn("x-b3-traceid header invalid; ignoring.")
            had_invalid_id = true
        end

        -- Validate parent_span_id
        if parent_span_id and
            (#parent_span_id ~= 16 or parent_span_id:match("%X")) then
            core.log.warn("x-b3-parentspanid header invalid; ignoring.")
            had_invalid_id = true
        end

        -- Validate request_span_id
        if request_span_id and
            (#request_span_id ~= 16 or request_span_id:match("%X")) then
            core.log.warn("x-b3-spanid header invalid; ignoring.")
            had_invalid_id = true
        end

        if trace_id == nil or had_invalid_id then
            return nil
        end

        -- Process jaegar baggage header
        local baggage = {}
        local headers = core.request.headers(ctx)
        for k, v in pairs(headers) do
            local baggage_key = k:match("^uberctx%-(.*)$")
            if baggage_key then
                baggage[baggage_key] = ngx.unescape_uri(v)
            end
        end

        core.log.info("new span context: trace id: ", trace_id,
                      ", span id: ", request_span_id,
                      ", parent span id: ", parent_span_id)

        trace_id = from_hex(trace_id)
        parent_span_id = from_hex(parent_span_id)
        request_span_id = from_hex(request_span_id)

        return new_span_context(trace_id, request_span_id, parent_span_id,
                                baggage)
    end
end

-- Returns a function(span_context, headers) that fills `headers` with the
-- X-B3-* propagation set and uberctx-* baggage entries.
local function new_injector()
    return function(span_context, headers)
        -- We want to remove headers if already present
        headers["x-b3-traceid"] = to_hex(span_context.trace_id)
        headers["x-b3-parentspanid"] = span_context.parent_id
                                       and to_hex(span_context.parent_id) or nil
        headers["x-b3-spanid"] = to_hex(span_context.span_id)
        headers["x-b3-sampled"] = span_context:get_baggage_item("x-b3-sampled")
        for key, value in span_context:each_baggage_item() do
            -- skip x-b3-sampled baggage
            if key ~= "x-b3-sampled" then
                -- XXX: https://github.com/opentracing/specification/issues/117
                headers["uberctx-"..key] = ngx.escape_uri(value)
            end
        end
    end
end

return {
    new_extractor = new_extractor,
    new_injector = new_injector,
}
--
-- A sampler that decides whether to record a trace by flipping a
-- weighted coin: a trace is kept when a uniform random draw falls
-- strictly below the configured sample ratio.
local assert = assert
local type = type
local setmetatable = setmetatable
local math = math


local _M = {}
local mt = { __index = _M }


-- A ratio is acceptable when it is a number within [0, 1].
local function ratio_is_valid(ratio)
    return type(ratio) == "number" and ratio >= 0 and ratio <= 1
end


-- Create a sampler instance. `conf` is accepted for interface
-- compatibility with other samplers but carries no options here.
function _M.new(conf)
    return setmetatable({}, mt)
end


-- Return true when the current request should be sampled.
-- Raises "invalid sample_ratio" when the ratio is not in [0, 1].
function _M.sample(self, sample_ratio)
    assert(ratio_is_valid(sample_ratio), "invalid sample_ratio")
    return math.random() < sample_ratio
end


return _M
--
-- Reporter for the zipkin plugin: converts finished opentracing spans
-- into the Zipkin v2 JSON span format and ships them to the collector
-- endpoint through APISIX's shared batch processor.
local resty_http = require "resty.http"
local to_hex = require "resty.string".to_hex
local cjson = require "cjson.safe".new()
cjson.encode_number_precision(16)
local assert = assert
local type = type
local setmetatable = setmetatable
local math = math
local tostring = tostring
local batch_processor = require("apisix.utils.batch-processor")
local core = require("apisix.core")

local _M = {}
local mt = { __index = _M }


-- opentracing "span.kind" tag value -> Zipkin span kind enum
local span_kind_map = {
    client = "CLIENT",
    server = "SERVER",
    producer = "PRODUCER",
    consumer = "CONSUMER",
}


-- Create a reporter bound to one plugin configuration.
-- `conf.endpoint` (required, string) is the Zipkin collector URL;
-- service_name/server_addr/server_port describe the local endpoint
-- attached to every reported span. The batch processor is created
-- lazily by init_processor, not here.
function _M.new(conf)
    local endpoint = conf.endpoint
    local service_name = conf.service_name
    local server_port = conf.server_port
    local server_addr = conf.server_addr
    assert(type(endpoint) == "string", "invalid http endpoint")
    return setmetatable({
        endpoint = endpoint,
        service_name = service_name,
        server_addr = server_addr,
        server_port = server_port,
        pending_spans_n = 0,
        route_id = conf.route_id
    }, mt)
end


-- Convert one finished span to a Zipkin v2 span table and queue it on
-- the batch processor. Spans explicitly marked unsampled via the
-- "x-b3-sampled" baggage item are dropped here.
function _M.report(self, span)
    if span:get_baggage_item("x-b3-sampled") == "0" then
        return
    end
    local span_context = span:context()

    local zipkin_tags = {}
    for k, v in span:each_tag() do
        -- Zipkin tag values should be strings
        zipkin_tags[k] = tostring(v)
    end

    -- "span.kind" becomes the top-level `kind` field, not a tag
    local span_kind = zipkin_tags["span.kind"]
    zipkin_tags["span.kind"] = nil

    local localEndpoint = {
        serviceName = self.service_name,
        ipv4 = self.server_addr,
        port = self.server_port,
        -- TODO: ip/port from ngx.var.server_name/ngx.var.server_port?
    }

    -- peer.* tags, when present, are promoted into remoteEndpoint and
    -- removed from the tag map; otherwise cjson.null keeps the field
    -- explicit in the encoded JSON.
    local remoteEndpoint do
        local peer_port = span:get_tag "peer.port" -- get as number
        if peer_port then
            zipkin_tags["peer.port"] = nil
            remoteEndpoint = {
                ipv4 = zipkin_tags["peer.ipv4"],
                -- ipv6 = zipkin_tags["peer.ipv6"],
                port = peer_port, -- port is *not* optional
            }
            zipkin_tags["peer.ipv4"] = nil
            zipkin_tags["peer.ipv6"] = nil
        else
            remoteEndpoint = cjson.null
        end
    end

    -- Zipkin expects microsecond timestamps/durations.
    local zipkin_span = {
        traceId = to_hex(span_context.trace_id),
        name = span.name,
        parentId = span_context.parent_id and
                   to_hex(span_context.parent_id) or nil,
        id = to_hex(span_context.span_id),
        kind = span_kind_map[span_kind],
        timestamp = span.timestamp * 1000000,
        duration = math.floor(span.duration * 1000000), -- zipkin wants integer
        -- TODO: debug?
        localEndpoint = localEndpoint,
        remoteEndpoint = remoteEndpoint,
        tags = zipkin_tags,
        annotations = span.logs
    }

    self.pending_spans_n = self.pending_spans_n + 1
    -- processor may not exist yet if init_processor failed or was not called
    if self.processor then
        self.processor:push(zipkin_span)
    end
end


-- POST one JSON-encoded batch of spans to the collector.
-- Returns true on a 2xx response, otherwise nil plus an error string.
local function send_span(pending_spans, report)
    local httpc = resty_http.new()
    local res, err = httpc:request_uri(report.endpoint, {
        method = "POST",
        headers = {
            ["content-type"] = "application/json",
        },
        body = pending_spans,
        -- NOTE(review): lua-resty-http treats `keepalive` as a boolean
        -- flag; 5000 merely enables keepalive (the timeout keeps its
        -- default). If a 5s timeout was intended, `keepalive_timeout`
        -- is the option for that — confirm intent upstream.
        keepalive = 5000,
        keepalive_pool = 5
    })

    if not res then
        -- for zipkin test
        core.log.error("report zipkin span failed")
        return nil, "failed: " .. err .. ", url: " .. report.endpoint
    elseif res.status < 200 or res.status >= 300 then
        return nil, "failed: " .. report.endpoint .. " "
               .. res.status .. " " .. res.reason
    end

    return true
end


-- Create the batch processor that buffers spans and flushes them to
-- the collector. Stores it on `self.processor`; on failure returns
-- false plus an error message and leaves `self.processor` unset.
function _M.init_processor(self)
    local process_conf = {
        name = "zipkin_report",
        retry_delay = 1,
        batch_max_size = 1000,
        max_retry_count = 0,
        buffer_duration = 60,
        inactive_timeout = 5,
        route_id = self.route_id,
        server_addr = self.server_addr,
    }

    -- Flush callback for the batch processor: encode one entry (or the
    -- whole batch) and send it. A nil entries list is a no-op success.
    local flush = function (entries, batch_max_size)
        if not entries then
            return true
        end

        local pending_spans, err
        if batch_max_size == 1 then
            pending_spans, err = cjson.encode(entries[1])
        else
            pending_spans, err = cjson.encode(entries)
        end

        if not pending_spans then
            return false, 'error occurred while encoding the data: ' .. err
        end

        return send_span(pending_spans, self)
    end

    local processor, err = batch_processor:new(flush, process_conf)
    if not processor then
        return false, "create processor error: " .. err
    end

    self.processor = processor
end


return _M
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- Pub-sub backend that serves "kafka" scheme upstreams: bridges a
-- websocket pub-sub session to a Kafka cluster via a basic consumer.
local core = require("apisix.core")
local bconsumer = require("resty.kafka.basic-consumer")
local ffi = require("ffi")
local C = ffi.C
local tostring = tostring
local type = type
local ipairs = ipairs
local str_sub = string.sub

ffi.cdef[[
    int64_t atoll(const char *num);
]]


local _M = {}


-- Handles the conversion of 64-bit integers in the lua-protobuf.
--
-- Because of the limitations of luajit, we cannot use native 64-bit
-- numbers, so pb decode converts int64 to a string in #xxx format
-- to avoid loss of precision, by this function, we convert this
-- string to int64 cdata numbers.
local function pb_convert_to_int64(src)
    if type(src) == "string" then
        -- the format is #1234, so there is a small minimum length of 2
        if #src < 2 then
            return 0
        end
        -- skip the leading '#' and parse the digits as int64_t
        return C.atoll(ffi.cast("char *", src) + 1)
    else
        return src
    end
end


-- Takes over requests of type kafka upstream in the http_access phase.
-- Builds a Kafka consumer from the matched upstream's nodes (with
-- optional SASL/PLAIN and TLS settings from the request ctx), registers
-- the pub-sub command handlers, and blocks in pubsub:wait() until the
-- client session ends.
function _M.access(api_ctx)
    local pubsub, err = core.pubsub.new()
    if not pubsub then
        core.log.error("failed to initialize pubsub module, err: ", err)
        core.response.exit(400)
        return
    end

    local up_nodes = api_ctx.matched_upstream.nodes

    -- kafka client broker-related configuration
    local broker_list = {}
    for i, node in ipairs(up_nodes) do
        broker_list[i] = {
            host = node.host,
            port = node.port,
        }

        if api_ctx.kafka_consumer_enable_sasl then
            broker_list[i].sasl_config = {
                mechanism = "PLAIN",
                user = api_ctx.kafka_consumer_sasl_username,
                password = api_ctx.kafka_consumer_sasl_password,
            }
        end
    end

    local client_config = {refresh_interval = 30 * 60 * 1000}
    if api_ctx.matched_upstream.tls then
        client_config.ssl = true
        client_config.ssl_verify = api_ctx.matched_upstream.tls.verify
    end

    -- load and create the consumer instance when it is determined
    -- that the websocket connection was created successfully
    local consumer = bconsumer:new(broker_list, client_config)

    -- command: resolve an offset for a (topic, partition, timestamp)
    pubsub:on("cmd_kafka_list_offset", function (params)
        -- The timestamp parameter uses a 64-bit integer, which is difficult
        -- for luajit to handle well, so the int64_as_string option in
        -- lua-protobuf is used here. Smaller numbers will be decoded as
        -- lua number, while overly larger numbers will be decoded as strings
        -- in the format #number, where the # symbol at the beginning of the
        -- string will be removed and converted to int64_t with the atoll function.
        local timestamp = pb_convert_to_int64(params.timestamp)

        local offset, err = consumer:list_offset(params.topic, params.partition, timestamp)

        if not offset then
            return nil, "failed to list offset, topic: " .. params.topic ..
                ", partition: " .. params.partition .. ", err: " .. err
        end

        -- tostring() on an int64 cdata appends an "LL" suffix;
        -- str_sub(..., #offset - 2) strips those two characters
        offset = tostring(offset)
        return {
            kafka_list_offset_resp = {
                offset = str_sub(offset, 1, #offset - 2)
            }
        }
    end)

    -- command: fetch a batch of records starting at an offset
    pubsub:on("cmd_kafka_fetch", function (params)
        local offset = pb_convert_to_int64(params.offset)

        local ret, err = consumer:fetch(params.topic, params.partition, offset)
        if not ret then
            return nil, "failed to fetch message, topic: " .. params.topic ..
                ", partition: " .. params.partition .. ", err: " .. err
        end

        -- split into multiple messages when the amount of data in
        -- a single batch is too large
        local messages = ret.records

        -- special handling of int64 for luajit compatibility:
        -- strip the "LL" suffix tostring() puts on int64 cdata values
        for _, message in ipairs(messages) do
            local timestamp = tostring(message.timestamp)
            message.timestamp = str_sub(timestamp, 1, #timestamp - 2)
            local offset = tostring(message.offset)
            message.offset = str_sub(offset, 1, #offset - 2)
        end

        return {
            kafka_fetch_resp = {
                messages = messages,
            },
        }
    end)

    -- start processing client commands
    pubsub:wait()
end


return _M
You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- Router facade: loads the configured HTTP/SSL/stream router
-- implementations at worker init and exposes them on module fields
-- (_M.router_http, _M.router_ssl, _M.router_stream, _M.api).
local require = require
local http_route = require("apisix.http.route")
local apisix_upstream = require("apisix.upstream")
local core = require("apisix.core")
local str_lower = string.lower
local ipairs = ipairs


local _M = {version = 0.3}


-- Normalize one route record in place before it enters the router:
-- remembers the original modifiedIndex, lowercases host/hosts, and
-- lets the upstream module post-process the embedded upstream.
local function filter(route)
    route.orig_modifiedIndex = route.modifiedIndex

    route.has_domain = false
    if not route.value then
        return
    end

    -- hosts are matched case-insensitively, so store them lowercased
    if route.value.host then
        route.value.host = str_lower(route.value.host)
    elseif route.value.hosts then
        for i, v in ipairs(route.value.hosts) do
            route.value.hosts[i] = str_lower(v)
        end
    end

    apisix_upstream.filter_upstream(route.value.upstream, route)

    core.log.info("filter route: ", core.json.delay_encode(route, true))
end


-- attach common methods if the router doesn't provide its custom implementation
local function attach_http_router_common_methods(http_router)
    if http_router.routes == nil then
        -- default routes(): expose the values/conf_version pair kept
        -- by init_worker below
        http_router.routes = function ()
            if not http_router.user_routes then
                return nil, nil
            end

            local user_routes = http_router.user_routes
            return user_routes.values, user_routes.conf_version
        end
    end

    if http_router.init_worker == nil then
        -- note: this `filter` parameter shadows the local filter()
        -- above; the caller passes that same function in
        http_router.init_worker = function (filter)
            http_router.user_routes = http_route.init_worker(filter)
        end
    end
end


-- Worker-init for HTTP: pick the router implementations named in the
-- local config (defaults: radixtree_uri / radixtree_sni), initialize
-- them with the route filter, and load the API router.
function _M.http_init_worker()
    local conf = core.config.local_conf()
    local router_http_name = "radixtree_uri"
    local router_ssl_name = "radixtree_sni"

    if conf and conf.apisix and conf.apisix.router then
        router_http_name = conf.apisix.router.http or router_http_name
        router_ssl_name = conf.apisix.router.ssl or router_ssl_name
    end

    local router_http = require("apisix.http.router." .. router_http_name)
    attach_http_router_common_methods(router_http)
    router_http.init_worker(filter)
    _M.router_http = router_http

    local router_ssl = require("apisix.ssl.router." .. router_ssl_name)
    router_ssl.init_worker()
    _M.router_ssl = router_ssl

    _M.api = require("apisix.api_router")
end


-- Worker-init for stream (L4) proxying: fixed ip_port router plus the
-- SNI-based SSL router.
function _M.stream_init_worker()
    local router_ssl_name = "radixtree_sni"

    local router_stream = require("apisix.stream.router.ip_port")
    router_stream.stream_init_worker(filter)
    _M.router_stream = router_stream

    local router_ssl = require("apisix.ssl.router." .. router_ssl_name)
    router_ssl.init_worker()
    _M.router_ssl = router_ssl
end


-- Accessors below return (nil, nil) when the corresponding router has
-- not been initialized in this worker yet.

function _M.ssls()
    return _M.router_ssl.ssls()
end

function _M.http_routes()
    if not _M.router_http then
        return nil, nil
    end
    return _M.router_http.routes()
end

function _M.stream_routes()
    -- maybe it's not inited.
    if not _M.router_stream then
        return nil, nil
    end
    return _M.router_stream.routes()
end


-- for test
_M.filter_test = filter


return _M
You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- Core JSON-schema definitions shared by the admin API and runtime
-- validation (routes, services, upstreams, consumers, ssl, ...).
local schema = require('apisix.core.schema')
local table_insert = table.insert
local table_concat = table.concat
local setmetatable = setmetatable
local error = error

local _M = {version = 0.5}


-- plugin maps are validated per-plugin elsewhere; here only the shape
local plugins_schema = {
    type = "object"
}

_M.anonymous_consumer_schema = {
    type = "string",
    -- fix: JSON Schema requires `minLength` to be a non-negative
    -- integer; the previous string value "1" made the constraint
    -- invalid/ineffective
    minLength = 1
}

-- resource ids: either a constrained string or a positive integer
local id_schema = {
    anyOf = {
        {
            type = "string", minLength = 1, maxLength = 64,
            pattern = [[^[a-zA-Z0-9-_.]+$]]
        },
        {type = "integer", minimum = 1}
    }
}

-- hostname pattern, optionally with a leading wildcard "*"
local host_def_pat = "^\\*?[0-9a-zA-Z-._\\[\\]:]+$"
local host_def = {
    type = "string",
    pattern = host_def_pat,
}
_M.host_def = host_def


-- one dotted-decimal octet (0-255); repeated four times for IPv4
local ipv4_seg = "([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])"
local ipv4_def_buf = {}
for i = 1, 4 do
    table_insert(ipv4_def_buf, ipv4_seg)
end
local ipv4_def = table_concat(ipv4_def_buf, [[\.]])
-- There is false negative for ipv6/cidr. For instance, `:/8` will be valid.
-- It is fine as the correct regex will be too complex.
local ipv6_def = "([a-fA-F0-9]{0,4}:){1,8}(:[a-fA-F0-9]{0,4}){0,8}"
                 .. "([a-fA-F0-9]{0,4})?"
local ip_def = {
    {title = "IPv4", type = "string", format = "ipv4"},
    {title = "IPv4/CIDR", type = "string", pattern = "^" .. ipv4_def .. "/([12]?[0-9]|3[0-2])$"},
    {title = "IPv6", type = "string", format = "ipv6"},
    {title = "IPv6/CIDR", type = "string", pattern = "^" .. ipv6_def ..
"/[0-9]{1,3}$"}, +} +_M.ip_def = ip_def + + +_M.uri_def = {type = "string", pattern = [=[^[^\/]+:\/\/([\da-zA-Z.-]+|\[[\da-fA-F:]+\])(:\d+)?]=]} + + +local timestamp_def = { + type = "integer", +} + +local remote_addr_def = { + description = "client IP", + type = "string", + anyOf = ip_def, +} + + +local label_value_def = { + description = "value of label", + type = "string", + pattern = [[^\S+$]], + maxLength = 256, + minLength = 1 +} +_M.label_value_def = label_value_def + + +local labels_def = { + description = "key/value pairs to specify attributes", + type = "object", + patternProperties = { + [".*"] = label_value_def + }, +} + + +local rule_name_def = { + type = "string", + maxLength = 100, + minLength = 1, +} + + +local desc_def = { + type = "string", + maxLength = 256, +} + + +local timeout_def = { + type = "object", + properties = { + connect = {type = "number", exclusiveMinimum = 0}, + send = {type = "number", exclusiveMinimum = 0}, + read = {type = "number", exclusiveMinimum = 0}, + }, + required = {"connect", "send", "read"}, +} + + +local health_checker = { + type = "object", + properties = { + active = { + type = "object", + properties = { + type = { + type = "string", + enum = {"http", "https", "tcp"}, + default = "http" + }, + timeout = {type = "number", default = 1}, + concurrency = {type = "integer", default = 10}, + host = host_def, + port = { + type = "integer", + minimum = 1, + maximum = 65535 + }, + http_path = {type = "string", default = "/"}, + https_verify_certificate = {type = "boolean", default = true}, + healthy = { + type = "object", + properties = { + interval = {type = "integer", minimum = 1, default = 1}, + http_statuses = { + type = "array", + minItems = 1, + items = { + type = "integer", + minimum = 200, + maximum = 599 + }, + uniqueItems = true, + default = {200, 302} + }, + successes = { + type = "integer", + minimum = 1, + maximum = 254, + default = 2 + } + } + }, + unhealthy = { + type = "object", + properties = { + interval = 
{type = "integer", minimum = 1, default = 1}, + http_statuses = { + type = "array", + minItems = 1, + items = { + type = "integer", + minimum = 200, + maximum = 599 + }, + uniqueItems = true, + default = {429, 404, 500, 501, 502, 503, 504, 505} + }, + http_failures = { + type = "integer", + minimum = 1, + maximum = 254, + default = 5 + }, + tcp_failures = { + type = "integer", + minimum = 1, + maximum = 254, + default = 2 + }, + timeouts = { + type = "integer", + minimum = 1, + maximum = 254, + default = 3 + } + } + }, + req_headers = { + type = "array", + minItems = 1, + items = { + type = "string", + uniqueItems = true, + }, + } + } + }, + passive = { + type = "object", + properties = { + type = { + type = "string", + enum = {"http", "https", "tcp"}, + default = "http" + }, + healthy = { + type = "object", + properties = { + http_statuses = { + type = "array", + minItems = 1, + items = { + type = "integer", + minimum = 200, + maximum = 599, + }, + uniqueItems = true, + default = {200, 201, 202, 203, 204, 205, 206, 207, + 208, 226, 300, 301, 302, 303, 304, 305, + 306, 307, 308} + }, + successes = { + type = "integer", + minimum = 0, + maximum = 254, + default = 5 + } + } + }, + unhealthy = { + type = "object", + properties = { + http_statuses = { + type = "array", + minItems = 1, + items = { + type = "integer", + minimum = 200, + maximum = 599, + }, + uniqueItems = true, + default = {429, 500, 503} + }, + tcp_failures = { + type = "integer", + minimum = 0, + maximum = 254, + default = 2 + }, + timeouts = { + type = "integer", + minimum = 0, + maximum = 254, + default = 7 + }, + http_failures = { + type = "integer", + minimum = 0, + maximum = 254, + default = 5 + }, + } + } + }, + } + }, + anyOf = { + {required = {"active"}}, + {required = {"active", "passive"}}, + }, + additionalProperties = false, +} + + +local nodes_schema = { + anyOf = { + { + type = "object", + patternProperties = { + [".*"] = { + description = "weight of node", + type = "integer", + minimum = 
0, + } + }, + }, + { + type = "array", + items = { + type = "object", + properties = { + host = host_def, + port = { + description = "port of node", + type = "integer", + minimum = 1, + maximum = 65535 + }, + weight = { + description = "weight of node", + type = "integer", + minimum = 0, + }, + priority = { + description = "priority of node", + type = "integer", + default = 0, + }, + metadata = { + description = "metadata of node", + type = "object", + } + }, + required = {"host", "weight"}, + }, + } + } +} +_M.discovery_nodes = { + type = "array", + items = { + type = "object", + properties = { + host = { + description = "domain or ip", + }, + port = { + description = "port of node", + type = "integer", + minimum = 1, + maximum = 65535 + }, + weight = { + description = "weight of node", + type = "integer", + minimum = 0, + }, + priority = { + description = "priority of node", + type = "integer", + }, + metadata = { + description = "metadata of node", + type = "object", + } + }, + -- nodes from DNS discovery may not contain port + required = {"host", "weight"}, + }, +} + + +local certificate_scheme = { + type = "string", minLength = 128, maxLength = 64*1024 +} + + +local private_key_schema = { + type = "string", minLength = 128, maxLength = 64*1024 +} + + +local upstream_schema = { + type = "object", + properties = { + -- metadata + id = id_schema, + name = rule_name_def, + desc = desc_def, + labels = labels_def, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + nodes = nodes_schema, + retries = { + type = "integer", + minimum = 0, + }, + retry_timeout = { + type = "number", + minimum = 0, + }, + timeout = timeout_def, + tls = { + type = "object", + properties = { + client_cert_id = id_schema, + client_cert = certificate_scheme, + client_key = private_key_schema, + verify = { + type = "boolean", + description = "Turn on server certificate verification, ".. 
+ "currently only kafka upstream is supported", + default = false, + }, + }, + dependencies = { + client_cert = {required = {"client_key"}}, + client_key = {required = {"client_cert"}}, + client_cert_id = { + ["not"] = {required = {"client_cert", "client_key"}} + } + } + }, + keepalive_pool = { + type = "object", + properties = { + size = { + type = "integer", + default = 320, + minimum = 1, + }, + idle_timeout = { + type = "number", + default = 60, + minimum = 0, + }, + requests = { + type = "integer", + default = 1000, + minimum = 1, + }, + }, + }, + type = { + description = "algorithms of load balancing", + type = "string", + default = "roundrobin", + }, + checks = health_checker, + hash_on = { + type = "string", + default = "vars", + enum = { + "vars", + "header", + "cookie", + "consumer", + "vars_combinations", + }, + }, + key = { + description = "the key of chash for dynamic load balancing", + type = "string", + }, + scheme = { + default = "http", + enum = {"grpc", "grpcs", "http", "https", "tcp", "tls", "udp", + "kafka"}, + description = "The scheme of the upstream." .. + " For L7 proxy, it can be one of grpc/grpcs/http/https." .. + " For L4 proxy, it can be one of tcp/tls/udp." .. + " For specific protocols, it can be kafka." 
+ }, + discovery_type = { + description = "discovery type", + type = "string", + }, + discovery_args = { + type = "object", + properties = { + namespace_id = { + description = "namespace id", + type = "string", + }, + group_name = { + description = "group name", + type = "string", + }, + } + }, + pass_host = { + description = "mod of host passing", + type = "string", + enum = {"pass", "node", "rewrite"}, + default = "pass" + }, + upstream_host = host_def, + service_name = { + type = "string", + maxLength = 256, + minLength = 1 + }, + }, + oneOf = { + {required = {"nodes"}}, + {required = {"service_name", "discovery_type"}}, + }, + additionalProperties = false +} + +-- TODO: add more nginx variable support +_M.upstream_hash_vars_schema = { + type = "string", + pattern = [[^((uri|server_name|server_addr|request_uri|remote_port]] + .. [[|remote_addr|query_string|host|hostname|mqtt_client_id)]] + .. [[|arg_[0-9a-zA-z_-]+)$]], +} + +-- validates header name, cookie name. +-- a-z, A-Z, 0-9, '_' and '-' are allowed. +-- when "underscores_in_headers on", header name allow '_'. 
+-- http://nginx.org/en/docs/http/ngx_http_core_module.html#underscores_in_headers +_M.upstream_hash_header_schema = { + type = "string", + pattern = [[^[a-zA-Z0-9-_]+$]] +} + +-- validates string only +_M.upstream_hash_vars_combinations_schema = { + type = "string" +} + + +local method_schema = { + description = "HTTP method", + type = "string", + enum = {"GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", + "OPTIONS", "CONNECT", "TRACE", "PURGE"}, +} +_M.method_schema = method_schema + + +_M.route = { + type = "object", + properties = { + -- metadata + id = id_schema, + name = rule_name_def, + desc = desc_def, + labels = labels_def, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + uri = {type = "string", minLength = 1, maxLength = 4096}, + uris = { + type = "array", + items = { + description = "HTTP uri", + type = "string", + }, + minItems = 1, + uniqueItems = true, + }, + priority = {type = "integer", default = 0}, + + methods = { + type = "array", + items = method_schema, + uniqueItems = true, + }, + host = host_def, + hosts = { + type = "array", + items = host_def, + minItems = 1, + uniqueItems = true, + }, + remote_addr = remote_addr_def, + remote_addrs = { + type = "array", + items = remote_addr_def, + minItems = 1, + uniqueItems = true, + }, + timeout = timeout_def, + vars = { + type = "array", + }, + filter_func = { + type = "string", + minLength = 10, + pattern = [[^function]], + }, + + -- The 'script' fields below are used by dashboard for plugin orchestration + script = {type = "string", minLength = 10, maxLength = 102400}, + script_id = id_schema, + + plugins = plugins_schema, + plugin_config_id = id_schema, + + upstream = upstream_schema, + + service_id = id_schema, + upstream_id = id_schema, + + enable_websocket = { + description = "enable websocket for request", + type = "boolean", + }, + + status = { + description = "route status, 1 to enable, 0 to disable", + type = "integer", + enum = {1, 0}, + default = 1 + }, + }, 
+ allOf = { + { + oneOf = { + {required = {"uri"}}, + {required = {"uris"}}, + }, + }, + { + oneOf = { + {["not"] = { + anyOf = { + {required = {"host"}}, + {required = {"hosts"}}, + } + }}, + {required = {"host"}}, + {required = {"hosts"}} + }, + }, + { + oneOf = { + {["not"] = { + anyOf = { + {required = {"remote_addr"}}, + {required = {"remote_addrs"}}, + } + }}, + {required = {"remote_addr"}}, + {required = {"remote_addrs"}} + }, + }, + }, + anyOf = { + {required = {"plugins", "uri"}}, + {required = {"upstream", "uri"}}, + {required = {"upstream_id", "uri"}}, + {required = {"service_id", "uri"}}, + {required = {"plugins", "uris"}}, + {required = {"upstream", "uris"}}, + {required = {"upstream_id", "uris"}}, + {required = {"service_id", "uris"}}, + {required = {"script", "uri"}}, + {required = {"script", "uris"}}, + }, + ["not"] = { + anyOf = { + {required = {"script", "plugins"}}, + {required = {"script", "plugin_config_id"}}, + } + }, + additionalProperties = false, +} + + +_M.service = { + type = "object", + properties = { + -- metadata + id = id_schema, + name = rule_name_def, + desc = desc_def, + labels = labels_def, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + plugins = plugins_schema, + upstream = upstream_schema, + upstream_id = id_schema, + script = {type = "string", minLength = 10, maxLength = 102400}, + enable_websocket = { + description = "enable websocket for request", + type = "boolean", + }, + hosts = { + type = "array", + items = host_def, + minItems = 1, + uniqueItems = true, + }, + }, + additionalProperties = false, +} + + +_M.consumer = { + type = "object", + properties = { + -- metadata + username = { + type = "string", minLength = 1, maxLength = rule_name_def.maxLength, + pattern = [[^[a-zA-Z0-9_\-]+$]] + }, + desc = desc_def, + labels = labels_def, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + group_id = id_schema, + plugins = plugins_schema, + }, + required = 
{"username"}, + additionalProperties = false, +} + +_M.credential = { + type = "object", + properties = { + -- metadata + id = id_schema, + name = rule_name_def, + desc = desc_def, + labels = labels_def, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + plugins = { + type = "object", + maxProperties = 1, + }, + }, + additionalProperties = false, +} + +_M.upstream = upstream_schema + + +local secret_uri_schema = { + type = "string", + pattern = "^\\$(secret|env|ENV)://" +} + + +_M.ssl = { + type = "object", + properties = { + -- metadata + id = id_schema, + desc = desc_def, + labels = labels_def, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + type = { + description = "ssl certificate type, " .. + "server to server certificate, " .. + "client to client certificate for upstream", + type = "string", + default = "server", + enum = {"server", "client"} + }, + cert = { + oneOf = { + certificate_scheme, + secret_uri_schema + } + }, + key = { + oneOf = { + private_key_schema, + secret_uri_schema + } + }, + sni = { + type = "string", + pattern = host_def_pat, + }, + snis = { + type = "array", + items = { + type = "string", + pattern = host_def_pat, + }, + minItems = 1, + }, + certs = { + type = "array", + items = { + oneOf = { + certificate_scheme, + secret_uri_schema + } + } + }, + keys = { + type = "array", + items = { + oneOf = { + private_key_schema, + secret_uri_schema + } + } + }, + client = { + type = "object", + properties = { + ca = certificate_scheme, + depth = { + type = "integer", + minimum = 0, + default = 1, + }, + skip_mtls_uri_regex = { + type = "array", + minItems = 1, + uniqueItems = true, + items = { + description = "uri regular expression to skip mtls", + type = "string", + } + }, + }, + required = {"ca"}, + }, + status = { + description = "ssl status, 1 to enable, 0 to disable", + type = "integer", + enum = {1, 0}, + default = 1 + }, + ssl_protocols = { + description = "set ssl 
protocols", + type = "array", + maxItems = 3, + uniqueItems = true, + items = { + enum = {"TLSv1.1", "TLSv1.2", "TLSv1.3"} + }, + }, + }, + ["if"] = { + properties = { + type = { + enum = {"server"}, + }, + }, + }, + ["then"] = { + oneOf = { + {required = {"sni", "key", "cert"}}, + {required = {"snis", "key", "cert"}} + } + }, + ["else"] = {required = {"key", "cert"}}, + additionalProperties = false, +} + + + +-- TODO: Design a plugin resource registration framework used by plugins and move the proto +-- resource to grpc-transcode plugin, which should not be an APISIX core resource +_M.proto = { + type = "object", + properties = { + -- metadata + id = id_schema, + name = rule_name_def, + desc = desc_def, + labels = labels_def, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + content = { + type = "string", minLength = 1, maxLength = 1024*1024 + } + }, + required = {"content"}, + additionalProperties = false, +} + + +_M.global_rule = { + type = "object", + properties = { + -- metadata + id = id_schema, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + plugins = plugins_schema, + }, + required = {"id", "plugins"}, + additionalProperties = false, +} + + +local xrpc_protocol_schema = { + type = "object", + properties = { + name = { + type = "string", + }, + superior_id = id_schema, + conf = { + description = "protocol-specific configuration", + type = "object", + }, + logger = { + type = "array", + items = { + properties = { + name = { + type = "string", + }, + filter = { + description = "logger filter rules", + type = "array", + }, + conf = { + description = "logger plugin configuration", + type = "object", + }, + }, + dependencies = { + name = {"conf"}, + }, + additionalProperties = false, + }, + }, + + }, + required = {"name"} +} + + +_M.stream_route = { + type = "object", + properties = { + -- metadata + id = id_schema, + name = rule_name_def, + desc = desc_def, + labels = labels_def, + create_time = 
timestamp_def, + update_time = timestamp_def, + + -- properties + remote_addr = remote_addr_def, + server_addr = { + description = "server IP", + type = "string", + anyOf = ip_def, + }, + server_port = { + description = "server port", + type = "integer", + minimum = 1, + maximum = 65535 + }, + sni = { + description = "server name indication", + type = "string", + pattern = host_def_pat, + }, + upstream = upstream_schema, + upstream_id = id_schema, + service_id = id_schema, + plugins = plugins_schema, + protocol = xrpc_protocol_schema, + }, + additionalProperties = false, +} + + +_M.plugins = { + type = "array", + items = { + type = "object", + properties = { + name = { + type = "string", + minLength = 1, + }, + stream = { + type = "boolean" + }, + additionalProperties = false, + }, + required = {"name"} + } +} + + +_M.plugin_config = { + type = "object", + properties = { + -- metadata + id = id_schema, + name = { + type = "string", + }, + desc = desc_def, + labels = labels_def, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + plugins = plugins_schema, + }, + required = {"id", "plugins"}, + additionalProperties = false, +} + + +_M.consumer_group = { + type = "object", + properties = { + -- metadata + id = id_schema, + name = rule_name_def, + desc = desc_def, + labels = labels_def, + create_time = timestamp_def, + update_time = timestamp_def, + + -- properties + plugins = plugins_schema, + }, + required = {"id", "plugins"}, + additionalProperties = false, +} + + +_M.id_schema = id_schema + + +_M.plugin_injected_schema = { + ["$comment"] = "this is a mark for our injected plugin schema", + _meta = { + type = "object", + properties = { + disable = { + type = "boolean", + }, + error_response = { + oneOf = { + { type = "string" }, + { type = "object" }, + } + }, + priority = { + description = "priority of plugins by customized order", + type = "integer", + }, + filter = { + description = "filter determines whether the plugin ".. 
+ "needs to be executed at runtime", + type = "array", + }, + pre_function = { + description = "function to be executed in each phase " .. + "before execution of plugins. The pre_function will have access " .. + "to two arguments: `conf` and `ctx`.", + type = "string", + }, + }, + additionalProperties = false, + } +} + + +setmetatable(_M, { + __index = schema, + __newindex = function() error("no modification allowed") end, +}) + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/script.lua b/CloudronPackages/APISIX/apisix-source/apisix/script.lua new file mode 100644 index 0000000..49f13a9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/script.lua @@ -0,0 +1,59 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local require = require +local core = require("apisix.core") +local loadstring = loadstring +local error = error + + +local _M = {} + + +function _M.load(route, api_ctx) + local script = route.value.script + if script == nil or script == "" then + error("missing valid script") + end + + local loadfun, err = loadstring(script, "route#" .. route.value.id) + if not loadfun then + error("failed to load script: " .. err .. " script: " .. 
script) + return nil + end + api_ctx.script_obj = loadfun() +end + + +function _M.run(phase, api_ctx) + local obj = api_ctx and api_ctx.script_obj + if not obj then + core.log.error("missing loaded script object") + return api_ctx + end + + core.log.info("loaded script_obj: ", core.json.delay_encode(obj, true)) + + local phase_func = obj[phase] + if phase_func then + phase_func(api_ctx) + end + + return api_ctx +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/secret.lua b/CloudronPackages/APISIX/apisix-source/apisix/secret.lua new file mode 100644 index 0000000..60e575b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/secret.lua @@ -0,0 +1,227 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local require = require +local core = require("apisix.core") +local string = require("apisix.core.string") + +local find = string.find +local sub = string.sub +local upper = string.upper +local byte = string.byte +local type = type +local pcall = pcall +local pairs = pairs + +local _M = {} + + +local PREFIX = "$secret://" +local secrets + +local function check_secret(conf) + local idx = find(conf.id or "", "/") + if not idx then + return false, "no secret id" + end + local manager = sub(conf.id, 1, idx - 1) + + local ok, secret_manager = pcall(require, "apisix.secret." .. manager) + if not ok then + return false, "secret manager not exits, manager: " .. manager + end + + return core.schema.check(secret_manager.schema, conf) +end + + + local function secret_kv(manager, confid) + local secret_values + secret_values = core.config.fetch_created_obj("/secrets") + if not secret_values or not secret_values.values then + return nil + end + + local secret = secret_values:get(manager .. "/" .. confid) + if not secret then + return nil + end + + return secret.value +end + + +function _M.secrets() + if not secrets then + return nil, nil + end + + return secrets.values, secrets.conf_version +end + + +function _M.init_worker() + local cfg = { + automatic = true, + checker = check_secret, + } + + secrets = core.config.new("/secrets", cfg) +end + + +local function check_secret_uri(secret_uri) + -- Avoid the error caused by has_prefix to cause a crash. + if type(secret_uri) ~= "string" then + return false, "error secret_uri type: " .. type(secret_uri) + end + + if not string.has_prefix(secret_uri, PREFIX) and + not string.has_prefix(upper(secret_uri), core.env.PREFIX) then + return false, "error secret_uri prefix: " .. 
secret_uri + end + + return true +end + +_M.check_secret_uri = check_secret_uri + + +local function parse_secret_uri(secret_uri) + local is_secret_uri, err = check_secret_uri(secret_uri) + if not is_secret_uri then + return is_secret_uri, err + end + + local path = sub(secret_uri, #PREFIX + 1) + local idx1 = find(path, "/") + if not idx1 then + return nil, "error format: no secret manager" + end + local manager = sub(path, 1, idx1 - 1) + + local idx2 = find(path, "/", idx1 + 1) + if not idx2 then + return nil, "error format: no secret conf id" + end + local confid = sub(path, idx1 + 1, idx2 - 1) + + local key = sub(path, idx2 + 1) + if key == "" then + return nil, "error format: no secret key id" + end + + local opts = { + manager = manager, + confid = confid, + key = key + } + return opts +end + + +local function fetch_by_uri(secret_uri) + core.log.info("fetching data from secret uri: ", secret_uri) + local opts, err = parse_secret_uri(secret_uri) + if not opts then + return nil, err + end + + local conf = secret_kv(opts.manager, opts.confid) + if not conf then + return nil, "no secret conf, secret_uri: " .. secret_uri + end + + local ok, sm = pcall(require, "apisix.secret." .. opts.manager) + if not ok then + return nil, "no secret manager: " .. 
opts.manager + end + + local value, err = sm.get(conf, opts.key) + if err then + return nil, err + end + + return value +end + +-- for test +_M.fetch_by_uri = fetch_by_uri + + +local function fetch(uri) + -- do a quick filter to improve retrieval speed + if byte(uri, 1, 1) ~= byte('$') then + return nil + end + + local val, err + if string.has_prefix(upper(uri), core.env.PREFIX) then + val, err = core.env.fetch_by_uri(uri) + elseif string.has_prefix(uri, PREFIX) then + val, err = fetch_by_uri(uri) + end + + if err then + core.log.error("failed to fetch secret value: ", err) + return + end + + return val +end + + +local secrets_lrucache = core.lrucache.new({ + ttl = 300, count = 512 +}) + +local fetch_secrets +do + local retrieve_refs + function retrieve_refs(refs) + for k, v in pairs(refs) do + local typ = type(v) + if typ == "string" then + refs[k] = fetch(v) or v + elseif typ == "table" then + retrieve_refs(v) + end + end + return refs + end + + local function retrieve(refs) + core.log.info("retrieve secrets refs") + + local new_refs = core.table.deepcopy(refs) + return retrieve_refs(new_refs) + end + + function fetch_secrets(refs, cache, key, version) + if not refs or type(refs) ~= "table" then + return nil + end + if not cache then + return retrieve(refs) + end + return secrets_lrucache(key, version, retrieve, refs) + end +end + +_M.fetch_secrets = fetch_secrets + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/secret/aws.lua b/CloudronPackages/APISIX/apisix-source/apisix/secret/aws.lua new file mode 100644 index 0000000..af2e045 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/secret/aws.lua @@ -0,0 +1,140 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- AWS Tools. +require("resty.aws.config") -- to read env vars before initing aws module + +local core = require("apisix.core") +local http = require("resty.http") +local aws = require("resty.aws") +local aws_instance + +local sub = core.string.sub +local find = core.string.find +local env = core.env +local unpack = unpack + +local schema = { + type = "object", + properties = { + access_key_id = { + type = "string", + }, + secret_access_key = { + type = "string", + }, + session_token = { + type = "string", + }, + region = { + type = "string", + default = "us-east-1", + }, + endpoint_url = core.schema.uri_def, + }, + required = {"access_key_id", "secret_access_key"}, +} + +local _M = { + schema = schema +} + +local function make_request_to_aws(conf, key) + if not aws_instance then + aws_instance = aws() + end + + local region = conf.region + + local access_key_id = env.fetch_by_uri(conf.access_key_id) or conf.access_key_id + + local secret_access_key = env.fetch_by_uri(conf.secret_access_key) or conf.secret_access_key + + local session_token = env.fetch_by_uri(conf.session_token) or conf.session_token + + local credentials = aws_instance:Credentials({ + accessKeyId = access_key_id, + secretAccessKey = secret_access_key, + sessionToken = session_token, + }) + + local default_endpoint = "https://secretsmanager." .. region .. 
".amazonaws.com" + local scheme, host, port, _, _ = unpack(http:parse_uri(conf.endpoint_url or default_endpoint)) + local endpoint = scheme .. "://" .. host + + local sm = aws_instance:SecretsManager({ + credentials = credentials, + endpoint = endpoint, + region = region, + port = port, + }) + + local res, err = sm:getSecretValue({ + SecretId = key, + VersionStage = "AWSCURRENT", + }) + + if not res then + return nil, err + end + + if res.status ~= 200 then + local data = core.json.encode(res.body) + if data then + return nil, "invalid status code " .. res.status .. ", " .. data + end + + return nil, "invalid status code " .. res.status + end + + return res.body.SecretString +end + +-- key is the aws secretId +function _M.get(conf, key) + core.log.info("fetching data from aws for key: ", key) + + local idx = find(key, '/') + + local main_key = idx and sub(key, 1, idx - 1) or key + if main_key == "" then + return nil, "can't find main key, key: " .. key + end + + local sub_key = idx and sub(key, idx + 1) or nil + + core.log.info("main: ", main_key, sub_key and ", sub: " .. sub_key or "") + + local res, err = make_request_to_aws(conf, main_key) + if not res then + return nil, "failed to retrtive data from aws secret manager: " .. err + end + + if not sub_key then + return res + end + + local data, err = core.json.decode(res) + if not data then + return nil, "failed to decode result, res: " .. res .. ", err: " .. err + end + + return data[sub_key] +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/secret/gcp.lua b/CloudronPackages/APISIX/apisix-source/apisix/secret/gcp.lua new file mode 100644 index 0000000..6b6e661 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/secret/gcp.lua @@ -0,0 +1,202 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. 
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +--- GCP Tools. +local core = require("apisix.core") +local http = require("resty.http") +local google_oauth = require("apisix.utils.google-cloud-oauth") + +local str_sub = core.string.sub +local str_find = core.string.find +local decode_base64 = ngx.decode_base64 + +local lrucache = core.lrucache.new({ ttl = 300, count = 8 }) + +local schema = { + type = "object", + properties = { + auth_config = { + type = "object", + properties = { + client_email = { type = "string" }, + private_key = { type = "string" }, + project_id = { type = "string" }, + token_uri = { + type = "string", + default = "https://oauth2.googleapis.com/token" + }, + scope = { + type = "array", + items = { + type = "string" + }, + default = { + "https://www.googleapis.com/auth/cloud-platform" + } + }, + entries_uri = { + type = "string", + default = "https://secretmanager.googleapis.com/v1" + }, + }, + required = { "client_email", "private_key", "project_id" } + }, + ssl_verify = { + type = "boolean", + default = true + }, + auth_file = { type = "string" }, + }, + oneOf = { + { required = { "auth_config" } }, + { required = { "auth_file" } }, + }, +} + +local _M = { + schema = schema +} + +local function fetch_oauth_conf(conf) + if conf.auth_config then + return conf.auth_config + end + + local file_content, err = core.io.get_file(conf.auth_file) + if not file_content then + return nil, "failed to read configuration, 
file: " .. conf.auth_file .. ", err: " .. err + end + + local config_tab, err = core.json.decode(file_content) + if not config_tab then + return nil, "config parse failure, data: " .. file_content .. ", err: " .. err + end + + local config = { + auth_config = { + client_email = config_tab.client_email, + private_key = config_tab.private_key, + project_id = config_tab.project_id + } + } + + local ok, err = core.schema.check(schema, config) + if not ok then + return nil, "config parse failure, file: " .. conf.auth_file .. ", err: " .. err + end + + return config_tab +end + + +local function get_secret(oauth, secrets_id) + local httpc = http.new() + + local access_token = oauth:generate_access_token() + if not access_token then + return nil, "failed to get google oauth token" + end + + local entries_uri = oauth.entries_uri .. "/projects/" .. oauth.project_id + .. "/secrets/" .. secrets_id .. "/versions/latest:access" + + local res, err = httpc:request_uri(entries_uri, { + ssl_verify = oauth.ssl_verify, + method = "GET", + headers = { + ["Content-Type"] = "application/json", + ["Authorization"] = (oauth.access_token_type or "Bearer") .. " " .. access_token, + }, + }) + + if not res then + return nil, err + end + + if res.status ~= 200 then + return nil, res.body + end + + local body, err = core.json.decode(res.body) + if not body then + return nil, "failed to parse response data, " .. err + end + + local payload = body.payload + if not payload then + return nil, "invalid payload" + end + + return decode_base64(payload.data) +end + + +local function make_request_to_gcp(conf, secrets_id) + local auth_config, err = fetch_oauth_conf(conf) + if not auth_config then + return nil, err + end + + local lru_key = auth_config.client_email .. "#" .. auth_config.project_id + + local oauth, err = lrucache(lru_key, "gcp", google_oauth.new, auth_config, conf.ssl_verify) + if not oauth then + return nil, "failed to create oauth object, " .. 
err + end + + local secret, err = get_secret(oauth, secrets_id) + if not secret then + return nil, err + end + + return secret +end + + +function _M.get(conf, key) + core.log.info("fetching data from gcp for key: ", key) + + local idx = str_find(key, '/') + + local main_key = idx and str_sub(key, 1, idx - 1) or key + if main_key == "" then + return nil, "can't find main key, key: " .. key + end + + local sub_key = idx and str_sub(key, idx + 1) + + core.log.info("main: ", main_key, sub_key and ", sub: " .. sub_key or "") + + local res, err = make_request_to_gcp(conf, main_key) + if not res then + return nil, "failed to retrtive data from gcp secret manager: " .. err + end + + if not sub_key then + return res + end + + local data, err = core.json.decode(res) + if not data then + return nil, "failed to decode result, err: " .. err + end + + return data[sub_key] +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/secret/vault.lua b/CloudronPackages/APISIX/apisix-source/apisix/secret/vault.lua new file mode 100644 index 0000000..40b5d40 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/secret/vault.lua @@ -0,0 +1,122 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +--- Vault Tools. +-- Vault is an identity-based secrets and encryption management system. + +local core = require("apisix.core") +local http = require("resty.http") + +local norm_path = require("pl.path").normpath + +local sub = core.string.sub +local rfind_char = core.string.rfind_char +local env = core.env + +local schema = { + type = "object", + properties = { + uri = core.schema.uri_def, + prefix = { + type = "string", + }, + token = { + type = "string", + }, + namespace = { + type = "string", + }, + }, + required = {"uri", "prefix", "token"}, +} + +local _M = { + schema = schema +} + +local function make_request_to_vault(conf, method, key, data) + local httpc = http.new() + -- config timeout or default to 5000 ms + httpc:set_timeout((conf.timeout or 5)*1000) + + local req_addr = conf.uri .. norm_path("/v1/" + .. conf.prefix .. "/" .. key) + + local token, _ = env.fetch_by_uri(conf.token) + if not token then + token = conf.token + end + + local headers = { + ["X-Vault-Token"] = token + } + if conf.namespace then + -- The namespace rule is referenced in + -- https://developer.hashicorp.com/vault/docs/enterprise/namespaces#vault-api-and-namespaces + headers["X-Vault-Namespace"] = conf.namespace + end + + local res, err = httpc:request_uri(req_addr, { + method = method, + headers = headers, + body = core.json.encode(data or {}, true) + }) + + if not res then + return nil, err + end + + return res.body +end + +-- key is the vault kv engine path +local function get(conf, key) + core.log.info("fetching data from vault for key: ", key) + + local idx = rfind_char(key, '/') + if not idx then + return nil, "error key format, key: " .. key + end + + local main_key = sub(key, 1, idx - 1) + if main_key == "" then + return nil, "can't find main key, key: " .. key + end + local sub_key = sub(key, idx + 1) + if sub_key == "" then + return nil, "can't find sub key, key: " .. 
key + end + + core.log.info("main: ", main_key, " sub: ", sub_key) + + local res, err = make_request_to_vault(conf, "GET", main_key) + if not res then + return nil, "failed to retrtive data from vault kv engine: " .. err + end + + local ret = core.json.decode(res) + if not ret or not ret.data then + return nil, "failed to decode result, res: " .. res + end + + return ret.data[sub_key] +end + +_M.get = get + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/ssl.lua b/CloudronPackages/APISIX/apisix-source/apisix/ssl.lua new file mode 100644 index 0000000..2bd7570 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/ssl.lua @@ -0,0 +1,342 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local secret = require("apisix.secret") +local ngx_ssl = require("ngx.ssl") +local ngx_ssl_client = require("ngx.ssl.clienthello") +local ffi = require("ffi") + +local C = ffi.C +local ngx_encode_base64 = ngx.encode_base64 +local ngx_decode_base64 = ngx.decode_base64 +local aes = require("resty.aes") +local str_lower = string.lower +local str_byte = string.byte +local assert = assert +local type = type +local ipairs = ipairs +local ngx_sub = ngx.re.sub + +ffi.cdef[[ +unsigned long ERR_peek_error(void); +void ERR_clear_error(void); +]] + +local cert_cache = core.lrucache.new { + ttl = 3600, count = 1024, +} + +local pkey_cache = core.lrucache.new { + ttl = 3600, count = 1024, +} + + +local _M = {} + + +function _M.server_name(clienthello) + local sni, err + if clienthello then + sni, err = ngx_ssl_client.get_client_hello_server_name() + else + sni, err = ngx_ssl.server_name() + end + if err then + return nil, err + end + + if not sni then + local local_conf = core.config.local_conf() + sni = core.table.try_read_attr(local_conf, "apisix", "ssl", "fallback_sni") + if not sni then + return nil + end + end + + sni = ngx_sub(sni, "\\.$", "", "jo") + sni = str_lower(sni) + return sni +end + + +function _M.session_hostname() + return ngx_ssl.session_hostname() +end + + +function _M.set_protocols_by_clienthello(ssl_protocols) + if ssl_protocols then + return ngx_ssl_client.set_protocols(ssl_protocols) + end + return true +end + + +local function init_iv_tbl(ivs) + local _aes_128_cbc_with_iv_tbl = core.table.new(2, 0) + local type_ivs = type(ivs) + + if type_ivs == "table" then + for _, iv in ipairs(ivs) do + local aes_with_iv = assert(aes:new(iv, nil, aes.cipher(128, "cbc"), {iv = iv})) + core.table.insert(_aes_128_cbc_with_iv_tbl, aes_with_iv) + end + elseif type_ivs == "string" then + local aes_with_iv = assert(aes:new(ivs, nil, aes.cipher(128, "cbc"), {iv = ivs})) + core.table.insert(_aes_128_cbc_with_iv_tbl, aes_with_iv) + end + 
+ return _aes_128_cbc_with_iv_tbl +end + + +local _aes_128_cbc_with_iv_tbl_gde +local function get_aes_128_cbc_with_iv_gde(local_conf) + if _aes_128_cbc_with_iv_tbl_gde == nil then + local ivs = core.table.try_read_attr(local_conf, "apisix", "data_encryption", "keyring") + _aes_128_cbc_with_iv_tbl_gde = init_iv_tbl(ivs) + end + + return _aes_128_cbc_with_iv_tbl_gde +end + + + +local function encrypt(aes_128_cbc_with_iv, origin) + local encrypted = aes_128_cbc_with_iv:encrypt(origin) + if encrypted == nil then + core.log.error("failed to encrypt key[", origin, "] ") + return origin + end + + return ngx_encode_base64(encrypted) +end + +function _M.aes_encrypt_pkey(origin, field) + local local_conf = core.config.local_conf() + local aes_128_cbc_with_iv_tbl_gde = get_aes_128_cbc_with_iv_gde(local_conf) + local aes_128_cbc_with_iv_gde = aes_128_cbc_with_iv_tbl_gde[1] + + if not field then + if aes_128_cbc_with_iv_gde ~= nil and core.string.has_prefix(origin, "---") then + return encrypt(aes_128_cbc_with_iv_gde, origin) + end + else + if field == "data_encrypt" then + if aes_128_cbc_with_iv_gde ~= nil then + return encrypt(aes_128_cbc_with_iv_gde, origin) + end + end + end + return origin +end + + +local function aes_decrypt_pkey(origin, field) + if not field and core.string.has_prefix(origin, "---") then + return origin + end + + local local_conf = core.config.local_conf() + local aes_128_cbc_with_iv_tbl = get_aes_128_cbc_with_iv_gde(local_conf) + if #aes_128_cbc_with_iv_tbl == 0 then + return origin + end + + local decoded_key = ngx_decode_base64(origin) + if not decoded_key then + core.log.error("base64 decode ssl key failed. key[", origin, "] ") + return nil + end + + for _, aes_128_cbc_with_iv in ipairs(aes_128_cbc_with_iv_tbl) do + local decrypted = aes_128_cbc_with_iv:decrypt(decoded_key) + if decrypted then + return decrypted + end + + if C.ERR_peek_error() then + -- clean up the error queue of OpenSSL to prevent + -- normal requests from being interfered with. 
+ C.ERR_clear_error() + end + end + + return nil, "decrypt ssl key failed" +end +_M.aes_decrypt_pkey = aes_decrypt_pkey + + +local function validate(cert, key) + local parsed_cert, err = ngx_ssl.parse_pem_cert(cert) + if not parsed_cert then + return nil, "failed to parse cert: " .. err + end + + if key == nil then + -- sometimes we only need to validate the cert + return true + end + + local err + key, err = aes_decrypt_pkey(key) + if not key then + core.log.error(err) + return nil, "failed to decrypt previous encrypted key" + end + + local parsed_key, err = ngx_ssl.parse_pem_priv_key(key) + if not parsed_key then + return nil, "failed to parse key: " .. err + end + + -- TODO: check if key & cert match + return true +end +_M.validate = validate + + +local function parse_pem_cert(sni, cert) + core.log.debug("parsing cert for sni: ", sni) + + local parsed, err = ngx_ssl.parse_pem_cert(cert) + return parsed, err +end + + +function _M.fetch_cert(sni, cert) + local parsed_cert, err = cert_cache(cert, nil, parse_pem_cert, sni, cert) + if not parsed_cert then + return false, err + end + + return parsed_cert +end + + +local function parse_pem_priv_key(sni, pkey) + core.log.debug("parsing priv key for sni: ", sni) + + local key, err = aes_decrypt_pkey(pkey) + if not key then + core.log.error(err) + return nil, err + end + local parsed, err = ngx_ssl.parse_pem_priv_key(key) + return parsed, err +end + + +function _M.fetch_pkey(sni, pkey) + local parsed_pkey, err = pkey_cache(pkey, nil, parse_pem_priv_key, sni, pkey) + if not parsed_pkey then + return false, err + end + + return parsed_pkey +end + + +local function support_client_verification() + return ngx_ssl.verify_client ~= nil +end +_M.support_client_verification = support_client_verification + + +function _M.check_ssl_conf(in_dp, conf) + if not in_dp then + local ok, err = core.schema.check(core.schema.ssl, conf) + if not ok then + return nil, "invalid configuration: " .. 
err + end + end + + if not secret.check_secret_uri(conf.cert) and + not secret.check_secret_uri(conf.key) then + + local ok, err = validate(conf.cert, conf.key) + if not ok then + return nil, err + end + end + + if conf.type == "client" then + return true + end + + local numcerts = conf.certs and #conf.certs or 0 + local numkeys = conf.keys and #conf.keys or 0 + if numcerts ~= numkeys then + return nil, "mismatched number of certs and keys" + end + + for i = 1, numcerts do + if not secret.check_secret_uri(conf.certs[i]) and + not secret.check_secret_uri(conf.keys[i]) then + + local ok, err = validate(conf.certs[i], conf.keys[i]) + if not ok then + return nil, "failed to handle cert-key pair[" .. i .. "]: " .. err + end + end + end + + if conf.client then + if not support_client_verification() then + return nil, "client tls verify unsupported" + end + + local ok, err = validate(conf.client.ca, nil) + if not ok then + return nil, "failed to validate client_cert: " .. err + end + end + + return true +end + + +function _M.get_status_request_ext() + core.log.debug("parsing status request extension ... 
") + local ext = ngx_ssl_client.get_client_hello_ext(5) + if not ext then + core.log.debug("no contains status request extension") + return false + end + local total_len = #ext + -- 1-byte for CertificateStatusType + -- 2-byte for zero-length "responder_id_list" + -- 2-byte for zero-length "request_extensions" + if total_len < 5 then + core.log.error("bad ssl client hello extension: ", + "extension data error") + return false + end + + -- CertificateStatusType + local status_type = str_byte(ext, 1) + if status_type == 1 then + core.log.debug("parsing status request extension ok: ", + "status_type is ocsp(1)") + return true + end + + return false +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/ssl/router/radixtree_sni.lua b/CloudronPackages/APISIX/apisix-source/apisix/ssl/router/radixtree_sni.lua new file mode 100644 index 0000000..ae7e5b2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/ssl/router/radixtree_sni.lua @@ -0,0 +1,332 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
--
-- SNI-based router for server SSL certificates, backed by a radixtree.
-- SNIs are stored reversed so a wildcard certificate ("*.x.com") can be
-- matched as a prefix of the reversed requested hostname.
--
local get_request = require("resty.core.base").get_request
local router_new = require("apisix.utils.router").new
local core = require("apisix.core")
local apisix_ssl = require("apisix.ssl")
local secret = require("apisix.secret")
local ngx_ssl = require("ngx.ssl")
local config_util = require("apisix.core.config_util")
local ngx = ngx
local ipairs = ipairs
local type = type
local error = error
local str_find = core.string.find
local str_gsub = string.gsub
local str_lower = string.lower
local tostring = tostring
local ssl_certificates
local radixtree_router
local radixtree_router_ver


local _M = {
    version = 0.1,
    server_name = ngx_ssl.server_name,
}


-- Build a radixtree router over every enabled server-type SSL object.
-- Each route's handler records the matched SSL object on the request ctx.
local function create_router(ssl_items)
    local items = ssl_items or {}
    local routes = core.table.new(#items, 0)
    local count = 0

    for _, ssl in config_util.iterate_values(items) do
        if ssl.value ~= nil and ssl.value.type == "server" and
            (ssl.value.status == nil or ssl.value.status == 1) then -- compatible with old version

            local sni
            if type(ssl.value.snis) == "table" and #ssl.value.snis > 0 then
                sni = core.table.new(0, #ssl.value.snis)
                for n, s in ipairs(ssl.value.snis) do
                    sni[n] = s:reverse()
                end
            else
                sni = ssl.value.sni:reverse()
            end

            count = count + 1
            routes[count] = {
                paths = sni,
                handler = function (api_ctx)
                    if not api_ctx then
                        return
                    end
                    api_ctx.matched_ssl = ssl
                    api_ctx.matched_sni = sni
                end
            }
        end
    end

    core.log.info("route items: ", core.json.delay_encode(routes, true))
    -- for testing
    if count > 1 then
        core.log.info("we have more than 1 ssl certs now")
    end

    local router, err = router_new(routes)
    if not router then
        return nil, err
    end

    return router
end


-- Parse a PEM cert/key pair (decrypting the stored form if needed) and
-- install both into the current TLS handshake.
-- NOTE(review): must run inside an ssl_certificate_by_lua context — the
-- get_request() guard below rejects any other phase.
local function set_pem_ssl_key(sni, cert, pkey)
    local r = get_request()
    if r == nil then
        return false, "no request found"
    end

    local parsed_cert, err = apisix_ssl.fetch_cert(sni, cert)
    if not parsed_cert then
        return false, "failed to parse PEM cert: " .. err
    end

    local ok, err = ngx_ssl.set_cert(parsed_cert)
    if not ok then
        return false, "failed to set PEM cert: " .. err
    end

    local parsed_pkey, err = apisix_ssl.fetch_pkey(sni, pkey)
    if not parsed_pkey then
        return false, "failed to parse PEM priv key: " .. err
    end

    ok, err = ngx_ssl.set_priv_key(parsed_pkey)
    if not ok then
        return false, "failed to set PEM priv key: " .. err
    end

    return true
end
_M.set_pem_ssl_key = set_pem_ssl_key


-- export the set cert/key process so we can hook it in the other plugins
function _M.set_cert_and_key(sni, value)
    local ok, err = set_pem_ssl_key(sni, value.cert, value.key)
    if not ok then
        return false, err
    end

    -- multiple certificates support.
    if value.certs then
        for i = 1, #value.certs do
            ok, err = set_pem_ssl_key(sni, value.certs[i], value.keys[i])
            if not ok then
                return false, err
            end
        end
    end

    return true
end


-- Find the SSL object for the current (or alt_sni-provided) SNI; unless
-- match_only is set, also install its certificate into the handshake.
-- Rebuilds the radixtree lazily whenever the config version changes.
function _M.match_and_set(api_ctx, match_only, alt_sni)
    local err
    if not radixtree_router or
       radixtree_router_ver ~= ssl_certificates.conf_version then
        radixtree_router, err = create_router(ssl_certificates.values)
        if not radixtree_router then
            return false, "failed to create radixtree router: " .. err
        end
        radixtree_router_ver = ssl_certificates.conf_version
    end

    local sni = alt_sni
    if not sni then
        sni, err = apisix_ssl.server_name()
        if type(sni) ~= "string" then
            local advise = "please check if the client requests via IP or uses an outdated " ..
                           "protocol. If you need to report an issue, " ..
                           "provide a packet capture file of the TLS handshake."
            return false, "failed to find SNI: " .. (err or advise)
        end
    end

    core.log.debug("sni: ", sni)

    local sni_rev = sni:reverse()
    local ok = radixtree_router:dispatch(sni_rev, nil, api_ctx)
    if not ok then
        if not alt_sni then
            -- it is expected that alternative SNI doesn't have a SSL certificate associated
            -- with it sometimes
            core.log.error("failed to find any SSL certificate by SNI: ", sni)
        end
        return false
    end

    -- The radixtree match is prefix-based on the reversed SNI; reject a
    -- prefix hit that is not an exact match or a wildcard label boundary.
    if type(api_ctx.matched_sni) == "table" then
        local matched = false
        for _, msni in ipairs(api_ctx.matched_sni) do
            if sni_rev == msni or not str_find(sni_rev, ".", #msni) then
                matched = true
                break
            end
        end
        if not matched then
            -- re-reverse the JSON-encoded list just for readable logging
            local log_snis = core.json.encode(api_ctx.matched_sni, true)
            if log_snis ~= nil then
                log_snis = str_gsub(log_snis:reverse(), "%[", "%]")
                log_snis = str_gsub(log_snis, "%]", "%[", 1)
            end
            core.log.warn("failed to find any SSL certificate by SNI: ",
                          sni, " matched SNIs: ", log_snis)
            return false
        end
    else
        if str_find(sni_rev, ".", #api_ctx.matched_sni) then
            core.log.warn("failed to find any SSL certificate by SNI: ",
                          sni, " matched SNI: ", api_ctx.matched_sni:reverse())
            return false
        end
    end

    core.log.info("debug - matched: ", core.json.delay_encode(api_ctx.matched_ssl, true))

    if match_only then
        return true
    end

    return _M.set(api_ctx.matched_ssl, sni)
end


-- Install the given SSL object's cert/key (and optional client-CA
-- verification) into the current handshake.
function _M.set(matched_ssl, sni)
    if not matched_ssl then
        return false, "failed to match ssl certificate"
    end

    local ok, err
    if not sni then
        sni, err = apisix_ssl.server_name()
        if type(sni) ~= "string" then
            local advise = "please check if the client requests via IP or uses an outdated " ..
                           "protocol. If you need to report an issue, " ..
                           "provide a packet capture file of the TLS handshake."
            return false, "failed to find SNI: " .. (err or advise)
        end
    end

    ngx_ssl.clear_certs()

    -- resolve secret references (e.g. vault://) before using the values
    local new_ssl_value = secret.fetch_secrets(matched_ssl.value, true, matched_ssl.value, "")
                          or matched_ssl.value

    ok, err = _M.set_cert_and_key(sni, new_ssl_value)
    if not ok then
        return false, err
    end

    if matched_ssl.value.client then
        local ca_cert = matched_ssl.value.client.ca
        local depth = matched_ssl.value.client.depth
        if apisix_ssl.support_client_verification() then
            local parsed_cert, err = apisix_ssl.fetch_cert(sni, ca_cert)
            if not parsed_cert then
                return false, "failed to parse client cert: " .. err
            end

            -- in the stream subsystem (or when no skip-mTLS regex is set)
            -- a failed client verification aborts the handshake itself
            local reject_in_handshake =
                (ngx.config.subsystem == "stream") or
                (matched_ssl.value.client.skip_mtls_uri_regex == nil)
            -- TODO: support passing `trusted_certs` (3rd arg, keep it nil for now)
            local ok, err = ngx_ssl.verify_client(parsed_cert, depth, nil,
                                                  reject_in_handshake)
            if not ok then
                return false, err
            end
        end
    end

    return true
end


-- Expose the raw SSL config values plus their version (nil before init).
function _M.ssls()
    if not ssl_certificates then
        return nil, nil
    end

    return ssl_certificates.values, ssl_certificates.conf_version
end


-- Normalize stored SNIs: strip one trailing dot and lowercase them.
local function ssl_filter(ssl)
    if not ssl.value then
        return
    end

    if ssl.value.sni then
        ssl.value.sni = ngx.re.sub(ssl.value.sni, "\\.$", "", "jo")
        ssl.value.sni = str_lower(ssl.value.sni)
    elseif ssl.value.snis then
        for i, v in ipairs(ssl.value.snis) do
            v = ngx.re.sub(v, "\\.$", "", "jo")
            ssl.value.snis[i] = str_lower(v)
        end
    end
end


-- Start watching /ssls in etcd; validates and normalizes each item.
function _M.init_worker()
    local err
    ssl_certificates, err = core.config.new("/ssls", {
        automatic = true,
        item_schema = core.schema.ssl,
        checker = function (item, schema_type)
            return apisix_ssl.check_ssl_conf(true, item)
        end,
        filter = ssl_filter,
    })
    if not ssl_certificates then
        error("failed to create etcd instance for fetching ssl certificates: "
              .. err)
    end
end


-- Fetch a single SSL object's value by id from the shared config cache.
function _M.get_by_id(ssl_id)
    local ssl
    local ssls = core.config.fetch_created_obj("/ssls")
    if ssls then
        ssl = ssls:get(tostring(ssl_id))
    end

    if not ssl then
        return nil
    end

    return ssl.value
end


return _M
--
-- Stream-subsystem ip-restriction plugin: a thin adapter over the HTTP
-- implementation in apisix.plugins.ip-restriction.init.
--
local core = require("apisix.core")
local base = require("apisix.plugins.ip-restriction.init")


-- Clone the base plugin table so the http and stream registries do not
-- share mutable state (avoid unexpected data sharing), then wire the
-- shared restrict logic into the stream "preread" phase.
local plugin = core.table.clone(base)
plugin.preread = base.restrict


return plugin
--
-- Stream-subsystem limit-conn plugin: reuses the connection-counting
-- core of the HTTP plugin, hooked into the preread/log phases.
--
local core = require("apisix.core")
local limit_conn = require("apisix.plugins.limit-conn.init")


local plugin_name = "limit-conn"

local schema = {
    type = "object",
    properties = {
        conn = {type = "integer", exclusiveMinimum = 0},
        burst = {type = "integer", minimum = 0},
        default_conn_delay = {type = "number", exclusiveMinimum = 0},
        only_use_default_delay = {type = "boolean", default = false},
        key = {type = "string"},
        key_type = {
            type = "string",
            enum = {"var", "var_combination"},
            default = "var",
        },
    },
    required = {"conn", "burst", "default_conn_delay", "key"}
}


local _M = {
    version = 0.1,
    priority = 1003,
    name = plugin_name,
    schema = schema,
}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- preread: acquire a connection slot (may delay or reject per config)
function _M.preread(conf, ctx)
    return limit_conn.increase(conf, ctx)
end


-- log: release the slot acquired in preread
function _M.log(conf, ctx)
    return limit_conn.decrease(conf, ctx)
end


return _M
--
-- Stream-subsystem mqtt-proxy plugin: peeks at the MQTT CONNECT packet
-- to validate the protocol name/level and to extract the client id,
-- which can then be used for load balancing via $mqtt_client_id.
--
local core = require("apisix.core")
local bit = require("bit")
local ngx = ngx
local str_byte = string.byte
local str_sub = string.sub


core.ctx.register_var("mqtt_client_id", function(ctx)
    return ctx.mqtt_client_id
end)


local schema = {
    type = "object",
    properties = {
        protocol_name = {type = "string"},
        protocol_level = {type = "integer"}
    },
    required = {"protocol_name", "protocol_level"},
}


local plugin_name = "mqtt-proxy"


local _M = {
    version = 0.1,
    priority = 1000,
    name = plugin_name,
    schema = schema,
}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


-- Decode an MQTT Variable Byte Integer starting at `offset` (1 to 4
-- bytes, 7 data bits per byte, high bit = continuation).
-- Returns the decoded value and the index of its last byte.
local function decode_variable_byte_int(data, offset)
    local multiplier = 1
    local value = 0
    local pos
    for i = offset, offset + 3 do
        pos = i
        local byte = str_byte(data, i, i)
        value = value + bit.band(byte, 127) * multiplier
        multiplier = multiplier * 128
        if bit.band(byte, 128) == 0 then
            break
        end
    end

    return value, pos
end


-- Parse the fixed header: returns (remaining_length, last_header_pos)
-- or (nil, nil, err) when the first byte is not in the accepted range.
-- NOTE(review): CONNECT's type+flags byte is 0x10; the accepted range
-- 16..32 is inherited from upstream — confirm before tightening.
local function parse_msg_hdr(data)
    local packet_type_flags_byte = str_byte(data, 1, 1)
    if packet_type_flags_byte < 16 or packet_type_flags_byte > 32 then
        return nil, nil,
               "Received unexpected MQTT packet type+flags: " .. packet_type_flags_byte
    end

    local len, pos = decode_variable_byte_int(data, 2)
    return len, pos
end


-- Parse the CONNECT variable header + client id, starting right after
-- the fixed header (parsed_pos). Returns a table with protocol name,
-- protocol version, client_id (when fully buffered) and expect_len —
-- the number of bytes needed to finish parsing.
local function parse_mqtt(data, parsed_pos)
    local res = {}

    local protocol_len = str_byte(data, parsed_pos + 1, parsed_pos + 1) * 256
                         + str_byte(data, parsed_pos + 2, parsed_pos + 2)
    parsed_pos = parsed_pos + 2
    res.protocol = str_sub(data, parsed_pos + 1, parsed_pos + protocol_len)
    parsed_pos = parsed_pos + protocol_len

    res.protocol_ver = str_byte(data, parsed_pos + 1, parsed_pos + 1)
    parsed_pos = parsed_pos + 1

    -- skip control flags & keepalive
    parsed_pos = parsed_pos + 3

    if res.protocol_ver == 5 then
        -- MQTT 5: skip the CONNECT properties block
        local property_len
        property_len, parsed_pos = decode_variable_byte_int(data, parsed_pos + 1)
        parsed_pos = parsed_pos + property_len
    end

    local client_id_len = str_byte(data, parsed_pos + 1, parsed_pos + 1) * 256
                          + str_byte(data, parsed_pos + 2, parsed_pos + 2)
    parsed_pos = parsed_pos + 2

    if parsed_pos + client_id_len > #data then
        -- not enough bytes buffered to read the whole client id
        res.expect_len = parsed_pos + client_id_len
        return res
    end

    if client_id_len == 0 then
        -- A Server MAY allow a Client to supply a ClientID that has a length of zero bytes
        res.client_id = ""
    else
        res.client_id = str_sub(data, parsed_pos + 1, parsed_pos + client_id_len)
    end

    parsed_pos = parsed_pos + client_id_len

    res.expect_len = parsed_pos
    return res
end


-- preread: peek (without consuming) the CONNECT packet, validate it
-- against the configured protocol name/level, and stash the client id
-- on ctx. Returns 503 on any read/parse/validation failure.
function _M.preread(conf, ctx)
    local sock = ngx.req.socket()
    -- the header format of MQTT CONNECT can be found in
    -- https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901033
    local data, err = sock:peek(5)
    if not data then
        core.log.error("failed to read the msg header: ", err)
        return 503
    end

    local remain_len, pos, err = parse_msg_hdr(data)
    if not remain_len then
        core.log.error("failed to parse the msg header: ", err)
        return 503
    end

    local data, err = sock:peek(pos + remain_len)
    if not data then
        core.log.error("failed to read the Connect Command: ", err)
        return 503
    end

    local res = parse_mqtt(data, pos)
    if res.expect_len > #data then
        core.log.error("failed to parse mqtt request, expect len: ",
                       res.expect_len, " but got ", #data)
        return 503
    end

    if res.protocol and res.protocol ~= conf.protocol_name then
        core.log.error("expect protocol name: ", conf.protocol_name,
                       ", but got ", res.protocol)
        return 503
    end

    if res.protocol_ver and res.protocol_ver ~= conf.protocol_level then
        core.log.error("expect protocol level: ", conf.protocol_level,
                       ", but got ", res.protocol_ver)
        return 503
    end

    core.log.info("mqtt client id: ", res.client_id)

    -- when client id is missing, fallback to balance by client IP
    if res.client_id ~= "" then
        ctx.mqtt_client_id = res.client_id
    end
    return
end


function _M.log(conf, ctx)
    core.log.info("plugin log phase, conf: ", core.json.encode(conf))
end


return _M
--
-- Stream-subsystem prometheus plugin: delegates metric collection to
-- the shared exporter's stream_log hook.
--
local core = require("apisix.core")
local exporter = require("apisix.plugins.prometheus.exporter")


local plugin_name = "prometheus"

local schema = {
    type = "object",
    properties = {
        prefer_name = {
            type = "boolean",
            default = false -- stream route doesn't have name yet
        }
    },
}


local _M = {
    version = 0.1,
    priority = 500,
    name = plugin_name,
    log = exporter.stream_log,
    schema = schema,
    run_policy = "prefer_route",
}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


return _M
--
-- Stream-subsystem syslog logger: batches log entries via the shared
-- batch-processor manager and ships them through the syslog sender.
--
local core = require("apisix.core")
local log_util = require("apisix.utils.log-util")
local bp_manager_mod = require("apisix.utils.batch-processor-manager")
local syslog = require("apisix.plugins.syslog.init")

local plugin_name = "syslog"

local batch_processor_manager = bp_manager_mod.new("stream sys logger")

-- per-route schema, extended with the batch-processor options
local schema = batch_processor_manager:wrap_schema({
    type = "object",
    properties = {
        host = {type = "string"},
        port = {type = "integer"},
        flush_limit = {type = "integer", minimum = 1, default = 4096},
        drop_limit = {type = "integer", default = 1048576},
        timeout = {type = "integer", minimum = 1, default = 3000},
        log_format = {type = "object"},
        sock_type = {type = "string", default = "tcp", enum = {"tcp", "udp"}},
        pool_size = {type = "integer", minimum = 5, default = 5},
        tls = {type = "boolean", default = false}
    },
    required = {"host", "port"}
})

-- plugin metadata schema (shared log_format override)
local metadata_schema = {
    type = "object",
    properties = {
        log_format = {
            type = "object"
        }
    },
}

local _M = {
    version = 0.1,
    priority = 401,
    name = plugin_name,
    schema = schema,
    metadata_schema = metadata_schema,
    flush_syslog = syslog.flush_syslog,
}


function _M.check_schema(conf, schema_type)
    if schema_type == core.schema.TYPE_METADATA then
        return core.schema.check(metadata_schema, conf)
    end
    return core.schema.check(schema, conf)
end


-- log: build the entry for this session and queue it for batching
function _M.log(conf, ctx)
    local entry = log_util.get_log_entry(plugin_name, conf, ctx)
    if not entry then
        return
    end

    syslog.push_entry(conf, ctx, entry)
end


return _M
--
-- Stream route matcher: routes are selected by SNI (via a radixtree)
-- and/or by remote/server address and server port.
--
local core = require("apisix.core")
local core_ip = require("apisix.core.ip")
local config_util = require("apisix.core.config_util")
local stream_plugin_checker = require("apisix.plugin").stream_plugin_checker
local router_new = require("apisix.utils.router").new
local apisix_ssl = require("apisix.ssl")
local xrpc = require("apisix.stream.xrpc")
local error = error
local tonumber = tonumber
local ipairs = ipairs

local user_routes
local router_ver
local tls_router
local other_routes = {}
local _M = {version = 0.1}


-- Check a route's address constraints (remote_addr / server_addr /
-- server_port) against the connection vars. Returns true when every
-- constraint present on the route matches.
local function match_addrs(route, vars)
    -- todo: use resty-ipmatcher to support multiple ip address
    if route.value.remote_addr then
        local ok, _ = route.value.remote_addr_matcher:match(vars.remote_addr)
        if not ok then
            return false
        end
    end

    if route.value.server_addr then
        local ok, _ = route.value.server_addr_matcher:match(vars.server_addr)
        if not ok then
            return false
        end
    end

    -- todo: use resty-ipmatcher to support multiple ip address
    if route.value.server_port and
       route.value.server_port ~= tonumber(vars.server_port) then
        return false
    end

    return true
end


-- Partition the stream routes into SNI-bearing routes (compiled into a
-- radixtree on the reversed SNI) and address-only routes. Returns an
-- error string on failure, nil on success.
local create_router
do
    local sni_to_items = {}
    local tls_routes = {}

    function create_router(items)
        local tls_routes_idx = 1
        local other_routes_idx = 1
        core.table.clear(tls_routes)
        core.table.clear(other_routes)
        core.table.clear(sni_to_items)

        for _, item in config_util.iterate_values(items) do
            if item.value == nil then
                goto CONTINUE
            end

            local route = item.value
            if route.protocol and route.protocol.superior_id then
                -- subordinate route won't be matched in the entry
                -- TODO: check the subordinate relationship in the Admin API
                goto CONTINUE
            end

            -- precompile the address matchers once per config generation
            if item.value.remote_addr then
                item.value.remote_addr_matcher = core_ip.create_ip_matcher({item.value.remote_addr})
            end
            if item.value.server_addr then
                item.value.server_addr_matcher = core_ip.create_ip_matcher({item.value.server_addr})
            end
            if not route.sni then
                other_routes[other_routes_idx] = item
                other_routes_idx = other_routes_idx + 1
                goto CONTINUE
            end

            -- several routes may share one SNI: group them, keep one
            -- radixtree path per distinct SNI
            local sni_rev = route.sni:reverse()
            local stored = sni_to_items[sni_rev]
            if stored then
                core.table.insert(stored, item)
                goto CONTINUE
            end

            sni_to_items[sni_rev] = {item}
            tls_routes[tls_routes_idx] = {
                paths = sni_rev,
                filter_fun = function (vars, opts, ctx)
                    local grouped = sni_to_items[sni_rev]
                    for _, candidate in ipairs(grouped) do
                        if match_addrs(candidate, vars) then
                            ctx.matched_route = candidate
                            return true
                        end
                    end
                    return false
                end,
                handler = function (ctx, sni_rev)
                    -- done in the filter_fun
                end
            }
            tls_routes_idx = tls_routes_idx + 1

            ::CONTINUE::
        end

        if #tls_routes > 0 then
            local router, err = router_new(tls_routes)
            if not router then
                return err
            end

            tls_router = router
        else
            -- Bugfix: reset the cached TLS router when the last
            -- SNI-bearing route disappears; previously the stale router
            -- from the prior generation kept matching deleted routes.
            tls_router = nil
        end

        return nil
    end
end


do
    local match_opts = {}

    -- Match the current connection against the stream routes; sets
    -- api_ctx.matched_route on a hit. Rebuilds the routers lazily when
    -- the config version changes.
    function _M.match(api_ctx)
        if router_ver ~= user_routes.conf_version then
            local err = create_router(user_routes.values)
            if err then
                return false, "failed to create router: " .. err
            end

            router_ver = user_routes.conf_version
        end

        local sni = apisix_ssl.server_name()
        if sni and tls_router then
            local sni_rev = sni:reverse()

            core.table.clear(match_opts)
            match_opts.vars = api_ctx.var

            local _, err = tls_router:dispatch(sni_rev, match_opts, api_ctx)
            if err then
                return false, "failed to match TLS router: " .. err
            end
        end

        if api_ctx.matched_route then
            -- unlike the matcher for the SSL, it is fine to let
            -- '*.x.com' to match 'a.b.x.com' as we don't care about
            -- the certificate
            return true
        end

        for _, route in ipairs(other_routes) do
            if match_addrs(route, api_ctx.var) then
                api_ctx.matched_route = route
                return true
            end
        end

        core.log.info("not hit any route")
        return true
    end
end


-- Expose the raw route values plus their version (nil before init).
function _M.routes()
    if not user_routes then
        return nil, nil
    end

    return user_routes.values, user_routes.conf_version
end


-- Validate a stream route item: plugin configs, address formats and the
-- optional xRPC protocol config.
local function stream_route_checker(item, in_cp)
    if item.plugins then
        local ok, message = stream_plugin_checker(item, in_cp)
        if not ok then
            return false, message
        end
    end

    -- validate the address format when remote_addr or server_addr is not nil
    if item.remote_addr then
        if not core_ip.validate_cidr_or_ip(item.remote_addr) then
            return false, "invalid remote_addr: " .. item.remote_addr
        end
    end
    if item.server_addr then
        if not core_ip.validate_cidr_or_ip(item.server_addr) then
            return false, "invalid server_addr: " .. item.server_addr
        end
    end

    if item.protocol then
        local prot_conf = item.protocol
        if prot_conf then
            local ok, message = xrpc.check_schema(prot_conf, false)
            if not ok then
                return false, message
            end
        end
    end

    return true
end
_M.stream_route_checker = stream_route_checker


-- Start watching /stream_routes in etcd.
function _M.stream_init_worker(filter)
    local err
    user_routes, err = core.config.new("/stream_routes", {
        automatic = true,
        item_schema = core.schema.stream_route,
        checker = function(item)
            return stream_route_checker(item)
        end,
        filter = filter,
    })

    if not user_routes then
        error("failed to create etcd instance for fetching /stream_routes : "
              .. err)
    end
end


return _M
--
-- xRPC entry point: loads protocol implementations (stream subsystem)
-- and their schemas (also in HTTP, where only the Admin API needs them
-- for validation).
--
local require = require
local core = require("apisix.core")
local metrics = require("apisix.stream.xrpc.metrics")
local ipairs = ipairs
local pairs = pairs
local ngx_exit = ngx.exit


local is_http = true
local runner
if ngx.config.subsystem ~= "http" then
    is_http = false
    -- the runner drives a protocol session; it is stream-only
    runner = require("apisix.stream.xrpc.runner")
end

local _M = {}
local registered_protocols = {}
local registered_protocol_schemas = {}


-- only need to load schema module when it is used in Admin API
local function register_protocol(name, is_http)
    if not is_http then
        registered_protocols[name] = require("apisix.stream.xrpc.protocols." .. name)
    end

    registered_protocol_schemas[name] =
        require("apisix.stream.xrpc.protocols." .. name .. ".schema")
end


-- Register every protocol enabled in the local config (xrpc.protocols).
function _M.init()
    local local_conf = core.config.local_conf()
    if not local_conf.xrpc then
        return
    end

    local prot_conf = local_conf.xrpc.protocols
    if not prot_conf then
        return
    end

    if is_http and not local_conf.apisix.enable_admin then
        -- we need to register xRPC protocols in HTTP only when Admin API is enabled
        return
    end

    for _, prot in ipairs(prot_conf) do
        core.log.info("register xprc protocol ", prot.name)
        register_protocol(prot.name, is_http)
    end
end


-- Register each enabled protocol's metrics on the prometheus collector.
function _M.init_metrics(collector)
    local local_conf = core.config.local_conf()
    if not local_conf.xrpc then
        return
    end

    local prot_conf = local_conf.xrpc.protocols
    if not prot_conf then
        return
    end

    for _, prot in ipairs(prot_conf) do
        metrics.store(collector, prot.name)
    end
end


-- Run per-worker initialization of each registered protocol
-- (registered_protocols is only populated in the stream subsystem).
function _M.init_worker()
    for name, prot in pairs(registered_protocols) do
        if not is_http and prot.init_worker then
            prot.init_worker()
        end
    end
end


-- Validate an xRPC protocol item ({name = ..., conf = ...}).
-- Returns true, or false plus an error message.
function _M.check_schema(item, skip_disabled_plugin)
    local name = item.name
    local protocol = registered_protocol_schemas[name]
    if not protocol then
        if skip_disabled_plugin then
            -- Bugfix: like plugins, ignore an unknown protocol when the
            -- schema is checked in the DP. Previously this case fell
            -- through and indexed the nil `protocol` below whenever
            -- `item.conf` was present.
            return true
        end
        return false, "unknown protocol [" .. name .. "]"
    end

    -- check protocol-specific configuration
    if not item.conf then
        return true
    end
    return protocol.check_schema(item.conf)
end


-- Hand the downstream connection to the protocol's session runner and
-- finish the request with the code it returns. Stream subsystem only.
function _M.run_protocol(conf, ctx)
    local name = conf.name
    local protocol = registered_protocols[name]
    local code = runner.run(protocol, ctx)
    return ngx_exit(code)
end


return _M
".metrics") + if not ok then + core.log.notice("no metric for protocol ", name) + return + end + + local hub = {} + for metric, conf in pairs(m) do + core.log.notice("register metric ", metric, " for protocol ", name) + hub[metric] = prometheus[conf.type](prometheus, name .. '_' .. metric, + conf.help, conf.labels, conf.buckets) + end + + hubs[name] = hub +end + + +function _M.load(name) + return hubs[name] +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/dubbo/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/dubbo/init.lua new file mode 100644 index 0000000..19160d6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/dubbo/init.lua @@ -0,0 +1,231 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
--
-- xRPC protocol implementation for Apache Dubbo: parses the 16-byte dubbo
-- frame header on both directions and proxies the payload between
-- downstream and upstream without decoding the body.
local core = require("apisix.core")
local sdk = require("apisix.stream.xrpc.sdk")
local xrpc_socket = require("resty.apisix.stream.xrpc.socket")
local math_random = math.random
local ngx = ngx
local OK = ngx.OK
local str_format = string.format
local DECLINED = ngx.DECLINED
local DONE = ngx.DONE
local bit = require("bit")
local ffi = require("ffi")
local ffi_str = ffi.string


-- dubbo protocol spec: https://cn.dubbo.apache.org/zh-cn/overview/reference/protocols/tcp/
local header_len = 16
local _M = {}


-- Initialize per-connection session state and return the downstream socket.
-- req_id_seq/resp_id_seq number the requests/responses of this connection.
function _M.init_downstream(session)
    session.req_id_seq = 0
    session.resp_id_seq = 0
    session.cmd_labels = { session.route.id, "" }
    return xrpc_socket.downstream.socket()
end


-- Decode the fixed 16-byte dubbo header from a Lua string.
-- Layout (big endian): bytes 1-2 magic, byte 3 flags, byte 4 status,
-- bytes 5-12 request id, bytes 13-16 body length.
-- Returns nil when fewer than 16 bytes are available.
local function parse_dubbo_header(header)
    -- reject short headers before doing any arithmetic on nil bytes
    for i = 1, header_len do
        local currentByte = header:byte(i)
        if not currentByte then
            return nil
        end
    end

    local magic_number = str_format("%04x", header:byte(1) * 256 + header:byte(2))
    local message_flag = header:byte(3)
    local status = header:byte(4)
    local request_id = 0
    for i = 5, 12 do
        request_id = request_id * 256 + header:byte(i)
    end

    local byte13Val = header:byte(13) * 256 * 256 * 256
    local byte14Val = header:byte(14) * 256 * 256
    local data_length = byte13Val + byte14Val + header:byte(15) * 256 + header:byte(16)

    -- flag bits: 7 = request, 6 = two-way, 5 = event; stored as 0/1
    local is_request = bit.band(bit.rshift(message_flag, 7), 0x01) == 1 and 1 or 0
    local is_two_way = bit.band(bit.rshift(message_flag, 6), 0x01) == 1 and 1 or 0
    local is_event = bit.band(bit.rshift(message_flag, 5), 0x01) == 1 and 1 or 0

    return {
        magic_number = magic_number,
        message_flag = message_flag,
        is_request = is_request,
        is_two_way = is_two_way,
        is_event = is_event,
        status = status,
        request_id = request_id,
        data_length = data_length
    }
end


-- Read one dubbo frame (header + body) from the socket and stash the raw
-- body in ngx.ctx. Returns (ok, err, pipelined); this implementation never
-- reports pipelined frames (third value is always false).
local function read_data(sk, is_req)
    local header_data, err = sk:read(header_len)
    if not header_data then
        return nil, err, false
    end

    local header_str = ffi_str(header_data, header_len)
    local header_info = parse_dubbo_header(header_str)
    if not header_info then
        return nil, "header insufficient", false
    end

    -- dubbo magic is 0xdabb
    local is_valid_magic_number = header_info.magic_number == "dabb"
    if not is_valid_magic_number then
        return nil, str_format("unknown magic number: \"%s\"", header_info.magic_number), false
    end

    local body_data, err = sk:read(header_info.data_length)
    if not body_data then
        core.log.error("failed to read dubbo request body")
        return nil, err, false
    end

    local ctx = ngx.ctx
    -- low 5 bits of the flag byte carry the serialization id
    ctx.dubbo_serialization_id = bit.band(header_info.message_flag, 0x1F)

    if is_req then
        ctx.dubbo_req_body_data = body_data
    else
        ctx.dubbo_rsp_body_data = body_data
    end

    return true, nil, false
end


local function read_req(sk)
    return read_data(sk, true)
end


local function read_reply(sk)
    return read_data(sk, false)
end


-- Consume one upstream reply and return the request ctx it belongs to.
-- NOTE(review): the ctx slot is hardcoded to 10 instead of using
-- session.resp_id_seq — verify against upstream APISIX whether this is
-- intentional; it looks suspicious for connections with other sequence ids.
local function handle_reply(session, sk)
    local ok, err = read_reply(sk)
    if not ok then
        return nil, err
    end

    local ctx = sdk.get_req_ctx(session, 10)

    return ctx
end


-- Read one (possibly pipelined) request from the client.
-- Returns OK plus the request ctx, or DECLINED on read failure.
function _M.from_downstream(session, downstream)
    local read_pipeline = false
    session.req_id_seq = session.req_id_seq + 1
    local ctx = sdk.get_req_ctx(session, session.req_id_seq)
    session._downstream_ctx = ctx
    while true do
        local ok, err, pipelined = read_req(downstream)
        if not ok then
            if err ~= "timeout" and err ~= "closed" then
                core.log.error("failed to read request: ", err)
            end

            if read_pipeline and err == "timeout" then
                break
            end

            return DECLINED
        end

        if not pipelined then
            break
        end

        if not read_pipeline then
            read_pipeline = true
            -- set minimal read timeout to read pipelined data
            downstream:settimeouts(0, 0, 1)
        end
    end

    if read_pipeline then
        -- set timeout back
        downstream:settimeouts(0, 0, 0)
    end

    return OK, ctx
end


-- Pick a random upstream node and connect; DECLINED when no node is
-- available or the connection fails.
function _M.connect_upstream(session, ctx)
    local conf = session.upstream_conf
    local nodes = conf.nodes
    if #nodes == 0 then
        core.log.error("failed to connect: no nodes")
        return DECLINED
    end

    local node = nodes[math_random(#nodes)]
    local sk = sdk.connect_upstream(node, conf)
    if not sk then
        return DECLINED
    end

    core.log.debug("dubbo_connect_upstream end")

    return OK, sk
end

function _M.disconnect_upstream(session, upstream)
    sdk.disconnect_upstream(upstream, session.upstream_conf)
end


-- Forward the buffered downstream bytes to the upstream socket.
function _M.to_upstream(session, ctx, downstream, upstream)
    local ok, _ = upstream:move(downstream)
    if not ok then
        return DECLINED
    end

    return OK
end


-- Read the upstream reply and forward it back to the client.
-- DONE signals that one RPC round-trip is complete.
function _M.from_upstream(session, downstream, upstream)
    local ctx, err = handle_reply(session, upstream)
    if err then
        return DECLINED
    end

    local ok, _ = downstream:move(upstream)
    if not ok then
        return DECLINED
    end

    return DONE, ctx
end


-- No per-request logging for dubbo.
function _M.log(_, _)
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/dubbo/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/dubbo/schema.lua
new file mode 100644
index 0000000..3a9d733
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/dubbo/schema.lua
@@ -0,0 +1,32 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Configuration schema for the dubbo xRPC protocol: any object is
-- accepted — the protocol has no specific options yet.
local core = require("apisix.core")


local schema = {
    type = "object",
}

local _M = {}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/commands.lua b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/commands.lua
new file mode 100644
index 0000000..ff3338f
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/commands.lua
@@ -0,0 +1,222 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Maps each Redis command to a "key finder": a predicate telling whether
-- the i-th argument of the command line is a key. The table is generated
-- from Redis' own COMMAND metadata (first_key:last_key:step), see the
-- script below.
local ipairs = ipairs
local pairs = pairs


local cmd_to_key_finder = {}
--[[
-- the data is generated from the script below
local redis = require "resty.redis"
local red = redis:new()

local ok, err = red:connect("127.0.0.1", 6379)
if not ok then
    ngx.say("failed to connect: ", err)
    return
end

local res = red:command("info")
local map = {}
for _, r in ipairs(res) do
    local first_key = r[4]
    local last_key = r[5]
    local step = r[6]
    local idx = first_key .. ':' .. last_key .. ':' .. step

    if idx ~= "1:1:1" then
        -- "1:1:1" is the default
        if map[idx] then
            table.insert(map[idx], r[1])
        else
            map[idx] = {r[1]}
        end
    end
end
for _, r in pairs(map) do
    table.sort(r)
end
local dump = require('pl.pretty').dump; dump(map)
--]]
-- commands grouped by their "first_key:last_key:step" signature
-- ("0:0:0" means the command takes no key at all)
local key_to_cmd = {
    ["0:0:0"] = {
        "acl",
        "asking",
        "auth",
        "bgrewriteaof",
        "bgsave",
        "blmpop",
        "bzmpop",
        "client",
        "cluster",
        "command",
        "config",
        "dbsize",
        "debug",
        "discard",
        "echo",
        "eval",
        "eval_ro",
        "evalsha",
        "evalsha_ro",
        "exec",
        "failover",
        "fcall",
        "fcall_ro",
        "flushall",
        "flushdb",
        "function",
        "hello",
        "info",
        "keys",
        "lastsave",
        "latency",
        "lmpop",
        "lolwut",
        "memory",
        "module",
        "monitor",
        "multi",
        "object",
        "pfselftest",
        "ping",
        "psubscribe",
        "psync",
        "publish",
        "pubsub",
        "punsubscribe",
        "quit",
        "randomkey",
        "readonly",
        "readwrite",
        "replconf",
        "replicaof",
        "reset",
        "role",
        "save",
        "scan",
        "script",
        "select",
        "shutdown",
        "sintercard",
        "slaveof",
        "slowlog",
        "subscribe",
        "swapdb",
        "sync",
        "time",
        "unsubscribe",
        "unwatch",
        "wait",
        "xgroup",
        "xinfo",
        "xread",
        "xreadgroup",
        "zdiff",
        "zinter",
        "zintercard",
        "zmpop",
        "zunion"
    },
    ["1:-1:1"] = {
        "del",
        "exists",
        "mget",
        "pfcount",
        "pfmerge",
        "sdiff",
        "sdiffstore",
        "sinter",
        "sinterstore",
        "ssubscribe",
        "sunion",
        "sunionstore",
        "sunsubscribe",
        "touch",
        "unlink",
        "watch"
    },
    ["1:-1:2"] = {
        "mset",
        "msetnx"
    },
    ["1:-2:1"] = {
        "blpop",
        "brpop",
        "bzpopmax",
        "bzpopmin"
    },
    ["1:2:1"] = {
        "blmove",
        "brpoplpush",
        "copy",
        "geosearchstore",
        "lcs",
        "lmove",
        "rename",
        "renamenx",
        "rpoplpush",
        "smove",
        "zrangestore"
    },
    ["2:-1:1"] = {
        "bitop"
    },
    ["2:2:1"] = {
        "pfdebug"
    },
    ["3:3:1"] = {
        "migrate"
    }
}
-- predicates: given (idx, narg) of the command line — idx is 1-based with
-- the command name itself at index 1 — return true when argument idx is a
-- key. `false` (not a function) marks key-less commands.
local key_finders = {
    ["0:0:0"] = false,
    ["1:-1:1"] = function (idx, narg)
        return 1 < idx
    end,
    ["1:-1:2"] = function (idx, narg)
        return 1 < idx and idx % 2 == 0
    end,
    ["1:-2:1"] = function (idx, narg)
        return 1 < idx and idx < narg - 1
    end,
    ["1:2:1"] = function (idx, narg)
        return idx == 2 or idx == 3
    end,
    ["2:-1:1"] = function (idx, narg)
        return 2 < idx
    end,
    ["2:2:1"] = function (idx, narg)
        return idx == 3
    end,
    ["3:3:1"] = function (idx, narg)
        return idx == 4
    end
}
-- flatten the grouping into a direct command -> finder lookup
for k, cmds in pairs(key_to_cmd) do
    for _, cmd in ipairs(cmds) do
        cmd_to_key_finder[cmd] = key_finders[k]
    end
end


return {
    cmd_to_key_finder = cmd_to_key_finder,
    -- default (the "1:1:1" signature): only the argument right after the
    -- command name is a key
    default_key_finder = function (idx, narg)
        return idx == 2
    end,
}
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/init.lua b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/init.lua
new file mode 100644
index 0000000..9aff6d0
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/init.lua
@@ -0,0 +1,499 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- xRPC protocol implementation for Redis (RESP): parses the client command
-- line (for logging, metrics and fault injection), proxies it to the
-- upstream, and tracks replies — including pub/sub push messages — so each
-- reply can be matched back to its request ctx.
local core = require("apisix.core")
local sdk = require("apisix.stream.xrpc.sdk")
local commands = require("apisix.stream.xrpc.protocols.redis.commands")
local xrpc_socket = require("resty.apisix.stream.xrpc.socket")
local ffi = require("ffi")
local ffi_str = ffi.string
local math_random = math.random
local OK = ngx.OK
local DECLINED = ngx.DECLINED
local DONE = ngx.DONE
local sleep = ngx.sleep
local str_byte = string.byte
local str_fmt = string.format
local ipairs = ipairs
local tonumber = tonumber


-- this variable is only used to log the redis command line in log_format
-- and is not used for filter in the logger phase.
core.ctx.register_var("redis_cmd_line", function(ctx)
    return core.table.concat(ctx.cmd_line, " ")
end)

-- redis protocol spec: https://redis.io/docs/reference/protocol-spec/
-- There is no plan to support inline command format
local protocol_name = "redis"
local _M = {}
local MAX_LINE_LEN = 128
local MAX_VALUE_LEN = 128
-- RESP type prefix bytes
local PREFIX_ARR = str_byte("*")
local PREFIX_STR = str_byte("$")
local PREFIX_STA = str_byte("+")
local PREFIX_INT = str_byte(":")
local PREFIX_ERR = str_byte("-")


local lrucache = core.lrucache.new({
    type = "plugin",
})


-- Build the fault-injection matcher from `conf.faults`:
-- command -> { keys = { <key or "*"> -> rule } }. "*" is used when the rule
-- has no key or the command takes no key at all.
local function create_matcher(conf)
    local matcher = {}
    --[[
        {"delay": 5, "key":"x", "commands":["GET", "MGET"]}
        {"delay": 5, "commands":["GET"]}
        => {
            get = {keys = {x = {delay = 5}, * = {delay = 5}}}
            mget = {keys = {x = {delay = 5}}}
        }
    ]]--
    for _, rule in ipairs(conf.faults) do
        for _, cmd in ipairs(rule.commands) do
            cmd = cmd:lower()
            local key = rule.key
            -- kf == false means the command is key-less
            local kf = commands.cmd_to_key_finder[cmd]
            local key_matcher = matcher[cmd]
            if not key_matcher then
                key_matcher = {
                    keys = {}
                }
                matcher[cmd] = key_matcher
            end

            if not key or kf == false then
                key = "*"
            end

            if key_matcher.keys[key] then
                core.log.warn("override existent fault rule of cmd: ", cmd, ", key: ", key)
            end

            key_matcher.keys[key] = rule
        end
    end

    return matcher
end


-- Cached per-connection-ctx variant of create_matcher.
local function get_matcher(conf, ctx)
    return core.lrucache.plugin_ctx(lrucache, ctx, nil, create_matcher, conf)
end


-- Initialize per-connection session state and return the downstream socket.
function _M.init_downstream(session)
    local conf = session.route.protocol.conf
    if conf and conf.faults then
        local matcher = get_matcher(conf, session.conn_ctx)
        session.matcher = matcher
    end

    session.req_id_seq = 0
    session.resp_id_seq = 0
    session.cmd_labels = {session.route.id, ""}
    return xrpc_socket.downstream.socket()
end


-- Read one CRLF-terminated RESP line; returns (ptr, err, len).
local function read_line(sk)
    local p, err, len = sk:read_line(MAX_LINE_LEN)
    if not p then
        return nil, err
    end

    if len < 2 then
        return nil, "line too short"
    end

    return p, nil, len
end


-- Read a RESP length line ("*N" or "$N") and return N as a number.
local function read_len(sk)
    local p, err, len = read_line(sk)
    if not p then
        return nil, err
    end

    -- skip the type prefix byte
    local s = ffi_str(p + 1, len - 1)
    local n = tonumber(s)
    if not n then
        return nil, str_fmt("invalid len string: \"%s\"", s)
    end
    return n
end


-- Read one client command, record its (possibly truncated) command line in
-- a new request ctx, and apply any matching fault-injection delay.
-- Returns (ok, err, pipelined) where pipelined is true when more request
-- bytes are already buffered.
local function read_req(session, sk)
    local narg, err = read_len(sk)
    if not narg then
        return nil, err
    end

    local cmd_line = core.tablepool.fetch("xrpc_redis_cmd_line", narg, 0)

    local n, err = read_len(sk)
    if not n then
        return nil, err
    end

    -- +2 for the trailing CRLF
    local p, err = sk:read(n + 2)
    if not p then
        return nil, err
    end

    local s = ffi_str(p, n)
    local cmd = s:lower()
    cmd_line[1] = cmd

    if cmd == "subscribe" or cmd == "psubscribe" then
        session.in_pub_sub = true
    end

    local key_finder
    local matcher = session.matcher
    if matcher then
        matcher = matcher[s:lower()]
        if matcher then
            key_finder = commands.cmd_to_key_finder[s] or commands.default_key_finder
        end
    end

    for i = 2, narg do
        local is_key = false
        if key_finder then
            is_key = key_finder(i, narg)
        end

        local n, err = read_len(sk)
        if not n then
            return nil, err
        end

        local s
        if not is_key and n > MAX_VALUE_LEN then
            -- avoid recording big value
            local p, err = sk:read(MAX_VALUE_LEN)
            if not p then
                return nil, err
            end

            -- discard the remainder plus CRLF without buffering it
            local ok, err = sk:drain(n - MAX_VALUE_LEN + 2)
            if not ok then
                return nil, err
            end

            s = ffi_str(p, MAX_VALUE_LEN) .. "...(" .. n .. " bytes)"
        else
            local p, err = sk:read(n + 2)
            if not p then
                return nil, err
            end

            s = ffi_str(p, n)

            -- narrow the fault matcher to the rule of this specific key
            if is_key and matcher.keys[s] then
                matcher = matcher.keys[s]
                key_finder = nil
            end
        end

        cmd_line[i] = s
    end

    session.req_id_seq = session.req_id_seq + 1
    local ctx = sdk.get_req_ctx(session, session.req_id_seq)
    ctx.cmd_line = cmd_line
    ctx.cmd = cmd

    local pipelined = sk:has_pending_data()

    if matcher then
        if matcher.keys then
            -- try to match any key of this command
            matcher = matcher.keys["*"]
        end

        if matcher then
            sleep(matcher.delay)
        end
    end

    return true, nil, pipelined
end


-- Read one element of a pub/sub push message and decode it: bulk strings
-- are returned as Lua strings (nil-bulk as true), integers as numbers.
local function read_subscribe_reply(sk)
    local line, err, n = read_line(sk)
    if not line then
        return nil, err
    end

    local prefix = line[0]

    if prefix == PREFIX_STR then    -- char '$'
        local size = tonumber(ffi_str(line + 1, n - 1))
        if size < 0 then
            -- nil bulk string
            return true
        end

        local p, err = sk:read(size + 2)
        if not p then
            return nil, err
        end

        return ffi_str(p, size)

    elseif prefix == PREFIX_INT then    -- char ':'
        return tonumber(ffi_str(line + 1, n - 1))

    else
        return nil, str_fmt("unknown prefix: \"%s\"", prefix)
    end
end


-- Consume one complete RESP reply (recursing into arrays) without keeping
-- its content. When `session` is given and the connection is in pub/sub
-- mode, 3/4-element arrays are inspected to track subscribe/unsubscribe
-- state and message types.
local function read_reply(sk, session)
    local line, err, n = read_line(sk)
    if not line then
        return nil, err
    end

    local prefix = line[0]

    if prefix == PREFIX_STR then    -- char '$'
        -- print("bulk reply")

        local size = tonumber(ffi_str(line + 1, n - 1))
        if size < 0 then
            -- nil bulk string, no body to drain
            return true
        end

        local ok, err = sk:drain(size + 2)
        if not ok then
            return nil, err
        end

        return true

    elseif prefix == PREFIX_STA then    -- char '+'
        -- print("status reply")
        return true

    elseif prefix == PREFIX_ARR then    -- char '*'
        local narr = tonumber(ffi_str(line + 1, n - 1))

        -- print("multi-bulk reply: ", narr)
        if narr < 0 then
            -- nil array
            return true
        end

        if session and session.in_pub_sub and (narr == 3 or narr == 4) then
            -- possible pub/sub push: first element is the message type
            local msg_type, err = read_subscribe_reply(sk)
            if msg_type == nil then
                return nil, err
            end

            session.pub_sub_msg_type = msg_type

            local res, err = read_reply(sk)
            if res == nil then
                return nil, err
            end

            if msg_type == "unsubscribe" or msg_type == "punsubscribe" then
                -- third element is the remaining subscription count
                local n_ch, err = read_subscribe_reply(sk)
                if n_ch == nil then
                    return nil, err
                end

                if n_ch == 0 then
                    -- -1 is a sentinel: leave pub/sub mode, but only after
                    -- handle_reply has consumed this reply
                    session.in_pub_sub = -1
                    -- clear this flag later at the end of `handle_reply`
                end

            else
                -- "pmessage" carries pattern + channel + payload (2 more
                -- elements); other push types carry 1 more
                local n = msg_type == "pmessage" and 2 or 1
                for i = 1, n do
                    local res, err = read_reply(sk)
                    if res == nil then
                        return nil, err
                    end
                end
            end

        else
            for i = 1, narr do
                local res, err = read_reply(sk)
                if res == nil then
                    return nil, err
                end
            end
        end

        return true

    elseif prefix == PREFIX_INT then    -- char ':'
        -- print("integer reply")
        return true

    elseif prefix == PREFIX_ERR then    -- char '-'
        -- print("error reply: ", n)
        return true

    else
        return nil, str_fmt("unknown prefix: \"%s\"", prefix)
    end
end


-- Consume one upstream reply and, when it answers a pending request (as
-- opposed to an unsolicited pub/sub push), return that request's ctx.
local function handle_reply(session, sk)
    local ok, err = read_reply(sk, session)
    if not ok then
        return nil, err
    end

    local ctx
    if session.in_pub_sub and session.pub_sub_msg_type then
        local msg_type = session.pub_sub_msg_type
        session.pub_sub_msg_type = nil
        if session.resp_id_seq < session.req_id_seq then
            -- a (un)subscribe confirmation answers the oldest pending
            -- request only when the command names match
            local cur_ctx = sdk.get_req_ctx(session, session.resp_id_seq + 1)
            local cmd = cur_ctx.cmd
            if cmd == msg_type then
                ctx = cur_ctx
                session.resp_id_seq = session.resp_id_seq + 1
            end
        end

        if session.in_pub_sub == -1 then
            session.in_pub_sub = nil
        end
    else
        session.resp_id_seq = session.resp_id_seq + 1
        ctx = sdk.get_req_ctx(session, session.resp_id_seq)
    end

    return ctx
end


-- Read one or more (pipelined) client commands; OK when at least one
-- command was fully read, DECLINED on read failure.
function _M.from_downstream(session, downstream)
    local read_pipeline = false
    while true do
        local ok, err, pipelined = read_req(session, downstream)
        if not ok then
            if err ~= "timeout" and err ~= "closed" then
                core.log.error("failed to read request: ", err)
            end

            if read_pipeline and err == "timeout" then
                break
            end

            return DECLINED
        end

        if not pipelined then
            break
        end

        if not read_pipeline then
            read_pipeline = true
            -- set minimal read timeout to read pipelined data
            downstream:settimeouts(0, 0, 1)
        end
    end

    if read_pipeline then
        -- set timeout back
        downstream:settimeouts(0, 0, 0)
    end

    return OK
end


-- Pick a random upstream node and connect; DECLINED when no node is
-- available or the connection fails.
function _M.connect_upstream(session, ctx)
    local conf = session.upstream_conf
    local nodes = conf.nodes
    if #nodes == 0 then
        core.log.error("failed to connect: no nodes")
        return DECLINED
    end

    local node = nodes[math_random(#nodes)]
    local sk = sdk.connect_upstream(node, conf)
    if not sk then
        return DECLINED
    end

    return OK, sk
end


function _M.disconnect_upstream(session, upstream)
    sdk.disconnect_upstream(upstream, session.upstream_conf)
end


-- Forward the buffered downstream bytes to the upstream socket.
function _M.to_upstream(session, ctx, downstream, upstream)
    local ok, err = upstream:move(downstream)
    if not ok then
        core.log.error("failed to send to upstream: ", err)
        return DECLINED
    end

    return OK
end


-- Consume one upstream reply and forward it back to the client.
-- DONE signals that one command/reply exchange is complete.
function _M.from_upstream(session, downstream, upstream)
    local ctx, err = handle_reply(session, upstream)
    if err then
        core.log.error("failed to handle upstream: ", err)
        return DECLINED
    end

    local ok, err = downstream:move(upstream)
    if not ok then
        core.log.error("failed to handle upstream: ", err)
        return DECLINED
    end

    return DONE, ctx
end


-- Per-request logging: update prometheus metrics (when configured) and
-- release the pooled command-line table.
function _M.log(session, ctx)
    local metrics = sdk.get_metrics(session, protocol_name)
    if metrics then
        session.cmd_labels[2] = ctx.cmd
        metrics.commands_total:inc(1, session.cmd_labels)
        metrics.commands_latency_seconds:observe(ctx.var.rpc_time, session.cmd_labels)
    end

    core.tablepool.release("xrpc_redis_cmd_line", ctx.cmd_line)
    ctx.cmd_line = nil
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/metrics.lua b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/metrics.lua
new file mode 100644
index 0000000..6009a50
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/metrics.lua
@@ -0,0 +1,33 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Prometheus metric declarations for the redis xRPC protocol, consumed by
-- apisix.stream.xrpc.metrics (conf.type selects the collector constructor).
local _M = {
    commands_total = {
        type = "counter",
        help = "Total number of requests for a specific Redis command",
        labels = {"route", "command"},
    },
    commands_latency_seconds = {
        type = "histogram",
        help = "Latency of requests for a specific Redis command",
        labels = {"route", "command"},
        -- latency buckets, 1ms to 1s:
        buckets = {0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1}
    },
}


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/schema.lua b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/schema.lua
new file mode 100644
index 0000000..0b6c90c
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/protocols/redis/schema.lua
@@ -0,0 +1,59 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Configuration schema for the redis xRPC protocol: an optional list of
-- fault-injection rules, each delaying matching commands (optionally only
-- for a specific key) by `delay` seconds.
local core = require("apisix.core")


local schema = {
    type = "object",
    properties = {
        faults = {
            type = "array",
            minItems = 1,
            items = {
                type = "object",
                properties = {
                    commands = {
                        type = "array",
                        minItems = 1,
                        items = {
                            type = "string"
                        },
                    },
                    key = {
                        type = "string",
                        minLength = 1,
                    },
                    delay = {
                        type = "number",
                        description = "additional delay in seconds",
                    }
                },
                -- `key` is optional: a rule without a key applies to any key
                required = {"commands", "delay"}
            },
        },
    },
}

local _M = {}


function _M.check_schema(conf)
    return core.schema.check(schema, conf)
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/runner.lua b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/runner.lua
new file mode 100644
index 0000000..5f1b97d
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/runner.lua
@@ -0,0 +1,279 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Session runner of the xRPC framework: drives a protocol implementation
-- over one downstream connection — reading requests, connecting upstreams,
-- spawning a coroutine to pump replies back, and running loggers when each
-- RPC finishes.
local require = require
local core = require("apisix.core")
local expr = require("resty.expr.v1")
local pairs = pairs
local ngx = ngx
local ngx_now = ngx.now
local OK = ngx.OK
local DECLINED = ngx.DECLINED
local DONE = ngx.DONE
local pcall = pcall
local ipairs = ipairs
local tostring = tostring


-- per-RPC elapsed time, derived from timestamps set by the framework
core.ctx.register_var("rpc_time", function(ctx)
    return ctx._rpc_end_time - ctx._rpc_start_time
end)

-- cache of compiled logger filter expressions, keyed by logger conf id
local logger_expr_cache = core.lrucache.new({
    ttl = 300, count = 1024
})

local _M = {}


-- Create the session table for this connection and attach it to conn_ctx.
local function open_session(conn_ctx)
    conn_ctx.xrpc_session = {
        conn_ctx = conn_ctx,
        route = conn_ctx.matched_route.value,
        -- fields start with '_' should not be accessed by the protocol implementation
        _upstream_conf = conn_ctx.matched_upstream,
        _ctxs = {},
    }
    return conn_ctx.xrpc_session
end


-- Release a finished request ctx back to the table pool.
local function put_req_ctx(session, ctx)
    local id = ctx._id
    session._ctxs[id] = nil

    core.ctx.release_vars(ctx)

    core.tablepool.release("xrpc_ctxs", ctx)
end


-- Evaluate a logger's optional `filter` expression against ctx.var.
-- A logger without a filter always matches.
local function filter_logger(ctx, logger)
    if not logger then
        return false
    end

    if not logger.filter or #logger.filter == 0 then
        -- no valid filter, default execution plugin
        return true
    end

    -- tostring(table) changes when the conf is reloaded, so it doubles as a
    -- cache version for the compiled expression
    local version = tostring(logger.filter)
    local filter_expr, err = logger_expr_cache(ctx.conf_id, version, expr.new, logger.filter)
    if not filter_expr or err then
        core.log.error("failed to validate the 'filter' expression: ", err)
        return false
    end
    return filter_expr:eval(ctx.var)
end


-- Load the stream log plugin by name and invoke its `log` phase.
local function run_log_plugin(ctx, logger)
    local pkg_name = "apisix.stream.plugins." .. logger.name
    local ok, plugin = pcall(require, pkg_name)
    if not ok then
        core.log.error("failed to load plugin [", logger.name, "] err: ", plugin)
        return
    end

    local log_func = plugin.log
    if log_func then
        log_func(logger.conf, ctx)
    end
end


-- Finish one RPC: stamp the end time, run matching loggers, let the
-- protocol log it, then recycle the ctx.
local function finialize_req(protocol, session, ctx)
    ctx._rpc_end_time = ngx_now()
    local loggers = session.route.protocol.logger
    if loggers and #loggers > 0 then
        for _, logger in ipairs(loggers) do
            ctx.conf_id = tostring(logger.conf)
            local matched = filter_logger(ctx, logger)
            core.log.info("log filter: ", logger.name, " filter result: ", matched)
            if matched then
                run_log_plugin(ctx, logger)
            end
        end
    end

    protocol.log(session, ctx)
    put_req_ctx(session, ctx)
end


-- Tear down the session: disconnect all upstreams and finalize any RPCs
-- that never got a reply (marked `unfinished`).
local function close_session(session, protocol)
    local upstream_ctx = session._upstream_ctx
    if upstream_ctx then
        upstream_ctx.closed = true

        local up = upstream_ctx.upstream
        protocol.disconnect_upstream(session, up)
    end

    local upstream_ctxs = session._upstream_ctxs
    if upstream_ctxs then
        for _, upstream_ctx in pairs(upstream_ctxs) do
            upstream_ctx.closed = true

            local up = upstream_ctx.upstream
            protocol.disconnect_upstream(session, up)
        end
    end

    for id, ctx in pairs(session._ctxs) do
        core.log.notice("RPC is not finished, id: ", id)
        ctx.unfinished = true
        finialize_req(protocol, session, ctx)
    end
end


-- Get or create the upstream connection for the current request. When the
-- protocol set `session._upstream_key`, connections are kept per key in
-- `_upstream_ctxs`; otherwise a single `_upstream_ctx` is reused.
local function open_upstream(protocol, session, ctx)
    local key = session._upstream_key
    session._upstream_key = nil

    if key then
        if not session._upstream_ctxs then
            session._upstream_ctxs = {}
        end

        local up_ctx = session._upstream_ctxs[key]
        if up_ctx then
            return OK, up_ctx
        end
    else
        if session._upstream_ctx then
            return OK, session._upstream_ctx
        end

        session.upstream_conf = session._upstream_conf
    end

    -- NOTE(review): `session` is passed as both arguments although the
    -- protocol signature is connect_upstream(session, ctx) — verify against
    -- upstream APISIX whether this is intentional
    local state, upstream = protocol.connect_upstream(session, session)
    if state ~= OK then
        return state, nil
    end

    local up_ctx = {
        upstream = upstream,
        closed = false,
    }
    if key then
        session._upstream_ctxs[key] = up_ctx
    else
        session._upstream_ctx = up_ctx
    end

    return OK, up_ctx
end


-- Pump replies from one upstream back to the downstream until the upstream
-- ctx is closed or a read fails. Runs in its own light thread.
local function start_upstream_coroutine(session, protocol, downstream, up_ctx)
    local upstream = up_ctx.upstream
    while not up_ctx.closed do
        local status, ctx = protocol.from_upstream(session, downstream, upstream)
        if status ~= OK then
            if ctx ~= nil then
                finialize_req(protocol, session, ctx)
            end

            if status == DECLINED then
                -- fail to read
                break
            end

            if status == DONE then
                -- a rpc is finished
                goto continue
            end
        end

        ::continue::
    end
end


-- Main loop: repeatedly read a request from downstream, ensure an upstream
-- connection, forward the request, and lazily spawn the reply-pumping
-- coroutine. Returns the status code passed to ngx.exit().
function _M.run(protocol, conn_ctx)
    local session = open_session(conn_ctx)
    local downstream = protocol.init_downstream(session)

    while true do
        local status, ctx = protocol.from_downstream(session, downstream)
        if status ~= OK then
            if ctx ~= nil then
                finialize_req(protocol, session, ctx)
            end

            if status == DECLINED then
                -- fail to read or can't be authorized
                break
            end

            if status == DONE then
                -- heartbeat or fault injection, already reply to downstream
                goto continue
            end
        end

        -- need to do some auth/routing jobs before reaching upstream
        local status, up_ctx = open_upstream(protocol, session, ctx)
        if status ~= OK then
            if ctx ~= nil then
                finialize_req(protocol, session, ctx)
            end

            break
        end

        status = protocol.to_upstream(session, ctx, downstream, up_ctx.upstream)
        if status ~= OK then
            if ctx ~= nil then
                finialize_req(protocol, session, ctx)
            end

            if status == DECLINED then
                break
            end

            if status == DONE then
                -- for Unary request we can directly reply here
                goto continue
            end
        end

        if not up_ctx.coroutine then
            local co, err = ngx.thread.spawn(
                start_upstream_coroutine, session, protocol, downstream, up_ctx)
            if not co then
                core.log.error("failed to start upstream coroutine: ", err)
                break
            end

            up_ctx.coroutine = co
        end

        ::continue::
    end

    close_session(session, protocol)

    -- return non-zero code to terminate the session
    return 200
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/sdk.lua b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/sdk.lua
new file mode 100644
index 0000000..60f100c
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/stream/xrpc/sdk.lua
@@ -0,0 +1,202 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

--- Upstream helper functions which can be used in xRPC
--
-- @module xrpc.sdk
local core = require("apisix.core")
local config_util = require("apisix.core.config_util")
local router = require("apisix.stream.router.ip_port")
local metrics = require("apisix.stream.xrpc.metrics")
local apisix_upstream = require("apisix.upstream")
local xrpc_socket = require("resty.apisix.stream.xrpc.socket")
local ngx_now = ngx.now
local str_fmt = string.format
local tab_insert = table.insert
local error = error
local tostring = tostring


local _M = {}


---
-- Returns the connected xRPC upstream socket according to the configuration
--
-- @function xrpc.sdk.connect_upstream
-- @tparam table node selected upstream node
-- @tparam table up_conf upstream configuration
-- @treturn table|nil the xRPC upstream socket, or nil if failed
function _M.connect_upstream(node, up_conf)
    local sock = xrpc_socket.upstream.socket()

    local timeout = up_conf.timeout
    if not timeout then
        -- use the default timeout of Nginx proxy
        sock:settimeouts(60 * 1000, 600 * 1000, 600 * 1000)
    else
        -- the timeout unit for balancer is second while the unit for cosocket is millisecond
        sock:settimeouts(timeout.connect * 1000, timeout.send * 1000, timeout.read * 1000)
    end

    local ok, err = sock:connect(node.host, node.port)
    if not ok then
        core.log.error("failed to connect: ", err)
        return nil
    end

    if up_conf.scheme == "tls" then
        -- TODO: support mTLS
        local ok, err = sock:sslhandshake(nil, node.host)
        if not ok then
            core.log.error("failed to handshake: ", err)
            return nil
        end
    end

    return sock
end


---
-- Disconnect xRPC upstream socket according to the configuration
--
-- @function xrpc.sdk.disconnect_upstream
-- @tparam table upstream xRPC upstream socket
-- @tparam table up_conf upstream configuration
function _M.disconnect_upstream(upstream, up_conf)
    return upstream:close()
end


---
-- Returns the request level ctx with an id
--
-- @function xrpc.sdk.get_req_ctx
-- @tparam table session xrpc session
-- @tparam string id ctx id
-- @treturn table the request level ctx
function _M.get_req_ctx(session, id)
    if not id then
        error("id is required")
    end

    -- reuse the ctx of an in-flight RPC with the same id, if any
    local cached = session._ctxs[id]
    if cached then
        return cached
    end

    local ctx = core.tablepool.fetch("xrpc_ctxs", 4, 4)
    -- fields start with '_' should not be accessed by the protocol implementation
    ctx._id = id
    core.ctx.set_vars_meta(ctx)
    ctx.conf_type = "xrpc-" .. session.route.protocol.name .. "-logger"

    session._ctxs[id] = ctx

    ctx._rpc_start_time = ngx_now()
    return ctx
end


---
-- Returns the new router if the stream routes are changed
--
-- @function xrpc.sdk.get_router
-- @tparam table session xrpc session
-- @tparam string version the current router version, should come from the last call
-- @treturn boolean whether there is a change
-- @treturn table the new router under the specific protocol
-- @treturn string the new router version
function _M.get_router(session, version)
    local protocol_name = session.route.protocol.name
    local id = session.route.id

    local items, conf_version = router.routes()
    if version == conf_version then
        -- nothing changed since the caller's version
        return false
    end

    -- collect only the routes that belong to this protocol and have this
    -- route as their superior
    local proto_router = {}
    for _, item in config_util.iterate_values(items) do
        if item.value == nil then
            goto CONTINUE
        end

        local route = item.value
        if route.protocol.name ~= protocol_name then
            goto CONTINUE
        end

        if tostring(route.protocol.superior_id) ~= id then
            goto CONTINUE
        end

        tab_insert(proto_router, route)

        ::CONTINUE::
    end

    return true, proto_router, conf_version
end


---
-- Set the session's current upstream according to the route's configuration
--
-- @function xrpc.sdk.set_upstream
-- @tparam table session xrpc session
-- @tparam table conf the route configuration
-- @treturn nil|string error message if present
function _M.set_upstream(session, conf)
    local up
    if conf.upstream then
        up = conf.upstream
    else
        local id = conf.upstream_id
        up = apisix_upstream.get_by_id(id)
        if not up then
            return str_fmt("upstream %s can't be got", id)
        end
    end

    -- the table address doubles as the cache key for the upstream ctx
    local key = tostring(up)
    core.log.info("set upstream to: ", key, " conf: ", core.json.delay_encode(up, true))

    session._upstream_key = key
    session.upstream_conf = up
    return nil
end


---
-- Returns the protocol specific metrics object
--
-- @function xrpc.sdk.get_metrics
-- @tparam table session xrpc session
-- @tparam string protocol_name protocol name
-- @treturn nil|table the metrics under the specific protocol if available
function _M.get_metrics(session, protocol_name)
    local metric_conf = session.route.protocol.metric
    if not (metric_conf and metric_conf.enable) then
        return nil
    end
    return metrics.load(protocol_name)
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/timers.lua b/CloudronPackages/APISIX/apisix-source/apisix/timers.lua
new file mode 100644
index 0000000..aebe346
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/timers.lua
@@ -0,0 +1,103 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local process = require("ngx.process")
local pairs = pairs
local unpack = unpack
local thread_spawn = ngx.thread.spawn
local thread_wait = ngx.thread.wait

-- interval (in seconds) between runs of the shared background timer
local check_interval = 1

-- registered callbacks, keyed by name
local timers = {}


local _M = {}


-- Spawn one light thread per registered timer callback, then wait on the
-- spawned threads (ngx.thread.wait returns as soon as one of them finishes).
local function background_timer()
    if core.table.nkeys(timers) == 0 then
        return
    end

    local threads = {}
    for name, timer in pairs(timers) do
        core.log.info("run timer[", name, "]")

        local th, err = thread_spawn(timer)
        if not th then
            core.log.error("failed to spawn thread for timer [", name, "]: ", err)
            goto continue
        end

        core.table.insert(threads, th)

::continue::
    end

    -- FIX: if every spawn failed, `threads` is empty and calling
    -- ngx.thread.wait() with no arguments raises a Lua error
    if #threads == 0 then
        return
    end

    local ok = thread_wait(unpack(threads))
    if not ok then
        core.log.error("failed to wait threads")
    end
end


-- True only in the privileged agent process.
local function is_privileged()
    return process.type() == "privileged agent"
end


-- Create the single shared background timer that drives all registered
-- callbacks; called once per worker.
function _M.init_worker()
    local opts = {
        each_ttl = 0,
        sleep_succ = 0,
        check_interval = check_interval,
    }
    local timer, err = core.timer.new("background", background_timer, opts)
    if not timer then
        core.log.error("failed to create background timer: ", err)
        return
    end

    core.log.notice("succeed to create background timer")
end


-- Register a callback under `name`. When `privileged` is set, the
-- registration only takes effect in the privileged agent process.
function _M.register_timer(name, f, privileged)
    if privileged and not is_privileged() then
        return
    end

    timers[name] = f
end


-- Remove a previously registered callback (same privileged gating as
-- register_timer).
function _M.unregister_timer(name, privileged)
    if privileged and not is_privileged() then
        return
    end

    timers[name] = nil
end


-- Expose the timer interval so callers can align their own scheduling.
function _M.check_interval()
    return check_interval
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/upstream.lua b/CloudronPackages/APISIX/apisix-source/apisix/upstream.lua
new file mode 100644
index 0000000..ffd5e39
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/upstream.lua
@@ -0,0 +1,659 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.
See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local require = require
local core = require("apisix.core")
local discovery = require("apisix.discovery.init").discovery
local upstream_util = require("apisix.utils.upstream")
local apisix_ssl = require("apisix.ssl")
local events = require("apisix.events")
local error = error
local tostring = tostring
local ipairs = ipairs
local pairs = pairs
local pcall = pcall
local ngx_var = ngx.var
local is_http = ngx.config.subsystem == "http"
local upstreams
local healthcheck

-- the stream subsystem gets its own shdict to avoid clashing with http
local healthcheck_shdict_name = "upstream-healthcheck"
if not is_http then
    healthcheck_shdict_name = healthcheck_shdict_name .. "-" .. ngx.config.subsystem
end

-- upstream mTLS needs the APISIX-Runtime build of the resty module;
-- fall back to a stub that reports the missing capability
local set_upstream_tls_client_param
local ok, apisix_ngx_upstream = pcall(require, "resty.apisix.upstream")
if ok then
    set_upstream_tls_client_param = apisix_ngx_upstream.set_cert_and_key
else
    set_upstream_tls_client_param = function ()
        return nil, "need to build APISIX-Runtime to support upstream mTLS"
    end
end

local set_stream_upstream_tls
if not is_http then
    local ok, apisix_ngx_stream_upstream = pcall(require, "resty.apisix.stream.upstream")
    if ok then
        set_stream_upstream_tls = apisix_ngx_stream_upstream.set_tls
    else
        set_stream_upstream_tls = function ()
            return nil, "need to build APISIX-Runtime to support TLS over TCP upstream"
        end
    end
end


local HTTP_CODE_UPSTREAM_UNAVAILABLE = 503
local _M = {}


-- Attach an already-resolved upstream conf to the request ctx. All four
-- arguments are mandatory; missing ones are programming errors.
local function set_directly(ctx, key, ver, conf)
    if not ctx then
        error("missing argument ctx", 2)
    end
    if not key then
        error("missing argument key", 2)
    end
    if not ver then
        error("missing argument ver", 2)
    end
    if not conf then
        error("missing argument conf", 2)
    end

    ctx.upstream_conf = conf
    ctx.upstream_version = ver
    ctx.upstream_key = key
    return
end
_M.set = set_directly


-- Stop and lazily clear the health checker that hangs off an upstream's
-- parent object, if one exists.
local function release_checker(healthcheck_parent)
    if not healthcheck_parent or not healthcheck_parent.checker then
        return
    end
    local checker = healthcheck_parent.checker
    core.log.info("try to release checker: ", tostring(checker))
    checker:delayed_clear(3)
    checker:stop()
end


local function get_healthchecker_name(value)
    return "upstream#" .. value.key
end
_M.get_healthchecker_name = get_healthchecker_name


-- Create (or reuse) a resty.healthcheck checker for this upstream and wire
-- its lifetime to the parent's clean handlers. Returns nil when another
-- request is already building one, or on failure.
local function create_checker(upstream)
    if healthcheck == nil then
        healthcheck = require("resty.healthcheck")
    end

    local healthcheck_parent = upstream.parent
    -- reuse the existing checker when neither the upstream object nor its
    -- node version changed
    if healthcheck_parent.checker and healthcheck_parent.checker_upstream == upstream
        and healthcheck_parent.checker_nodes_ver == upstream._nodes_ver then
        return healthcheck_parent.checker
    end

    if upstream.is_creating_checker then
        core.log.info("another request is creating new checker")
        return nil
    end
    upstream.is_creating_checker = true

    -- NOTE: `get_healthcheck_events_modele` is the actual (misspelled) name
    -- exposed by apisix.events — do not "correct" it here
    core.log.debug("events module used by the healthcheck: ", events.events_module,
                   ", module name: ",events:get_healthcheck_events_modele())

    local checker, err = healthcheck.new({
        name = get_healthchecker_name(healthcheck_parent),
        shm_name = healthcheck_shdict_name,
        checks = upstream.checks,
        -- the events.init_worker will be executed in the init_worker phase,
        -- events.healthcheck_events_module is set
        -- while the healthcheck object is executed in the http access phase,
        -- so it can be used here
        events_module = events:get_healthcheck_events_modele(),
    })

    if not checker then
        core.log.error("fail to create healthcheck instance: ", err)
        upstream.is_creating_checker = nil
        return nil
    end

    -- replace the previous checker's clean handler before installing ours
    if healthcheck_parent.checker then
        local ok, err = pcall(core.config_util.cancel_clean_handler, healthcheck_parent,
                              healthcheck_parent.checker_idx, true)
        if not ok then
            core.log.error("cancel clean handler error: ", err)
        end
    end

    core.log.info("create new checker: ", tostring(checker))

    local host = upstream.checks and upstream.checks.active and upstream.checks.active.host
    local port = upstream.checks and upstream.checks.active and upstream.checks.active.port
    local up_hdr = upstream.pass_host == "rewrite" and upstream.upstream_host
    local use_node_hdr = upstream.pass_host == "node" or nil
    for _, node in ipairs(upstream.nodes) do
        local host_hdr = up_hdr or (use_node_hdr and node.domain)
        local ok, err = checker:add_target(node.host, port or node.port, host,
                                           true, host_hdr)
        if not ok then
            core.log.error("failed to add new health check target: ", node.host, ":",
                           port or node.port, " err: ", err)
        end
    end

    local check_idx, err = core.config_util.add_clean_handler(healthcheck_parent, release_checker)
    if not check_idx then
        upstream.is_creating_checker = nil
        checker:clear()
        checker:stop()
        core.log.error("failed to add clean handler, err:",
                       err, " healthcheck parent:", core.json.delay_encode(healthcheck_parent, true))

        return nil
    end

    healthcheck_parent.checker = checker
    healthcheck_parent.checker_upstream = upstream
    healthcheck_parent.checker_nodes_ver = upstream._nodes_ver
    healthcheck_parent.checker_idx = check_idx

    upstream.is_creating_checker = nil

    return checker
end


-- Checker factory guard: only upstreams with a `checks` section get one.
local function fetch_healthchecker(upstream)
    if not upstream.checks then
        return nil
    end

    return create_checker(upstream)
end


local function set_upstream_scheme(ctx, upstream)
    -- plugins like proxy-rewrite may already set ctx.upstream_scheme
    if not ctx.upstream_scheme then
        -- the old configuration doesn't have scheme field, so fallback to "http"
        ctx.upstream_scheme = upstream.scheme or "http"
    end

    ctx.var["upstream_scheme"] = ctx.upstream_scheme
end
_M.set_scheme = set_upstream_scheme

-- default port per upstream scheme, used when a node omits its port
local scheme_to_port = {
    http = 80,
    https = 443,
    grpc = 80,
    grpcs = 443,
}


_M.scheme_to_port = scheme_to_port


-- Normalize node entries in place: fill in the default port (http only) and
-- priority. `nodes_ref` marks a node list as already processed.
local function fill_node_info(up_conf, scheme, is_stream)
    local nodes = up_conf.nodes
    if up_conf.nodes_ref == nodes then
        -- filled
        return true
    end

    local need_filled = false
    for _, n in ipairs(nodes) do
        if not is_stream and not n.port then
            if up_conf.scheme ~= scheme then
                return nil, "Can't detect upstream's scheme. " ..
                            "You should either specify a port in the node " ..
"or specify the upstream.scheme explicitly"
            end

            need_filled = true
        end

        if not n.priority then
            need_filled = true
        end
    end

    if not need_filled then
        up_conf.nodes_ref = nodes
        return true
    end

    core.log.debug("fill node info for upstream: ",
                   core.json.delay_encode(up_conf, true))

    -- keep the original nodes for slow path in `compare_upstream_node()`,
    -- can't use `core.table.deepcopy()` for whole `nodes` array here,
    -- because `compare_upstream_node()` compare `metadata` of node by address.
    up_conf.original_nodes = core.table.new(#nodes, 0)
    for i, n in ipairs(nodes) do
        up_conf.original_nodes[i] = core.table.clone(n)
        if not n.port or not n.priority then
            nodes[i] = core.table.clone(n)

            if not is_stream and not n.port then
                nodes[i].port = scheme_to_port[scheme]
            end

            -- fix priority for non-array nodes and nodes from service discovery
            if not n.priority then
                nodes[i].priority = 0
            end
        end
    end

    up_conf.nodes_ref = nodes
    return true
end


-- Resolve the upstream for this request: run service discovery if needed,
-- normalize nodes, attach the health checker and, for https/grpcs upstreams,
-- load the client certificate. Returns (status, err) on failure, nothing on
-- success.
function _M.set_by_route(route, api_ctx)
    if api_ctx.upstream_conf then
        -- upstream_conf has been set by traffic-split plugin
        return
    end

    local up_conf = api_ctx.matched_upstream
    if not up_conf then
        return 503, "missing upstream configuration in Route or Service"
    end
    -- core.log.info("up_conf: ", core.json.delay_encode(up_conf, true))

    if up_conf.service_name then
        if not discovery then
            return 503, "discovery is uninitialized"
        end
        if not up_conf.discovery_type then
            return 503, "discovery server need appoint"
        end

        local dis = discovery[up_conf.discovery_type]
        if not dis then
            local err = "discovery " .. up_conf.discovery_type .. " is uninitialized"
            return 503, err
        end

        local new_nodes, err = dis.nodes(up_conf.service_name, up_conf.discovery_args)
        if not new_nodes then
            return HTTP_CODE_UPSTREAM_UNAVAILABLE, "no valid upstream node: " .. (err or "nil")
        end

        local same = upstream_util.compare_upstream_node(up_conf, new_nodes)
        if not same then
            -- bump the node version so the cached checker is rebuilt
            if not up_conf._nodes_ver then
                up_conf._nodes_ver = 0
            end
            up_conf._nodes_ver = up_conf._nodes_ver + 1

            local pass, err = core.schema.check(core.schema.discovery_nodes, new_nodes)
            if not pass then
                return HTTP_CODE_UPSTREAM_UNAVAILABLE, "invalid nodes format: " .. err
            end

            core.log.info("discover new upstream from ", up_conf.service_name, ", type ",
                          up_conf.discovery_type, ": ",
                          core.json.delay_encode(up_conf, true))
        end

        -- in case the value of new_nodes is the same as the old one,
        -- but discovery lib return a new table for it.
        -- for example, when watch loop of kubernetes discovery is broken or done,
        -- it will fetch full data again and return a new table for every services.
        up_conf.nodes = new_nodes
    end

    local id = up_conf.parent.value.id
    local conf_version = up_conf.parent.modifiedIndex
    -- include the upstream object as part of the version, because the upstream will be changed
    -- by service discovery or dns resolver.
    set_directly(api_ctx, id, conf_version .. "#" .. tostring(up_conf) .. "#"
                 .. tostring(up_conf._nodes_ver or ''), up_conf)

    local nodes_count = up_conf.nodes and #up_conf.nodes or 0
    if nodes_count == 0 then
        release_checker(up_conf.parent)
        return HTTP_CODE_UPSTREAM_UNAVAILABLE, "no valid upstream node"
    end

    if not is_http then
        local ok, err = fill_node_info(up_conf, nil, true)
        if not ok then
            return 503, err
        end

        local scheme = up_conf.scheme
        if scheme == "tls" then
            local ok, err = set_stream_upstream_tls()
            if not ok then
                return 503, err
            end

            local sni = apisix_ssl.server_name()
            if sni then
                ngx_var.upstream_sni = sni
            end
        end

        local checker = fetch_healthchecker(up_conf)
        api_ctx.up_checker = checker
        return
    end

    set_upstream_scheme(api_ctx, up_conf)

    local ok, err = fill_node_info(up_conf, api_ctx.upstream_scheme, false)
    if not ok then
        return 503, err
    end

    local checker = fetch_healthchecker(up_conf)
    api_ctx.up_checker = checker

    local scheme = up_conf.scheme
    if (scheme == "https" or scheme == "grpcs") and up_conf.tls then

        local client_cert, client_key
        if up_conf.tls.client_cert_id then
            client_cert = api_ctx.upstream_ssl.cert
            client_key = api_ctx.upstream_ssl.key
        else
            client_cert = up_conf.tls.client_cert
            client_key = up_conf.tls.client_key
        end

        -- the sni here is just for logging
        local sni = api_ctx.var.upstream_host
        local cert, err = apisix_ssl.fetch_cert(sni, client_cert)
        -- BUG FIX: this previously tested `if not ok then`, but `ok` still
        -- held the (true) result of fill_node_info above, so a fetch_cert
        -- failure was silently ignored and a nil cert propagated downstream
        if not cert then
            return 503, err
        end

        local key, err = apisix_ssl.fetch_pkey(sni, client_key)
        -- BUG FIX: same stale-`ok` check as above for the private key
        if not key then
            return 503, err
        end

        if scheme == "grpcs" then
            api_ctx.upstream_grpcs_cert = cert
            api_ctx.upstream_grpcs_key = key
        else
            local ok, err = set_upstream_tls_client_param(cert, key)
            if not ok then
                return 503, err
            end
        end
    end

    return
end


-- Apply the client cert/key stashed for a grpcs upstream (set in
-- set_by_route) onto the current request.
function _M.set_grpcs_upstream_param(ctx)
    if ctx.upstream_grpcs_cert then
        local cert = ctx.upstream_grpcs_cert
        local key = ctx.upstream_grpcs_key
        local ok, err = set_upstream_tls_client_param(cert, key)
        if not ok then
            return 503, err
        end
    end
end


-- Expose the raw etcd-backed upstream list (values, conf_version); both nil
-- before init_worker has run.
function _M.upstreams()
    if not upstreams then
        return nil, nil
    end

    return upstreams.values, upstreams.conf_version
end


function _M.check_schema(conf)
    return core.schema.check(core.schema.upstream, conf)
end


-- Map a `hash_on` value to the schema its chash key must satisfy.
-- Returns (nil, nil) for "consumer" (no key schema needed) and
-- (nil, err) for unknown values.
local function get_chash_key_schema(hash_on)
    if not hash_on then
        return nil, "hash_on is nil"
    end

    if hash_on == "vars" then
        return core.schema.upstream_hash_vars_schema
    end

    if hash_on == "header" or hash_on == "cookie" then
        return core.schema.upstream_hash_header_schema
    end

    if hash_on == "consumer" then
        return nil, nil
    end

    if hash_on == "vars_combinations" then
        return core.schema.upstream_hash_vars_combinations_schema
    end

    return nil, "invalid hash_on type " .. hash_on
end


-- Validate an upstream conf. `in_dp` skips the admin-only checks (schema,
-- node ports, referenced ssl object, key encryption).
local function check_upstream_conf(in_dp, conf)
    if not in_dp then
        local ok, err = core.schema.check(core.schema.upstream, conf)
        if not ok then
            return false, "invalid configuration: " .. err
        end

        if conf.nodes and not core.table.isarray(conf.nodes) then
            local port
            for addr,_ in pairs(conf.nodes) do
                _, port = core.utils.parse_addr(addr)
                if port then
                    if port < 1 or port > 65535 then
                        return false, "invalid port " .. tostring(port)
                    end
                end
            end
        end

        local ssl_id = conf.tls and conf.tls.client_cert_id
        if ssl_id then
            local key = "/ssls/" .. ssl_id
            local res, err = core.etcd.get(key)
            if not res then
                return nil, "failed to fetch ssl info by "
                       .. "ssl id [" .. ssl_id .. "]: " .. err
            end

            if res.status ~= 200 then
                return nil, "failed to fetch ssl info by "
                       .. "ssl id [" .. ssl_id .. "], "
                       .. "response code: " .. res.status
            end
            if res.body and res.body.node and
                res.body.node.value and res.body.node.value.type ~= "client" then

                return nil, "failed to fetch ssl info by "
                       .. "ssl id [" .. ssl_id .. "], "
                       ..
"wrong ssl type"
            end
        end

        -- encrypt the key in the admin
        if conf.tls and conf.tls.client_key then
            conf.tls.client_key = apisix_ssl.aes_encrypt_pkey(conf.tls.client_key)
        end
    end

    if is_http then
        if conf.pass_host == "rewrite" and
            (conf.upstream_host == nil or conf.upstream_host == "")
        then
            return false, "`upstream_host` can't be empty when `pass_host` is `rewrite`"
        end
    end

    -- a client cert/key pair, when present, must actually match
    if conf.tls and conf.tls.client_cert then
        local cert = conf.tls.client_cert
        local key = conf.tls.client_key
        local ok, err = apisix_ssl.validate(cert, key)
        if not ok then
            return false, err
        end
    end

    -- the remaining checks only apply to consistent-hash balancing
    if conf.type ~= "chash" then
        return true
    end

    if conf.hash_on ~= "consumer" and not conf.key then
        return false, "missing key"
    end

    local key_schema, err = get_chash_key_schema(conf.hash_on)
    if err then
        return false, "type is chash, err: " .. err
    end

    if key_schema then
        local ok, err = core.schema.check(key_schema, conf.key)
        if not ok then
            return false, "invalid configuration: " .. err
        end
    end

    return true
end


-- Public admin-side validation entry (runs the full, non-DP check set).
function _M.check_upstream_conf(conf)
    return check_upstream_conf(false, conf)
end


-- Normalize an upstream value fetched from etcd: link it to its parent,
-- default the L4 scheme, convert map-style nodes to an array, and flag
-- whether any node uses a domain name (needs DNS resolution later).
local function filter_upstream(value, parent)
    if not value then
        return
    end

    value.parent = parent

    if not is_http and value.scheme == "http" then
        -- For L4 proxy, the default scheme is "tcp"
        value.scheme = "tcp"
    end

    if not value.nodes then
        return
    end

    local nodes = value.nodes
    if core.table.isarray(nodes) then
        for _, node in ipairs(nodes) do
            local host = node.host
            if not core.utils.parse_ipv4(host) and
                    not core.utils.parse_ipv6(host) then
                parent.has_domain = true
                break
            end
        end
    else
        -- map form: { ["host:port"] = weight } -> array of node tables
        local new_nodes = core.table.new(core.table.nkeys(nodes), 0)
        for addr, weight in pairs(nodes) do
            local host, port = core.utils.parse_addr(addr)
            if not core.utils.parse_ipv4(host) and
                    not core.utils.parse_ipv6(host) then
                parent.has_domain = true
            end
            local node = {
                host = host,
                port = port,
                weight = weight,
            }
            core.table.insert(new_nodes, node)
        end
        value.nodes = new_nodes
    end
end
_M.filter_upstream = filter_upstream


-- Start watching /upstreams in etcd; every fetched item is schema-checked
-- (DP mode) and normalized through filter_upstream.
function _M.init_worker()
    local err
    upstreams, err = core.config.new("/upstreams", {
        automatic = true,
        item_schema = core.schema.upstream,
        -- also check extra fields in the DP side
        checker = function (item, schema_type)
            return check_upstream_conf(true, item)
        end,
        filter = function(upstream)
            upstream.has_domain = false

            filter_upstream(upstream.value, upstream)

            core.log.info("filter upstream: ", core.json.delay_encode(upstream, true))
        end,
    })
    if not upstreams then
        error("failed to create etcd instance for fetching upstream: " .. err)
        return
    end
end


-- Look up an upstream by id, resolving domain nodes on demand. Returns the
-- resolved (dns_value) or raw value, or nil on any failure.
function _M.get_by_id(up_id)
    local upstream
    local upstreams = core.config.fetch_created_obj("/upstreams")
    if upstreams then
        upstream = upstreams:get(tostring(up_id))
    end

    if not upstream then
        core.log.error("failed to find upstream by id: ", up_id)
        return nil
    end

    if upstream.has_domain then
        local err
        upstream, err = upstream_util.parse_domain_in_up(upstream)
        if err then
            core.log.error("failed to get resolved upstream: ", err)
            return nil
        end
    end

    core.log.info("parsed upstream: ", core.json.delay_encode(upstream, true))
    return upstream.dns_value or upstream.value
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/utils/auth.lua b/CloudronPackages/APISIX/apisix-source/apisix/utils/auth.lua
new file mode 100644
index 0000000..b7c9186
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/utils/auth.lua
@@ -0,0 +1,24 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

local _M = {}

-- True when ctx._plugin_name is "multi-auth" — presumably set by the
-- multi-auth wrapper plugin before it delegates to an inner auth plugin
-- (NOTE(review): confirm against the multi-auth plugin implementation).
function _M.is_running_under_multi_auth(ctx)
    return ctx._plugin_name == "multi-auth"
end

return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/utils/batch-processor-manager.lua b/CloudronPackages/APISIX/apisix-source/apisix/utils/batch-processor-manager.lua
new file mode 100644
index 0000000..4e97bd6
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/apisix/utils/batch-processor-manager.lua
@@ -0,0 +1,158 @@
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local core = require("apisix.core")
local batch_processor = require("apisix.utils.batch-processor")
local timer_at = ngx.timer.at
local pairs = pairs
local setmetatable = setmetatable


local _M = {}
local mt = { __index = _M }


-- One manager instance per logger plugin; it owns a batch processor per
-- distinct plugin conf table.
function _M.new(name)
    return setmetatable({
        stale_timer_running = false,
        buffers = {},
        total_pushed_entries = 0,
        name = name,
    }, mt)
end


-- Merge the batch processor's schema properties into a plugin schema,
-- without overriding properties the plugin already declares.
function _M:wrap_schema(schema)
    local bp_schema = core.table.deepcopy(batch_processor.schema)
    local properties = schema.properties
    for k, v in pairs(bp_schema.properties) do
        if not properties[k] then
            properties[k] = v
        end
        -- don't touch if the plugin overrides the property
    end

    properties.name.default = self.name
    return schema
end


-- remove stale objects from the memory after timer expires
local function remove_stale_objects(premature, self)
    if premature then
        return
    end

    for key, batch in pairs(self.buffers) do
        if #batch.entry_buffer.entries == 0 and #batch.batch_to_process == 0 then
            core.log.info("removing batch processor stale object, conf: ",
                          core.json.delay_encode(key))
            self.buffers[key] = nil
        end
    end

    self.stale_timer_running = false
end


local check_stale
do
    local interval = 1800

    -- ensure at most one stale-cleanup timer is pending at a time
    function check_stale(self)
        if not self.stale_timer_running then
            -- run the timer every 30 mins if any log is present
            timer_at(interval, remove_stale_objects, self)
            self.stale_timer_running = true
        end
    end

    function _M.set_check_stale_interval(time)
        interval = time
    end
end


-- Sum the processed-entry counters across all live batch processors.
local function total_processed_entries(self)
    local processed_entries = 0
    for _, log_buffer in pairs(self.buffers) do
        processed_entries = processed_entries + log_buffer.processed_entries
    end
    return processed_entries
end


-- Shared backpressure gate for add_entry/add_entry_to_new_processor
-- (previously duplicated verbatim in both). Returns true (and logs) when
-- pushing one more entry would exceed `max_pending_entries`; a nil limit
-- disables the check.
local function exceed_max_pending_entries(self, max_pending_entries)
    if not max_pending_entries then
        return false
    end

    local total_processed_entries_count = total_processed_entries(self)
    if self.total_pushed_entries - total_processed_entries_count > max_pending_entries then
        core.log.error("max pending entries limit exceeded. discarding entry.",
                       " total_pushed_entries: ", self.total_pushed_entries,
                       " total_processed_entries: ", total_processed_entries_count,
                       " max_pending_entries: ", max_pending_entries)
        return true
    end

    return false
end


-- Push an entry onto the processor for `conf`. Returns false when no
-- processor exists yet (caller should use add_entry_to_new_processor),
-- true on success, nothing when the pending limit is hit.
function _M:add_entry(conf, entry, max_pending_entries)
    if exceed_max_pending_entries(self, max_pending_entries) then
        return
    end
    check_stale(self)

    local log_buffer = self.buffers[conf]
    if not log_buffer then
        return false
    end

    log_buffer:push(entry)
    self.total_pushed_entries = self.total_pushed_entries + 1
    return true
end


-- Create a batch processor for `conf` (driven by `func`) and push the first
-- entry. Same return convention as add_entry.
function _M:add_entry_to_new_processor(conf, entry, ctx, func, max_pending_entries)
    if exceed_max_pending_entries(self, max_pending_entries) then
        return
    end
    check_stale(self)

    local config = {
        name = conf.name,
        batch_max_size = conf.batch_max_size,
        max_retry_count = conf.max_retry_count,
        retry_delay = conf.retry_delay,
        buffer_duration = conf.buffer_duration,
        inactive_timeout = conf.inactive_timeout,
        route_id = ctx.var.route_id,
        server_addr = ctx.var.server_addr,
    }

    local log_buffer, err = batch_processor:new(func, config)
    if not log_buffer then
        core.log.error("error when creating the batch processor: ", err)
        return false
    end

    log_buffer:push(entry)
    self.buffers[conf] = log_buffer
    self.total_pushed_entries = self.total_pushed_entries + 1
    return true
end


return _M
diff --git a/CloudronPackages/APISIX/apisix-source/apisix/utils/batch-processor.lua b/CloudronPackages/APISIX/apisix-source/apisix/utils/batch-processor.lua
new file mode 100644
index 0000000..eabee4f
--- /dev/null
+++ 
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Generic batch processor: buffers log entries and flushes them to a user
-- supplied consumer function, with retries and inactivity/duration timeouts.
local core = require("apisix.core")
local setmetatable = setmetatable
local timer_at = ngx.timer.at
local ipairs = ipairs
local table = table
local now = ngx.now
local type = type
local batch_processor = {}
local batch_processor_mt = {
    __index = batch_processor
}
local execute_func
local create_buffer_timer
local batch_metrics
local prometheus
-- the prometheus exporter only exists in the http subsystem
if ngx.config.subsystem == "http" then
    prometheus = require("apisix.plugins.prometheus.exporter")
end


-- JSON schema for the batch-processor related plugin properties.
local schema = {
    type = "object",
    properties = {
        name = {type = "string", default = "log buffer"},
        max_retry_count = {type = "integer", minimum = 0, default= 0},
        retry_delay = {type = "integer", minimum = 0, default= 1},
        buffer_duration = {type = "integer", minimum = 1, default= 60},
        inactive_timeout = {type = "integer", minimum = 1, default= 5},
        batch_max_size = {type = "integer", minimum = 1, default= 1000},
    }
}
batch_processor.schema = schema


-- Schedule execute_func(self, batch) to run after `delay` seconds.
local function schedule_func_exec(self, delay, batch)
    local hdl, err = timer_at(delay, execute_func, self, batch)
    if not hdl then
        core.log.error("failed to create process timer: ", err)
        return
    end
end


-- Update the per-route prometheus gauge, when metrics are enabled.
local function set_metrics(self, count)
    -- add batch metric for every route
    if batch_metrics and self.name and self.route_id and self.server_addr then
        self.label = {self.name, self.route_id, self.server_addr}
        batch_metrics:set(count, self.label)
    end
end


-- Return batch[n..#batch] as a new array (drops already-consumed entries).
local function slice_batch(batch, n)
    local slice = {}
    local idx = 1
    for i = n or 1, #batch do
        slice[idx] = batch[i]
        idx = idx + 1
    end
    return slice
end


-- Timer callback: hand the batch to the consumer and reschedule on failure.
function execute_func(premature, self, batch)
    if premature then
        return
    end

    -- In case of "err" and a valid "first_fail" batch processor considers, all first_fail-1
    -- entries have been successfully consumed and hence reschedule the job for entries with
    -- index first_fail to #entries based on the current retry policy.
    local ok, err, first_fail = self.func(batch.entries, self.batch_max_size)
    if not ok then
        if first_fail then
            core.log.error("Batch Processor[", self.name, "] failed to process entries [",
                           #batch.entries + 1 - first_fail, "/", #batch.entries ,"]: ", err)
            batch.entries = slice_batch(batch.entries, first_fail)
            self.processed_entries = self.processed_entries + first_fail - 1
        else
            core.log.error("Batch Processor[", self.name,
                           "] failed to process entries: ", err)
        end

        batch.retry_count = batch.retry_count + 1
        if batch.retry_count <= self.max_retry_count and #batch.entries > 0 then
            schedule_func_exec(self, self.retry_delay,
                               batch)
        else
            -- give up: dropped entries still count as processed so the
            -- manager's pending-entries bookkeeping stays consistent
            self.processed_entries = self.processed_entries + #batch.entries
            core.log.error("Batch Processor[", self.name,"] exceeded ",
                           "the max_retry_count[", batch.retry_count,
                           "] dropping the entries")
        end
        return
    end
    self.processed_entries = self.processed_entries + #batch.entries
    core.log.debug("Batch Processor[", self.name,
                   "] successfully processed the entries")
end


-- Timer callback: flush when the buffer has been inactive for
-- inactive_timeout seconds or alive for buffer_duration seconds;
-- otherwise re-arm the timer.
local function flush_buffer(premature, self)
    if premature then
        return
    end

    if now() - self.last_entry_t >= self.inactive_timeout or
       now() - self.first_entry_t >= self.buffer_duration
    then
        core.log.debug("Batch Processor[", self.name ,"] buffer ",
                       "duration exceeded, activating buffer flush")
        self:process_buffer()
        self.is_timer_running = false
        return
    end

    -- buffer duration did not exceed or the buffer is active,
    -- extending the timer
    core.log.debug("Batch Processor[", self.name ,"] extending buffer timer")
    create_buffer_timer(self)
end


-- Arm the inactivity flush timer.
function create_buffer_timer(self)
    local hdl, err = timer_at(self.inactive_timeout, flush_buffer, self)
    if not hdl then
        core.log.error("failed to create buffer timer: ", err)
        return
    end
    self.is_timer_running = true
end


-- Create a processor. `func(entries, batch_max_size)` consumes one batch
-- and returns ok[, err[, first_fail]] (see execute_func above).
function batch_processor:new(func, config)
    local ok, err = core.schema.check(schema, config)
    if not ok then
        return nil, err
    end

    if type(func) ~= "function" then
        return nil, "Invalid argument, arg #1 must be a function"
    end

    local processor = {
        func = func,
        buffer_duration = config.buffer_duration,
        inactive_timeout = config.inactive_timeout,
        max_retry_count = config.max_retry_count,
        batch_max_size = config.batch_max_size,
        retry_delay = config.retry_delay,
        name = config.name,
        batch_to_process = {},
        entry_buffer = {entries = {}, retry_count = 0},
        is_timer_running = false,
        first_entry_t = 0,
        last_entry_t = 0,
        route_id = config.route_id,
        server_addr = config.server_addr,
        processed_entries = 0
    }

    return setmetatable(processor, batch_processor_mt)
end

-- Append one entry; flushes immediately when batch_max_size is 1 or when
-- the buffer reaches batch_max_size entries.
function batch_processor:push(entry)
    -- if the batch size is one then immediately send for processing
    if self.batch_max_size == 1 then
        local batch = {entries = {entry}, retry_count = 0}
        schedule_func_exec(self, 0, batch)
        return
    end

    -- lazily create the shared gauge once prometheus is available
    if prometheus and prometheus.get_prometheus() and not batch_metrics and self.name
       and self.route_id and self.server_addr then
        batch_metrics = prometheus.get_prometheus():gauge("batch_process_entries",
                                                          "batch process remaining entries",
                                                          {"name", "route_id", "server_addr"})
    end

    local entries = self.entry_buffer.entries
    table.insert(entries, entry)
    set_metrics(self, #entries)

    if #entries == 1 then
        self.first_entry_t = now()
    end
    self.last_entry_t = now()

    if self.batch_max_size <= #entries then
        core.log.debug("Batch Processor[", self.name ,
                       "] batch max size has exceeded")
        self:process_buffer()
    end

    if not self.is_timer_running then
        create_buffer_timer(self)
    end
end


-- Move the active buffer into the processing queue and schedule every
-- queued batch for immediate execution.
function batch_processor:process_buffer()
    -- If entries are present in the buffer move the entries to processing
    if #self.entry_buffer.entries > 0 then
        core.log.debug("transferring buffer entries to processing pipe line, ",
                       "buffercount[", #self.entry_buffer.entries ,"]")
        self.batch_to_process[#self.batch_to_process + 1] = self.entry_buffer
        self.entry_buffer = {entries = {}, retry_count = 0}
        set_metrics(self, 0)
    end

    for _, batch in ipairs(self.batch_to_process) do
        schedule_func_exec(self, 0, batch)
    end

    self.batch_to_process = {}
end


return batch_processor

-- (next file: apisix/utils/content-decode.lua)
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Decoders for compressed response bodies: gzip always, brotli only when
-- the lua brotli binding can be loaded.
local pcall = pcall
local zlib = require("ffi-zlib")
local str_buffer = require("string.buffer")
local is_br_libs_loaded, brotli = pcall(require, "brotli")
local content_decode_funcs = {}
local _M = {}


-- Build streaming read/write callbacks over string buffers; shared by the
-- gzip and brotli decoders below (this setup was previously duplicated in
-- each decoder). Returns read_inputs, write_outputs and the output buffer.
local function new_stream_io(data)
    local inputs = str_buffer.new():set(data)
    local outputs = str_buffer.new()

    -- return nil on EOF, as both decompressors expect
    local function read_inputs(size)
        local chunk = inputs:get(size)
        if chunk == "" then
            return nil
        end
        return chunk
    end

    local function write_outputs(chunk)
        return outputs:put(chunk)
    end

    return read_inputs, write_outputs, outputs
end


-- Inflate a gzip body; returns the plain text, or nil, err.
local function inflate_gzip(data)
    local read_inputs, write_outputs, outputs = new_stream_io(data)

    local ok, err = zlib.inflateGzip(read_inputs, write_outputs)
    if not ok then
        return nil, "inflate gzip err: " .. err
    end

    return outputs:get()
end
content_decode_funcs.gzip = inflate_gzip


-- Pump data through the brotli decompressor; returns ok, err.
local function brotli_stream_decode(read_inputs, write_outputs)
    -- read 64k data per times
    local read_size = 64 * 1024
    local decompressor = brotli.decompressor:new()

    local chunk, ok, res
    repeat
        chunk = read_inputs(read_size)
        if chunk then
            ok, res = pcall(function()
                return decompressor:decompress(chunk)
            end)
        else
            ok, res = pcall(function()
                return decompressor:finish()
            end)
        end
        if not ok then
            return false, res
        end
        write_outputs(res)
    until not chunk

    return true, nil
end


-- Decode a brotli body; returns the plain text, or nil, err.
local function brotli_decode(data)
    local read_inputs, write_outputs, outputs = new_stream_io(data)

    local ok, err = brotli_stream_decode(read_inputs, write_outputs)
    if not ok then
        return nil, "brotli decode err: " .. err
    end

    return outputs:get()
end

if is_br_libs_loaded then
    content_decode_funcs.br = brotli_decode
end


-- Return the decoder for a Content-Encoding value, or nil if unsupported.
function _M.dispatch_decoder(response_encoding)
    return content_decode_funcs[response_encoding]
end


return _M

-- (next file: apisix/utils/google-cloud-oauth.lua)
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- OAuth helper for the Google Cloud plugins: exchanges a signed
-- service-account JWT for an access token and caches it until shortly
-- before it expires.
local core = require("apisix.core")
local type = type
local setmetatable = setmetatable

local ngx_update_time = ngx.update_time
local ngx_time = ngx.time
local ngx_encode_args = ngx.encode_args

local http = require("resty.http")
local jwt = require("resty.jwt")


-- Current unix time, with nginx's cached clock refreshed first.
local function get_timestamp()
    ngx_update_time()
    return ngx_time()
end


local _M = {}


-- Return a usable access token, refreshing it when absent or when it
-- expires within the next 60 seconds.
function _M:generate_access_token()
    local needs_refresh = not self.access_token
        or get_timestamp() > self.access_token_expire_time - 60

    if needs_refresh then
        self:refresh_access_token()
    end

    return self.access_token
end


-- Fetch a fresh token from token_uri via the jwt-bearer grant. On any
-- failure an error is logged and the cached token fields are left as-is.
function _M:refresh_access_token()
    local httpc = http.new()
    local form = {
        grant_type = "urn:ietf:params:oauth:grant-type:jwt-bearer",
        assertion = self:generate_jwt_token(),
    }

    local res, err = httpc:request_uri(self.token_uri, {
        ssl_verify = self.ssl_verify,
        method = "POST",
        body = ngx_encode_args(form),
        headers = {
            ["Content-Type"] = "application/x-www-form-urlencoded",
        },
    })

    if not res then
        core.log.error("failed to refresh google oauth access token, ", err)
        return
    end

    if res.status ~= 200 then
        core.log.error("failed to refresh google oauth access token: ", res.body)
        return
    end

    local token_data, decode_err = core.json.decode(res.body)
    if not token_data then
        core.log.error("failed to parse google oauth response data: ", decode_err)
        return
    end

    self.access_token = token_data.access_token
    self.access_token_type = token_data.token_type
    self.access_token_expire_time = get_timestamp() + token_data.expires_in
end


-- Build the signed RS256 JWT assertion used for the token exchange.
function _M:generate_jwt_token()
    local claims = core.json.encode({
        iss = self.client_email,
        aud = self.token_uri,
        scope = self.scope,
        iat = get_timestamp(),
        exp = get_timestamp() + (60 * 60),
    })

    return jwt:sign(self.private_key, {
        header = { alg = "RS256", typ = "JWT" },
        payload = claims,
    })
end


-- Construct the oauth helper from plugin config. `config.scope` may be a
-- single string or a list of strings (joined with spaces).
function _M.new(config, ssl_verify)
    local oauth = {
        client_email = config.client_email,
        private_key = config.private_key,
        project_id = config.project_id,
        token_uri = config.token_uri or "https://oauth2.googleapis.com/token",
        auth_uri = config.auth_uri or "https://accounts.google.com/o/oauth2/auth",
        entries_uri = config.entries_uri,
        access_token = nil,
        access_token_type = nil,
        access_token_expire_time = 0,
        ssl_verify = ssl_verify,
    }

    local scope = config.scope
    if type(scope) == "string" then
        oauth.scope = scope
    elseif type(scope) == "table" then
        oauth.scope = core.table.concat(scope, " ")
    end

    return setmetatable(oauth, { __index = _M })
end


return _M

-- (next file: apisix/utils/log-util.lua)
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Shared helpers for the logger plugins: request/response body capture,
-- custom log formats, latency accounting and timestamp formatting.
local core = require("apisix.core")
local plugin = require("apisix.plugin")
local expr = require("resty.expr.v1")
local content_decode = require("apisix.utils.content-decode")
local ngx = ngx
local pairs = pairs
local ngx_now = ngx.now
local ngx_header = ngx.header
local os_date = os.date
local str_byte = string.byte
local str_sub = string.sub
local math_floor = math.floor
local ngx_update_time = ngx.update_time
local req_get_body_data = ngx.req.get_body_data
local is_http = ngx.config.subsystem == "http"
local req_get_body_file = ngx.req.get_body_file
local MAX_REQ_BODY = 524288 -- 512 KiB
local MAX_RESP_BODY = 524288 -- 512 KiB
local io = io

-- cache of compiled log formats, keyed by the format table
local lru_log_format = core.lrucache.new({
    ttl = 300, count = 512
})

local _M = {}


-- Read at most max_bytes of the request body, falling back to the buffered
-- body file when nginx spilled the body to disk.
local function get_request_body(max_bytes)
    local req_body = req_get_body_data()
    if req_body then
        if max_bytes and #req_body >= max_bytes then
            req_body = str_sub(req_body, 1, max_bytes)
        end
        return req_body
    end

    local file_name = req_get_body_file()
    if not file_name then
        return nil
    end

    core.log.info("attempt to read body from file: ", file_name)

    local f, err = io.open(file_name, 'r')
    if not f then
        return nil, "fail to open file " .. err
    end

    req_body = f:read(max_bytes)
    f:close()

    return req_body
end


-- Compile a user log format: "$var" values become {true, "var"} variable
-- lookups, anything else is kept as the literal {false, value}.
local function gen_log_format(format)
    local log_format = {}
    for k, var_name in pairs(format) do
        if var_name:byte(1, 1) == str_byte("$") then
            log_format[k] = {true, var_name:sub(2)}
        else
            log_format[k] = {false, var_name}
        end
    end
    core.log.info("log_format: ", core.json.delay_encode(log_format))
    return log_format
end


-- Build a log entry from a compiled custom format; "$request_body" is
-- special-cased to read a (truncated) copy of the request body.
local function get_custom_format_log(ctx, format, max_req_body_bytes)
    local log_format = lru_log_format(format or "", nil, gen_log_format, format)
    local entry = core.table.new(0, core.table.nkeys(log_format))
    for k, var_attr in pairs(log_format) do
        if var_attr[1] then
            local key = var_attr[2]
            if key == "request_body" then
                local max_req_body_bytes = max_req_body_bytes or MAX_REQ_BODY
                local req_body, err = get_request_body(max_req_body_bytes)
                if err then
                    core.log.error("fail to get request body: ", err)
                else
                    entry[k] = req_body
                end
            else
                entry[k] = ctx.var[var_attr[2]]
            end
        else
            entry[k] = var_attr[2]
        end
    end

    local matched_route = ctx.matched_route and ctx.matched_route.value
    if matched_route then
        entry.service_id = matched_route.service_id
        entry.route_id = matched_route.id
    end
    return entry
end
-- export the log getter so we can mock in tests
_M.get_custom_format_log = get_custom_format_log


-- for test
function _M.inject_get_custom_format_log(f)
    get_custom_format_log = f
    _M.get_custom_format_log = f
end


-- Split the total request latency (in ms) into upstream and apisix parts.
local function latency_details_in_ms(ctx)
    local latency = (ngx_now() - ngx.req.start_time()) * 1000
    local upstream_latency, apisix_latency = nil, latency

    if ctx.var.upstream_response_time then
        upstream_latency = ctx.var.upstream_response_time * 1000
        apisix_latency = apisix_latency - upstream_latency

        -- The latency might be negative, as Nginx uses different time measurements in
        -- different metrics.
        -- See https://github.com/apache/apisix/issues/5146#issuecomment-928919399
        if apisix_latency < 0 then
            apisix_latency = 0
        end
    end

    return latency, upstream_latency, apisix_latency
end
_M.latency_details_in_ms = latency_details_in_ms


-- Build the default (full) log entry used when no custom format is set.
local function get_full_log(ngx, conf)
    local ctx = ngx.ctx.api_ctx
    local var = ctx.var
    local service_id
    local route_id
    local url = var.scheme .. "://" .. var.host .. ":" .. var.server_port
                .. var.request_uri
    local matched_route = ctx.matched_route and ctx.matched_route.value

    if matched_route then
        service_id = matched_route.service_id or ""
        route_id = matched_route.id
    else
        service_id = var.host
    end

    local consumer
    if ctx.consumer then
        consumer = {
            username = ctx.consumer.username
        }
    end

    local latency, upstream_latency, apisix_latency = latency_details_in_ms(ctx)

    local log = {
        request = {
            url = url,
            uri = var.request_uri,
            method = ngx.req.get_method(),
            headers = ngx.req.get_headers(),
            querystring = ngx.req.get_uri_args(),
            size = var.request_length
        },
        response = {
            status = ngx.status,
            headers = ngx.resp.get_headers(),
            size = var.bytes_sent
        },
        server = {
            hostname = core.utils.gethostname(),
            version = core.version.VERSION
        },
        upstream = var.upstream_addr,
        service_id = service_id,
        route_id = route_id,
        consumer = consumer,
        client_ip = core.request.get_remote_client_ip(ngx.ctx.api_ctx),
        start_time = ngx.req.start_time() * 1000,
        latency = latency,
        upstream_latency = upstream_latency,
        apisix_latency = apisix_latency
    }

    if ctx.resp_body then
        log.response.body = ctx.resp_body
    end

    if conf.include_req_body then

        local log_request_body = true

        if conf.include_req_body_expr then

            -- compile the expression once and cache it on the conf table
            if not conf.request_expr then
                local request_expr, err = expr.new(conf.include_req_body_expr)
                if not request_expr then
                    core.log.error('generate request expr err ' .. err)
                    return log
                end
                conf.request_expr = request_expr
            end

            local result = conf.request_expr:eval(ctx.var)

            if not result then
                log_request_body = false
            end
        end

        if log_request_body then
            local max_req_body_bytes = conf.max_req_body_bytes or MAX_REQ_BODY
            local body, err = get_request_body(max_req_body_bytes)
            if err then
                -- NOTE(review): returns nil here, so the whole entry is
                -- dropped when the body file cannot be read -- confirm this
                -- is intended rather than logging without the body.
                core.log.error("fail to get request body: ", err)
                return
            end
            log.request.body = body
        end
    end

    return log
end
_M.get_full_log = get_full_log


-- for test
function _M.inject_get_full_log(f)
    get_full_log = f
    _M.get_full_log = f
end


-- Return the first truthy result of evaluating the `match` expressions.
local function is_match(match, ctx)
    local match_result
    for _, m in pairs(match) do
        local expr, _ = expr.new(m)
        match_result = expr:eval(ctx.var)
        if match_result then
            break
        end
    end

    return match_result
end


-- Build the log entry for a plugin: honours conf.match, the plugin
-- metadata log_format, conf.log_format, and otherwise falls back to the
-- full log (http subsystem only). Returns entry, customized(bool).
function _M.get_log_entry(plugin_name, conf, ctx)
    -- If the "match" configuration is set and the matching conditions are not met,
    -- then do not log the message.
    if conf.match and not is_match(conf.match, ctx) then
        return
    end

    local metadata = plugin.plugin_metadata(plugin_name)
    core.log.info("metadata: ", core.json.delay_encode(metadata))

    local entry
    local customized = false

    local has_meta_log_format = metadata and metadata.value.log_format
                                and core.table.nkeys(metadata.value.log_format) > 0

    if conf.log_format or has_meta_log_format then
        customized = true
        entry = get_custom_format_log(ctx, conf.log_format or metadata.value.log_format,
                                      conf.max_req_body_bytes)
    else
        if is_http then
            entry = get_full_log(ngx, conf)
        else
            -- get_full_log doesn't work in stream
            core.log.error(plugin_name, "'s log_format is not set")
        end
    end

    return entry, customized
end


-- Reconstruct the raw request: request line, headers and optional body.
function _M.get_req_original(ctx, conf)
    local data = {
        ctx.var.request, "\r\n"
    }
    for k, v in pairs(ngx.req.get_headers()) do
        core.table.insert_tail(data, k, ": ", v, "\r\n")
    end
    core.table.insert(data, "\r\n")

    if conf.include_req_body then
        local max_req_body_bytes = conf.max_req_body_bytes or MAX_REQ_BODY
        local req_body = get_request_body(max_req_body_bytes)
        core.table.insert(data, req_body)
    end

    return core.table.concat(data, "")
end


-- Validate the body-capture expressions at schema-check time.
function _M.check_log_schema(conf)
    if conf.include_req_body_expr then
        local ok, err = expr.new(conf.include_req_body_expr)
        if not ok then
            return nil, "failed to validate the 'include_req_body_expr' expression: " .. err
        end
    end
    if conf.include_resp_body_expr then
        local ok, err = expr.new(conf.include_resp_body_expr)
        if not ok then
            return nil, "failed to validate the 'include_resp_body_expr' expression: " .. err
        end
    end
    return true, nil
end


-- Body-filter helper: accumulate (and, when compressed, decode) the
-- response body into ctx.resp_body, honouring include_resp_body[_expr]
-- and the max_resp_body_bytes cap.
function _M.collect_body(conf, ctx)
    if conf.include_resp_body then
        local log_response_body = true

        if conf.include_resp_body_expr then
            if not conf.response_expr then
                local response_expr, err = expr.new(conf.include_resp_body_expr)
                if not response_expr then
                    core.log.error('generate response expr err ' .. err)
                    return
                end
                conf.response_expr = response_expr
            end

            -- evaluate the expression only once per request
            if ctx.res_expr_eval_result == nil then
                ctx.res_expr_eval_result = conf.response_expr:eval(ctx.var)
            end

            if not ctx.res_expr_eval_result then
                log_response_body = false
            end
        end

        if log_response_body then
            local max_resp_body_bytes = conf.max_resp_body_bytes or MAX_RESP_BODY

            if ctx._resp_body_bytes and ctx._resp_body_bytes >= max_resp_body_bytes then
                return
            end
            local final_body = core.response.hold_body_chunk(ctx, true, max_resp_body_bytes)
            if not final_body then
                return
            end

            local response_encoding = ngx_header["Content-Encoding"]
            if not response_encoding then
                ctx.resp_body = final_body
                return
            end

            local decoder = content_decode.dispatch_decoder(response_encoding)
            if not decoder then
                core.log.warn("unsupported compression encoding type: ",
                              response_encoding)
                ctx.resp_body = final_body
                return
            end

            local decoded_body, err = decoder(final_body)
            if err ~= nil then
                core.log.warn("try decode compressed data err: ", err)
                ctx.resp_body = final_body
                return
            end

            ctx.resp_body = decoded_body
        end
    end
end


-- RFC 3339 UTC timestamp with millisecond precision, e.g.
-- "2023-01-01T00:00:00.000Z". Uses the current time when `timestamp` is nil.
function _M.get_rfc3339_zulu_timestamp(timestamp)
    ngx_update_time()
    local now = timestamp or ngx_now()
    local second = math_floor(now)
    local millisecond = math_floor((now - second) * 1000)
    return os_date("!%Y-%m-%dT%T.", second) .. core.string.format("%03dZ", millisecond)
end


return _M

-- (next file: apisix/utils/redis-schema.lua)
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--

-- JSON schema fragments shared by plugins that can store their state in
-- redis or redis-cluster; merged into the plugin schema per policy name.
local policy_to_additional_properties = {
    -- single-node redis connection properties
    redis = {
        properties = {
            redis_host = {
                type = "string", minLength = 2
            },
            redis_port = {
                type = "integer", minimum = 1, default = 6379,
            },
            redis_username = {
                type = "string", minLength = 1,
            },
            redis_password = {
                type = "string", minLength = 0,
            },
            redis_database = {
                type = "integer", minimum = 0, default = 0,
            },
            -- connect/send/read timeout in milliseconds
            redis_timeout = {
                type = "integer", minimum = 1, default = 1000,
            },
            redis_ssl = {
                type = "boolean", default = false,
            },
            redis_ssl_verify = {
                type = "boolean", default = false,
            },
        },
        required = {"redis_host"},
    },
    -- redis cluster connection properties (see apisix.utils.rediscluster)
    ["redis-cluster"] = {
        properties = {
            redis_cluster_nodes = {
                type = "array",
                minItems = 1,
                items = {
                    type = "string", minLength = 2, maxLength = 100
                },
            },
            redis_password = {
                type = "string", minLength = 0,
            },
            redis_timeout = {
                type = "integer", minimum = 1, default = 1000,
            },
            redis_cluster_name = {
                type = "string",
            },
            redis_cluster_ssl = {
                type = "boolean", default = false,
            },
            redis_cluster_ssl_verify = {
                type = "boolean", default = false,
            },
        },
        required = {"redis_cluster_nodes", "redis_cluster_name"},
    },
}

local _M = {
    schema = policy_to_additional_properties
}

return _M

-- (next file: apisix/utils/redis.lua)
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local redis_new = require("resty.redis").new
local core = require("apisix.core")


local _M = {version = 0.1}

-- Create a connected redis client from the plugin conf (fields per
-- apisix.utils.redis-schema). Auth and db selection only run on a brand
-- new connection, not on one reused from the keepalive pool.
-- NOTE(review): error paths mix `false, err` and `nil, err` first return
-- values; callers should only test falsiness -- confirm before normalizing.
local function redis_cli(conf)
    local red = redis_new()
    local timeout = conf.redis_timeout or 1000 -- default 1sec

    red:set_timeouts(timeout, timeout, timeout)

    local sock_opts = {
        ssl = conf.redis_ssl,
        ssl_verify = conf.redis_ssl_verify
    }

    local ok, err = red:connect(conf.redis_host, conf.redis_port or 6379, sock_opts)
    if not ok then
        core.log.error(" redis connect error, error: ", err)
        return false, err
    end

    local count
    count, err = red:get_reused_times()
    -- zero reuse count means this is a fresh connection
    if 0 == count then
        if conf.redis_password and conf.redis_password ~= '' then
            local ok, err
            if conf.redis_username then
                ok, err = red:auth(conf.redis_username, conf.redis_password)
            else
                ok, err = red:auth(conf.redis_password)
            end
            if not ok then
                return nil, err
            end
        end

        -- select db
        if conf.redis_database ~= 0 then
            local ok, err = red:select(conf.redis_database)
            if not ok then
                return false, "failed to change redis db, err: " .. err
            end
        end
    elseif err then
        -- core.log.info(" err: ", err)
        return nil, err
    end
    return red, nil
end


-- Public constructor; see redis_cli above for the return convention.
function _M.new(conf)
    return redis_cli(conf)
end

return _M

-- (next file: apisix/utils/rediscluster.lua)
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
local rediscluster = require("resty.rediscluster")
local core = require("apisix.core")
local ipairs = ipairs

local _M = {version = 0.1}

-- Create a resty.rediscluster client from the plugin conf; `dict_name`
-- names the shared dict passed through to the cluster library.
-- Returns the client, or nil, err.
local function new_redis_cluster(conf, dict_name)
    local config = {
        name = conf.redis_cluster_name,
        serv_list = {},
        read_timeout = conf.redis_timeout,
        auth = conf.redis_password,
        dict_name = dict_name,
        connect_opts = {
            ssl = conf.redis_cluster_ssl,
            ssl_verify = conf.redis_cluster_ssl_verify,
        }
    }

    -- parse "host:port" node strings into the serv_list format
    for i, conf_item in ipairs(conf.redis_cluster_nodes) do
        local host, port, err = core.utils.parse_addr(conf_item)
        if err then
            return nil, "failed to parse address: " .. conf_item
                        .. " err: " .. err
        end

        config.serv_list[i] = {ip = host, port = port}
    end

    local red_cli, err = rediscluster:new(config)
    if not red_cli then
        return nil, "failed to new redis cluster: " .. err
    end

    return red_cli
end


-- Public constructor; see new_redis_cluster above.
function _M.new(conf, dict_name)
    return new_redis_cluster(conf, dict_name)
end


return _M

-- (next file: apisix/utils/rfc5424.lua)
--
-- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements. See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
+-- +local LOG_EMERG = 0 -- system is unusable +local LOG_ALERT = 1 -- action must be taken immediately +local LOG_CRIT = 2 -- critical conditions +local LOG_ERR = 3 -- error conditions +local LOG_WARNING = 4 -- warning conditions +local LOG_NOTICE = 5 -- normal but significant condition +local LOG_INFO = 6 -- informational +local LOG_DEBUG = 7 -- debug-level messages + +local LOG_KERN = 0 -- kernel messages +local LOG_USER = 1 -- random user-level messages +local LOG_MAIL = 2 -- mail system +local LOG_DAEMON = 3 -- system daemons +local LOG_AUTH = 4 -- security/authorization messages +local LOG_SYSLOG = 5 -- messages generated internally by syslogd +local LOG_LPR = 6 -- line printer subsystem +local LOG_NEWS = 7 -- network news subsystem +local LOG_UUCP = 8 -- UUCP subsystem +local LOG_CRON = 9 -- clock daemon +local LOG_AUTHPRIV = 10 -- security/authorization messages (private) +local LOG_FTP = 11 -- FTP daemon +local LOG_LOCAL0 = 16 -- reserved for local use +local LOG_LOCAL1 = 17 -- reserved for local use +local LOG_LOCAL2 = 18 -- reserved for local use +local LOG_LOCAL3 = 19 -- reserved for local use +local LOG_LOCAL4 = 20 -- reserved for local use +local LOG_LOCAL5 = 21 -- reserved for local use +local LOG_LOCAL6 = 22 -- reserved for local use +local LOG_LOCAL7 = 23 -- reserved for local use + +local Facility = { + KERN = LOG_KERN, + USER = LOG_USER, + MAIL = LOG_MAIL, + DAEMON = LOG_DAEMON, + AUTH = LOG_AUTH, + SYSLOG = LOG_SYSLOG, + LPR = LOG_LPR, + NEWS = LOG_NEWS, + UUCP = LOG_UUCP, + CRON = LOG_CRON, + AUTHPRIV = LOG_AUTHPRIV, + FTP = LOG_FTP, + LOCAL0 = LOG_LOCAL0, + LOCAL1 = LOG_LOCAL1, + LOCAL2 = LOG_LOCAL2, + LOCAL3 = LOG_LOCAL3, + LOCAL4 = LOG_LOCAL4, + LOCAL5 = LOG_LOCAL5, + LOCAL6 = LOG_LOCAL6, + LOCAL7 = LOG_LOCAL7, +} + +local Severity = { + EMEGR = LOG_EMERG, + ALERT = LOG_ALERT, + CRIT = LOG_CRIT, + ERR = LOG_ERR, + WARNING = LOG_WARNING, + NOTICE = LOG_NOTICE, + INFO = LOG_INFO, + DEBUG = LOG_DEBUG, +} + +local log_util = 
require("apisix.utils.log-util") +local ipairs = ipairs +local str_format = string.format + +local _M = { version = 0.1 } + + +function _M.encode(facility, severity, hostname, appname, pid, msg, structured_data) + local pri = (Facility[facility] * 8 + Severity[severity]) + local t = log_util.get_rfc3339_zulu_timestamp() + if not hostname then + hostname = "-" + end + + if not appname then + appname = "-" + end + + local structured_data_str = "-" + + if structured_data then + structured_data_str = "[logservice" + for _, sd_param in ipairs(structured_data) do + structured_data_str = structured_data_str .. " " .. sd_param.name + .. "=\"" .. sd_param.value .. "\"" + end + structured_data_str = structured_data_str .. "]" + end + + return str_format("<%d>1 %s %s %s %d - %s %s\n", pri, t, hostname, + appname, pid, structured_data_str, msg) +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/utils/router.lua b/CloudronPackages/APISIX/apisix-source/apisix/utils/router.lua new file mode 100644 index 0000000..8b6b604 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/utils/router.lua @@ -0,0 +1,34 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local resty_router = require("resty.radixtree") + + +local _M = {} + +do + local router_opts = { + no_param_match = true + } + +function _M.new(routes) + return resty_router.new(routes, router_opts) +end + +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/utils/upstream.lua b/CloudronPackages/APISIX/apisix-source/apisix/utils/upstream.lua new file mode 100644 index 0000000..3c0b9a3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/utils/upstream.lua @@ -0,0 +1,133 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local ipmatcher = require("resty.ipmatcher") +local ngx_now = ngx.now +local ipairs = ipairs +local type = type +local tostring = tostring + + +local _M = {} + + +local function sort_by_key_host(a, b) + return a.host < b.host +end + + +local function compare_upstream_node(up_conf, new_t) + if up_conf == nil then + return false + end + + -- fast path + local old_t = up_conf.nodes + if old_t == new_t then + return true + end + + if type(old_t) ~= "table" then + return false + end + + -- slow path + core.log.debug("compare upstream nodes by value, ", + "old: ", tostring(old_t) , " ", core.json.delay_encode(old_t, true), + "new: ", tostring(new_t) , " ", core.json.delay_encode(new_t, true)) + + if up_conf.original_nodes then + -- if original_nodes is set, it means that the upstream nodes + -- are changed by `fill_node_info`, so we need to compare the new nodes with the + -- original nodes. + old_t = up_conf.original_nodes + end + + if #new_t ~= #old_t then + return false + end + + core.table.sort(old_t, sort_by_key_host) + core.table.sort(new_t, sort_by_key_host) + + for i = 1, #new_t do + local new_node = new_t[i] + local old_node = old_t[i] + for _, name in ipairs({"host", "port", "weight", "priority", "metadata"}) do + if new_node[name] ~= old_node[name] then + return false + end + end + end + + return true +end +_M.compare_upstream_node = compare_upstream_node + + +local function parse_domain_for_nodes(nodes) + local new_nodes = core.table.new(#nodes, 0) + for _, node in ipairs(nodes) do + local host = node.host + if not ipmatcher.parse_ipv4(host) and + not ipmatcher.parse_ipv6(host) then + local ip, err = core.resolver.parse_domain(host) + if ip then + local new_node = core.table.clone(node) + new_node.host = ip + new_node.domain = host + core.table.insert(new_nodes, new_node) + end + + if err then + core.log.error("dns resolver domain: ", host, " error: ", err) + end + else + core.table.insert(new_nodes, node) + end + end 
+ return new_nodes +end +_M.parse_domain_for_nodes = parse_domain_for_nodes + + +function _M.parse_domain_in_up(up) + local nodes = up.value.nodes + local new_nodes, err = parse_domain_for_nodes(nodes) + if not new_nodes then + return nil, err + end + + local ok = compare_upstream_node(up.dns_value, new_nodes) + if ok then + return up + end + + if not up.orig_modifiedIndex then + up.orig_modifiedIndex = up.modifiedIndex + end + up.modifiedIndex = up.orig_modifiedIndex .. "#" .. ngx_now() + + up.dns_value = core.table.clone(up.value) + up.dns_value.nodes = new_nodes + core.log.info("resolve upstream which contain domain: ", + core.json.delay_encode(up, true)) + return up +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/apisix/wasm.lua b/CloudronPackages/APISIX/apisix-source/apisix/wasm.lua new file mode 100644 index 0000000..6a93728 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/apisix/wasm.lua @@ -0,0 +1,203 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local type = type +local support_wasm, wasm = pcall(require, "resty.proxy-wasm") +local ngx_var = ngx.var + + +local schema = { + type = "object", + properties = { + conf = { + oneOf = { + { type = "object", minProperties = 1}, + { type = "string", minLength = 1}, + } + }, + }, + required = {"conf"} +} +local _M = {} + + +local function check_schema(conf) + return core.schema.check(schema, conf) +end + + +local function get_plugin_ctx_key(ctx) + return ctx.conf_type .. "#" .. ctx.conf_id +end + +local function fetch_plugin_ctx(conf, ctx, plugin) + if not conf.plugin_ctxs then + conf.plugin_ctxs = {} + end + + local ctxs = conf.plugin_ctxs + local key = get_plugin_ctx_key(ctx) + local plugin_ctx = ctxs[key] + local err + if not plugin_ctx then + if type(conf.conf) == "table" then + plugin_ctx, err = wasm.on_configure(plugin, core.json.encode(conf.conf)) + elseif type(conf.conf) == "string" then + plugin_ctx, err = wasm.on_configure(plugin, conf.conf) + else + return nil, "invalid conf type" + end + if not plugin_ctx then + return nil, err + end + + ctxs[key] = plugin_ctx + end + + return plugin_ctx +end + + +local function http_request_wrapper(self, conf, ctx) + local name = self.name + local plugin_ctx, err = fetch_plugin_ctx(conf, ctx, self.plugin) + if not plugin_ctx then + core.log.error(name, ": failed to fetch wasm plugin ctx: ", err) + return 503 + end + + local ok, err = wasm.on_http_request_headers(plugin_ctx) + if not ok then + core.log.error(name, ": failed to run wasm plugin: ", err) + return 503 + end + + -- $wasm_process_req_body is predefined in ngx_tpl.lua + local handle_body = ngx_var.wasm_process_req_body + if handle_body ~= '' then + -- reset the flag so we can use it for the next Wasm plugin + -- use ngx.var to bypass the cache + ngx_var.wasm_process_req_body = '' + + local body, err = core.request.get_body() + if err ~= nil then + core.log.error(name, ": failed to get request body: ", err) + return 503 + 
end + + local ok, err = wasm.on_http_request_body(plugin_ctx, body, true) + if not ok then + core.log.error(name, ": failed to run wasm plugin: ", err) + return 503 + end + end +end + + +local function header_filter_wrapper(self, conf, ctx) + local name = self.name + local plugin_ctx, err = fetch_plugin_ctx(conf, ctx, self.plugin) + if not plugin_ctx then + core.log.error(name, ": failed to fetch wasm plugin ctx: ", err) + return 503 + end + + local ok, err = wasm.on_http_response_headers(plugin_ctx) + if not ok then + core.log.error(name, ": failed to run wasm plugin: ", err) + return 503 + end + + -- $wasm_process_resp_body is predefined in ngx_tpl.lua + local handle_body = ngx_var.wasm_process_resp_body + if handle_body ~= '' then + -- reset the flag so we can use it for the next Wasm plugin + -- use ngx.var to bypass the cache + ngx_var.wasm_process_resp_body = "" + ctx["wasm_" .. name .. "_process_resp_body"] = true + end +end + + +local function body_filter_wrapper(self, conf, ctx) + local name = self.name + + local enabled = ctx["wasm_" .. name .. 
"_process_resp_body"] + if not enabled then + return + end + + local plugin_ctx, err = fetch_plugin_ctx(conf, ctx, self.plugin) + if not plugin_ctx then + core.log.error(name, ": failed to fetch wasm plugin ctx: ", err) + return + end + + local ok, err = wasm.on_http_response_body(plugin_ctx) + if not ok then + core.log.error(name, ": failed to run wasm plugin: ", err) + return + end +end + + +function _M.require(attrs) + if not support_wasm then + return nil, "need to build APISIX-Runtime to support wasm" + end + + local name = attrs.name + local priority = attrs.priority + local plugin, err = wasm.load(name, attrs.file) + if not plugin then + return nil, err + end + + local mod = { + version = 0.1, + name = name, + priority = priority, + schema = schema, + check_schema = check_schema, + plugin = plugin, + type = "wasm", + } + + if attrs.http_request_phase == "rewrite" then + mod.rewrite = function (conf, ctx) + return http_request_wrapper(mod, conf, ctx) + end + else + mod.access = function (conf, ctx) + return http_request_wrapper(mod, conf, ctx) + end + end + + mod.header_filter = function (conf, ctx) + return header_filter_wrapper(mod, conf, ctx) + end + + mod.body_filter = function (conf, ctx) + return body_filter_wrapper(mod, conf, ctx) + end + + -- the returned values need to be the same as the Lua's 'require' + return true, mod +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/autodocs/config.ld b/CloudronPackages/APISIX/apisix-source/autodocs/config.ld new file mode 100644 index 0000000..d5d3cca --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/autodocs/config.ld @@ -0,0 +1,11 @@ +project='Apache APISIX' +title='Plugin Develop Docs' +description='Functions in APISIX core' +format='markdown' +backtick_references = false +no_lua_ref = true +all = false +no_space_before_args = true +ext = "md" +template = true -- use the ldoc.ltp as markdown template +template_escape = ">" diff --git 
a/CloudronPackages/APISIX/apisix-source/autodocs/generate.sh b/CloudronPackages/APISIX/apisix-source/autodocs/generate.sh new file mode 100755 index 0000000..4b918cd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/autodocs/generate.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -ex + +# workdir is the root of the apisix, use command: autodocs/generate.sh build to generate the docs, +# and the output will be in the workdir/autodocs/output/ directory. 
+build() { + # install dependencies + apt-get -y update --fix-missing + apt-get -y install lua5.1 liblua5.1-0-dev + curl https://raw.githubusercontent.com/apache/apisix/master/utils/linux-install-luarocks.sh -sL | bash - + luarocks install ldoc + + # generate docs + rm -rf autodocs/output || true + mkdir autodocs/output || true + cd autodocs/output + find ../../apisix/core -name "*.lua" -type f -exec ldoc -c ../config.ld {} \; + + # generate the markdown files' name + rm ../md_files_name.txt || true + output="./" + mds=$(ls $output) + for md in $mds + do + echo $md >> ../md_files_name.txt + done +} + +case_opt=$1 +case $case_opt in + (build) + build + ;; +esac diff --git a/CloudronPackages/APISIX/apisix-source/autodocs/ldoc.ltp b/CloudronPackages/APISIX/apisix-source/autodocs/ldoc.ltp new file mode 100644 index 0000000..72d77c2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/autodocs/ldoc.ltp @@ -0,0 +1,98 @@ +> local iter = ldoc.modules.iter +> local display_name = ldoc.display_name +> local function trim_newline(s) +> return (s:gsub("\n", "\r")) +> end +--- +title: APISIX Plugin Development Docs +--- + + + +## $(ldoc.title) + +### $(module.name) + +$(module.summary) $(module.description) +> +> for kind, items in module.kinds() do +> for item in items() do + +#### $(trim_newline(display_name(item))) +> if item.type == "function" then +> if item.summary and item.summary ~= '' then + +**Summary**: $(item.summary) +> end -- if item.summary +> if item.description and item.description ~= '' then + +**Description**: + +```text$(trim_newline(item.description)) +``` +> end -- if item.description +> end -- if item.type +> if item.params and #item.params > 0 then +> local subnames = module.kinds:type_of(item).subnames +> if subnames then + +**$(subnames)** + +> end -- if subnames +> -- print the parameters +> for par in iter(item.params) do +> local param = item:subparam(par) +> for p in iter(param) do +> local name = item:display_name_of(p) +> local tp = 
item:type_of_param(p) +* **$(name)**($(tp)):$(item.params.map[p]) +> if tp ~= '' then +> end -- if tp +> +> end -- for p +> end -- for par +> end -- if item.params and #item.params > 0 +> +> -- print the returns +> if item.retgroups then +> local groups = item.retgroups + +**Returns:** + +> for i, group in ldoc.ipairs(groups) do +> for r in group:iter() do +> local type, ctypes = item:return_type(r); +* `$(type)`: $(r.text) +> end -- for r in group:iter() +> end -- for i,group +> end -- if item.retgroups + +> if item.usage then +**Usage** + +> for usage in item.usage:iter() do +```lua +$(trim_newline(usage)) +``` +> end -- for usage in item.usage:iter() +> local usage = item.usage +> end -- if item.usage +> end -- end for item in items() +> end -- for kinds, items diff --git a/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/cert/apisix.crt b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/cert/apisix.crt new file mode 100644 index 0000000..503f277 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/cert/apisix.crt @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G +A1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa +GA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n +RG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM +CHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe +cvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb +VDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR +2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr +abf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2 +WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/ +Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1 
+/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh +/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj +cTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ +tSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl +c3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC +tC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY +1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl +PYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob +rJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy +hme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1 +7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y +IJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve +U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/cert/apisix.key b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/cert/apisix.key new file mode 100644 index 0000000..7105067 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/cert/apisix.key @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG5AIBAAKCAYEAyCM0rqJecvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5 +jhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo +eLj0efMiOepOSZflj9Ob4yKR2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5s +mPtW1Oc/BV5terhscJdOgmRrabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt +6iMWEGeQU6mwPENgvj1olji2WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiy +Vt1TmtMWn1ztk6FfLRqwJWR/Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1o +npRVeXhrBajbCRDRBMwaNw/1/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2 +fzaqpIfyUbPST4GdqNG9NyIh/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI +1cGrGwyXbrieNp63AgMBAAECggGBAJM8g0duoHmIYoAJzbmKe4ew0C5fZtFUQNmu +O2xJITUiLT3ga4LCkRYsdBnY+nkK8PCnViAb10KtIT+bKipoLsNWI9Xcq4Cg4G3t +11XQMgPPgxYXA6m8t+73ldhxrcKqgvI6xVZmWlKDPn+CY/Wqj5PA476B5wEmYbNC 
+GIcd1FLl3E9Qm4g4b/sVXOHARF6iSvTR+6ol4nfWKlaXSlx2gNkHuG8RVpyDsp9c +z9zUqAdZ3QyFQhKcWWEcL6u9DLBpB/gUjyB3qWhDMe7jcCBZR1ALyRyEjmDwZzv2 +jlv8qlLFfn9R29UI0pbuL1eRAz97scFOFme1s9oSU9a12YHfEd2wJOM9bqiKju8y +DZzePhEYuTZ8qxwiPJGy7XvRYTGHAs8+iDlG4vVpA0qD++1FTpv06cg/fOdnwshE +OJlEC0ozMvnM2rZ2oYejdG3aAnUHmSNa5tkJwXnmj/EMw1TEXf+H6+xknAkw05nh +zsxXrbuFUe7VRfgB5ElMA/V4NsScgQKBwQDmMRtnS32UZjw4A8DsHOKFzugfWzJ8 +Gc+3sTgs+4dNIAvo0sjibQ3xl01h0BB2Pr1KtkgBYB8LJW/FuYdCRS/KlXH7PHgX +84gYWImhNhcNOL3coO8NXvd6+m+a/Z7xghbQtaraui6cDWPiCNd/sdLMZQ/7LopM +RbM32nrgBKMOJpMok1Z6zsPzT83SjkcSxjVzgULNYEp03uf1PWmHuvjO1yELwX9/ +goACViF+jst12RUEiEQIYwr4y637GQBy+9cCgcEA3pN9W5OjSPDVsTcVERig8++O +BFURiUa7nXRHzKp2wT6jlMVcu8Pb2fjclxRyaMGYKZBRuXDlc/RNO3uTytGYNdC2 +IptU5N4M7iZHXj190xtDxRnYQWWo/PR6EcJj3f/tc3Itm1rX0JfuI3JzJQgDb9Z2 +s/9/ub8RRvmQV9LM/utgyOwNdf5dyVoPcTY2739X4ZzXNH+CybfNa+LWpiJIVEs2 +txXbgZrhmlaWzwA525nZ0UlKdfktdcXeqke9eBghAoHARVTHFy6CjV7ZhlmDEtqE +U58FBOS36O7xRDdpXwsHLnCXhbFu9du41mom0W4UdzjgVI9gUqG71+SXrKr7lTc3 +dMHcSbplxXkBJawND/Q1rzLG5JvIRHO1AGJLmRgIdl8jNgtxgV2QSkoyKlNVbM2H +Wy6ZSKM03lIj74+rcKuU3N87dX4jDuwV0sPXjzJxL7NpR/fHwgndgyPcI14y2cGz +zMC44EyQdTw+B/YfMnoZx83xaaMNMqV6GYNnTHi0TO2TAoHBAKmdrh9WkE2qsr59 +IoHHygh7Wzez+Ewr6hfgoEK4+QzlBlX+XV/9rxIaE0jS3Sk1txadk5oFDebimuSk +lQkv1pXUOqh+xSAwk5v88dBAfh2dnnSa8HFN3oz+ZfQYtnBcc4DR1y2X+fVNgr3i +nxruU2gsAIPFRnmvwKPc1YIH9A6kIzqaoNt1f9VM243D6fNzkO4uztWEApBkkJgR +4s/yOjp6ovS9JG1NMXWjXQPcwTq3sQVLnAHxZRJmOvx69UmK4QKBwFYXXjeXiU3d +bcrPfe6qNGjfzK+BkhWznuFUMbuxyZWDYQD5yb6ukUosrj7pmZv3BxKcKCvmONU+ +CHgIXB+hG+R9S2mCcH1qBQoP/RSm+TUzS/Bl2UeuhnFZh2jSZQy3OwryUi6nhF0u +LDzMI/6aO1ggsI23Ri0Y9ZtqVKczTkxzdQKR9xvoNBUufjimRlS80sJCEB3Qm20S +wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/cert/openssl.conf b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/cert/openssl.conf new file mode 100644 index 0000000..c99afc4 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/cert/openssl.conf @@ -0,0 +1,40 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +[req] +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no + +[req_distinguished_name] +C = CN +ST = GuangDong +L = ZhuHai +O = iresty +CN = test.com + +[v3_req] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +basicConstraints = CA:TRUE +subjectAltName = @alt_names + +[alt_names] +DNS.1 = test.com +DNS.2 = *.test.com + +## openssl genrsa -out apisix.key 3072 -nodes +## openssl req -new -x509 -key apisix.key -sha256 -config openssl.conf -out apisix.crt -days 36500 diff --git a/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/nginx.conf b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/nginx.conf new file mode 100644 index 0000000..f35131c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/conf/nginx.conf @@ -0,0 +1,131 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +master_process on; + +worker_processes 1; + +error_log logs/error.log warn; +pid logs/nginx.pid; + +worker_rlimit_nofile 20480; + +events { + worker_connections 10620; +} + +worker_shutdown_timeout 3; + +http { + lua_package_path "$prefix/lua/?.lua;;"; + + log_format main '$remote_addr - $remote_user [$time_local] $http_host "$request" $status $body_bytes_sent $request_time "$http_referer" "$http_user_agent" $upstream_addr $upstream_status $upstream_response_time'; + access_log logs/access.log main buffer=16384 flush=5; + + init_by_lua_block { + require "resty.core" + apisix = require("apisix") + apisix.http_init() + } + + init_worker_by_lua_block { + apisix.http_init_worker() + } + + upstream apisix_backend { + server 0.0.0.1; + balancer_by_lua_block { + apisix.http_balancer_phase() + } + + keepalive 320; + } + + server { + listen 9443 ssl; + ssl_certificate cert/apisix.crt; + ssl_certificate_key cert/apisix.key; + ssl_session_cache shared:SSL:1m; + + listen 9080; + + server_tokens off; + more_set_headers 'Server: APISIX web server'; + + location = /apisix/nginx_status { + allow 127.0.0.0/24; + access_log off; + stub_status; + } + + location /apisix/admin { + allow 127.0.0.0/24; + content_by_lua_block { + apisix.http_admin() + } + } + + ssl_certificate_by_lua_block { + apisix.http_ssl_phase() + } + + location / { + set $upstream_scheme 'http'; + set $upstream_host $http_host; + set 
$upstream_upgrade ''; + set $upstream_connection ''; + set $upstream_uri ''; + + access_by_lua_block { + apisix.http_access_phase() + } + + proxy_http_version 1.1; + proxy_set_header Host $upstream_host; + proxy_set_header Upgrade $upstream_upgrade; + proxy_set_header Connection $upstream_connection; + proxy_set_header X-Real-IP $remote_addr; + proxy_pass_header Server; + proxy_pass_header Date; + + ### the following x-forwarded-* headers is to send to upstream server + + set $var_x_forwarded_proto $scheme; + set $var_x_forwarded_host $host; + set $var_x_forwarded_port $server_port; + + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto; + proxy_set_header X-Forwarded-Host $var_x_forwarded_host; + proxy_set_header X-Forwarded-Port $var_x_forwarded_port; + + # proxy pass + proxy_pass $upstream_scheme://apisix_backend$upstream_uri; + + header_filter_by_lua_block { + apisix.http_header_filter_phase() + } + + body_filter_by_lua_block { + apisix.http_body_filter_phase() + } + + log_by_lua_block { + apisix.http_log_phase() + } + } + } +} diff --git a/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/lua/apisix.lua b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/lua/apisix.lua new file mode 100644 index 0000000..ea5bf15 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/benchmark/fake-apisix/lua/apisix.lua @@ -0,0 +1,74 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local balancer = require "ngx.balancer" +local _M = {version = 0.1} + +function _M.http_init() +end + +function _M.http_init_worker() +end + +local function fake_fetch() + ngx.ctx.ip = "127.0.0.1" + ngx.ctx.port = 1980 +end + +function _M.http_access_phase() + local uri = ngx.var.uri + local host = ngx.var.host + local method = ngx.req.get_method() + local remote_addr = ngx.var.remote_addr + fake_fetch(uri, host, method, remote_addr) +end + +function _M.http_header_filter_phase() + if ngx.ctx then + -- do something + end +end + +function _M.http_body_filter_phase() + if ngx.ctx then + -- do something + end +end + +function _M.http_log_phase() + if ngx.ctx then + -- do something + end +end + +function _M.http_admin() +end + +function _M.http_ssl_phase() + if ngx.ctx then + -- do something + end +end + +function _M.http_balancer_phase() + local ok, err = balancer.set_current_peer(ngx.ctx.ip, ngx.ctx.port) + if not ok then + ngx.log(ngx.ERR, "failed to set the current peer: ", err) + return ngx.exit(500) + end +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/benchmark/run.sh b/CloudronPackages/APISIX/apisix-source/benchmark/run.sh new file mode 100755 index 0000000..44d0efa --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/benchmark/run.sh @@ -0,0 +1,155 @@ +#! /bin/bash -x + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
# Usage: run.sh [worker_count] [upstream_count]
# Benchmarks APISIX (no plugins, then limit-count + prometheus) and finally
# a bare "fake APISIX" OpenResty server, to separate framework overhead
# from plugin overhead. Uses openresty, wrk, yq, curl and sudo.

# number of nginx worker processes (default: 1)
if [ -n "$1" ]; then
    worker_cnt=$1
else
    worker_cnt=1
fi

# number of upstream server ports to spawn (default: 1)
if [ -n "$2" ]; then
    upstream_cnt=$2
else
    upstream_cnt=1
fi

mkdir -p benchmark/server/logs
mkdir -p benchmark/fake-apisix/logs


make init

fake_apisix_cmd="openresty -p $PWD/benchmark/fake-apisix -c $PWD/benchmark/fake-apisix/conf/nginx.conf"
server_cmd="openresty -p $PWD/benchmark/server -c $PWD/benchmark/server/conf/nginx.conf"

# Stop wrk and both openresty instances when the user interrupts the run.
trap 'onCtrlC' INT
function onCtrlC () {
    sudo killall wrk
    sudo killall openresty
    sudo ${fake_apisix_cmd} -s stop || exit 1
    sudo ${server_cmd} -s stop || exit 1
}

# Build one nginx "listen" directive and one APISIX upstream node entry per
# upstream, using consecutive ports starting at 1980.
for up_cnt in $(seq 1 $upstream_cnt);
do
    port=$((1979+$up_cnt))
    nginx_listen=$nginx_listen"listen $port;"
    upstream_nodes=$upstream_nodes"\"127.0.0.1:$port\":1"

    if [ $up_cnt -lt $upstream_cnt ]; then
        upstream_nodes=$upstream_nodes","
    fi
done

# Comment out plugins that would distort the measurements, and point the
# benchmark upstream server at the generated listen ports.
sed -i "s/\- proxy-mirror/#\- proxy-mirror/g" conf/config-default.yaml
sed -i "s/\- proxy-cache/#\- proxy-cache/g" conf/config-default.yaml
sed -i "s/listen .*;/$nginx_listen/g" benchmark/server/conf/nginx.conf

echo "
nginx_config:
  worker_processes: ${worker_cnt}
" > conf/config.yaml

sudo ${server_cmd} || exit 1

make run

sleep 3

#############################################
echo -e "\n\napisix: $worker_cnt worker + $upstream_cnt upstream + no plugin"
# Read the admin API key from the generated config so the route PUT below
# is authorized.
admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '
{
    "uri": "/hello",
    "plugins": {
    },
    "upstream": {
        "type": "roundrobin",
        "nodes": {
            '$upstream_nodes'
        }
    }
}'

sleep 1

# two wrk rounds: the first warms caches, the second is the number to read
wrk -d 5 -c 16 http://127.0.0.1:9080/hello

sleep 1

wrk -d 5 -c 16 http://127.0.0.1:9080/hello

sleep 1

#############################################
echo -e "\n\napisix: $worker_cnt worker + $upstream_cnt upstream + 2 plugins (limit-count + prometheus)"
admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
# Re-create the same route with limit-count (effectively never rejecting)
# and prometheus enabled, to measure plugin overhead.
curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '
{
    "uri": "/hello",
    "plugins": {
        "limit-count": {
            "count": 2000000000000,
            "time_window": 60,
            "rejected_code": 503,
            "key": "remote_addr"
        },
        "prometheus": {}
    },
    "upstream": {
        "type": "roundrobin",
        "nodes": {
            '$upstream_nodes'
        }
    }
}'

sleep 3

wrk -d 5 -c 16 http://127.0.0.1:9080/hello

sleep 1

wrk -d 5 -c 16 http://127.0.0.1:9080/hello

sleep 1

make stop

#############################################
echo -e "\n\nfake empty apisix server: $worker_cnt worker"

sleep 1

sed -i "s/worker_processes [0-9]*/worker_processes $worker_cnt/g" benchmark/fake-apisix/conf/nginx.conf

sudo ${fake_apisix_cmd} || exit 1

sleep 1

wrk -d 5 -c 16 http://127.0.0.1:9080/hello

sleep 1

wrk -d 5 -c 16 http://127.0.0.1:9080/hello

sudo ${fake_apisix_cmd} -s stop || exit 1

sudo ${server_cmd} -s stop || exit 1
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +master_process on; + +worker_processes 2; + +error_log logs/error.log warn; +pid logs/nginx.pid; + +worker_rlimit_nofile 20480; + +events { + accept_mutex off; + worker_connections 10620; +} + +worker_shutdown_timeout 3; + +http { + server { + listen 1980; + + access_log off; + location / { + echo_duplicate 1 "1234567890"; + } + } +} diff --git a/CloudronPackages/APISIX/apisix-source/bin/apisix b/CloudronPackages/APISIX/apisix-source/bin/apisix new file mode 100755 index 0000000..f4c75fb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/bin/apisix @@ -0,0 +1,48 @@ +#!/bin/bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# Locate the APISIX CLI entry point, preferring a source checkout, then a
# LuaRocks install, then the official rpm/docker layout.
if [ -s './apisix/cli/apisix.lua' ]; then
    # install via source
    APISIX_LUA=./apisix/cli/apisix.lua
elif [ -s '/usr/local/share/lua/5.1/apisix/cli/apisix.lua' ]; then
    # install via luarock
    APISIX_LUA=/usr/local/share/lua/5.1/apisix/cli/apisix.lua
else
    # install via official rpm or docker
    APISIX_LUA=/usr/local/apisix/apisix/cli/apisix.lua
fi

# find the openresty binary on PATH; an empty result falls back to the
# debug-build location below. (The original `|| exit 1` inside $() only
# exited the subshell and was removed as dead code.)
OR_BIN=$(command -v openresty)
OR_EXEC=${OR_BIN:-'/usr/local/openresty-debug/bin/openresty'}
# version encoded as major*100+minor, e.g. 1.21 -> 121
OR_VER=$(openresty -v 2>&1 | awk -F '/' '{print $2}' | awk -F '.' '{print $1 * 100 + $2}')
LUA_VERSION=$(lua -v 2>&1| grep -E -o "Lua [0-9]+.[0-9]+")

if [[ -e $OR_EXEC && "$OR_VER" -ge 119 ]]; then
    # OpenResty version is >= 1.19, use luajit by default
    ROOT=$(${OR_EXEC} -V 2>&1 | grep prefix | grep -Eo 'prefix=(.*)/nginx\s+--' | grep -Eo '/.*/')
    # find the luajit binary of openresty
    LUAJIT_BIN="$ROOT"/luajit/bin/luajit

    # use the luajit of openresty; "$@" (not $*) so arguments containing
    # whitespace survive word splitting
    echo "$LUAJIT_BIN $APISIX_LUA $*"
    exec $LUAJIT_BIN $APISIX_LUA "$@"
else
    echo "ERROR: Please check the version of OpenResty and Lua, OpenResty 1.19+ + LuaJIT is required for Apache APISIX."
    # exit non-zero so wrapper scripts and service managers see the failure
    exit 1
fi
# Save the docker images referenced by ci/pod/docker-compose.<test_type>.yml
# (plus the openwhisk images) into a single tar archive so a later CI run
# can restore them without pulling from registries.
test_type=$1

echo "started backing up, time: $(date)"
mkdir docker-images-backup
# number of "image:" lines, used to join all tags onto one line below
sum=$(cat ci/pod/docker-compose.$test_type.yml | grep image | wc -l)
# collect every image tag from the compose file, space-separated on one line
special_tag=$(cat ci/pod/docker-compose.$test_type.yml | grep image: | awk '{print $2}' | awk 'ORS=NR%"'$sum'"?" ":"\n"{print}')
echo special: $special_tag
openwhisk_tag="openwhisk/action-nodejs-v14:nightly openwhisk/standalone:nightly"
echo
echo special_tag: $special_tag
echo openwhisk_tag: $openwhisk_tag
echo
# NOTE: the unquoted expansions below are intentional — each variable holds
# multiple whitespace-separated tags that must word-split.
all_tags="${special_tag} ${openwhisk_tag}"
to_pull=""

# pull only the images that are not already present locally
for tag in $all_tags
do
    if ! ( docker inspect $tag &> /dev/null )
    then
        to_pull="${to_pull} ${tag}"
    fi
done

echo to pull : $to_pull

if [[ -n $to_pull ]]
then
    # parallel pulls, one tag per docker invocation
    echo "$to_pull" | xargs -P10 -n1 docker pull
fi

docker save $special_tag $openwhisk_tag -o docker-images-backup/apisix-images.tar
echo "docker save done, time: $(date)"
/**
 * Parse CHANGELOG.md (expected one directory above the cwd) and return every
 * released version, newest first, as it appears in "## X.Y.Z" headings.
 * Each entry carries the git ref to diff against: the tag itself when it
 * exists locally, otherwise HEAD (see getGitRef).
 */
function extractVersionsFromChangelog(): Version[] {
  const changelogPath = join(process.cwd(), '..', 'CHANGELOG.md');
  const content = readFileSync(changelogPath, 'utf-8');
  // release headings look like "## 3.9.0" at the start of a line
  const versionRegex = /^## ([0-9]+\.[0-9]+\.[0-9]+)/gm;
  const versions: Version[] = [];
  let match;

  // global regex + exec loop: lastIndex advances through all headings
  while ((match = versionRegex.exec(content)) !== null) {
    const tag = match[1];
    versions.push({
      tag,
      ref: getGitRef(tag)
    });
  }

  return versions;
}
'..', 'CHANGELOG.md'); + const content = readFileSync(changelogPath, 'utf-8'); + const lines = content.split('\n'); + let inRange = false; + const prs: number[] = []; + + for (const line of lines) { + if (line.startsWith(`## ${startTag}`)) { + inRange = true; + continue; + } + if (inRange && line.startsWith(`## ${endTag}`)) { + break; + } + if (inRange) { + const match = line.match(/#(\d+)/); + if (match) { + prs.push(parseInt(match[1], 10)); + } + } + } + + return prs.sort((a, b) => a - b); +} + +function shouldIgnoreCommitMessage(message: string): boolean { + // Extract the commit message part (remove the commit hash) + const messagePart = message.split(' ').slice(1).join(' '); + + // Check if the message starts with any of the ignored types + for (const type of IGNORE_TYPES) { + // Check simple format: "type: message" + if (messagePart.startsWith(`${type}:`)) { + return true; + } + // Check format with scope: "type(scope): message" + if (messagePart.startsWith(`${type}(`)) { + const closingBracketIndex = messagePart.indexOf('):'); + if (closingBracketIndex !== -1) { + return true; + } + } + } + return false; +} + +function extractPRsFromGitLog(oldRef: string, newRef: string): PR[] { + const log = execSync(`git log ${oldRef}..${newRef} --oneline`, { encoding: 'utf-8' }); + const prs: PR[] = []; + + for (const line of log.split('\n')) { + if (!line.trim()) continue; + + // Check if this commit should be ignored + if (shouldIgnoreCommitMessage(line)) continue; + + // Find PR number + const prMatch = line.match(/#(\d+)/); + if (prMatch) { + const prNumber = parseInt(prMatch[1], 10); + if (!IGNORE_PRS.includes(prNumber)) { + prs.push({ + number: prNumber, + title: line, + commit: line.split(' ')[0] + }); + } + } + } + + return prs.sort((a, b) => a.number - b.number); +} + +function findMissingPRs(changelogPRs: number[], gitPRs: PR[]): PR[] { + const changelogPRSet = new Set(changelogPRs); + return gitPRs.filter(pr => !changelogPRSet.has(pr.number)); +} + +function 
versionGreaterThan(v1: string, v2: string): boolean { + // Remove 'v' prefix if present + const cleanV1 = v1.replace(/^v/, ''); + const cleanV2 = v2.replace(/^v/, ''); + + // Split version strings into arrays of numbers + const v1Parts = cleanV1.split('.').map(Number); + const v2Parts = cleanV2.split('.').map(Number); + + // Compare each part + for (let i = 0; i < Math.max(v1Parts.length, v2Parts.length); i++) { + const v1Part = v1Parts[i] || 0; + const v2Part = v2Parts[i] || 0; + + if (v1Part > v2Part) return true; + if (v1Part < v2Part) return false; + } + + // If all parts are equal, return false + return false; +} + +// Main function +async function main() { + try { + const versions = extractVersionsFromChangelog(); + let hasErrors = false; + + for (let i = 0; i < versions.length - 1; i++) { + const newVersion = versions[i]; + const oldVersion = versions[i + 1]; + + // Skip if new version is less than or equal to 3.8.0 + if (!versionGreaterThan(newVersion.tag, '3.8.0')) { + continue; + } + + console.log(`\n=== Checking changes between ${newVersion.tag} (${newVersion.ref}) and ${oldVersion.tag} (${oldVersion.ref}) ===`); + + const changelogPRs = extractPRsFromChangelog(newVersion.tag, oldVersion.tag); + const gitPRs = extractPRsFromGitLog(oldVersion.ref, newVersion.ref); + const missingPRs = findMissingPRs(changelogPRs, gitPRs); + + console.log(`\n=== PR Comparison Results for ${newVersion.tag} ===`); + + if (missingPRs.length === 0) { + console.log(`\n✅ All PRs are included in CHANGELOG.md for version ${newVersion.tag}`); + } else { + console.log(`\n❌ Missing PRs in CHANGELOG.md for version ${newVersion.tag} (sorted):`); + missingPRs.forEach(pr => { + console.log(` #${pr.number}`); + }); + + console.log(`\nDetailed information about missing PRs for version ${newVersion.tag}:`); + missingPRs.forEach(pr => { + console.log(`\nPR #${pr.number}:`); + console.log(` - ${pr.title}`); + console.log(` - PR URL: https://github.com/apache/apisix/pull/${pr.number}`); + }); 
+ + console.log('Note: If you confirm that a PR should not appear in the changelog, please add its number to the IGNORE_PRS array in this script.'); + hasErrors = true; + } + } + + if (hasErrors) { + process.exit(1); + } + } catch (error) { + console.error('Error:', error); + process.exit(1); + } +} + +(async () => { + await main(); +})(); diff --git a/CloudronPackages/APISIX/apisix-source/ci/common.sh b/CloudronPackages/APISIX/apisix-source/ci/common.sh new file mode 100644 index 0000000..3850460 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/common.sh @@ -0,0 +1,217 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# Re-run the test files that failed in a previous prove run, to distinguish
# flaky failures from real ones.
# $1: path to the captured prove output.
# Exits 0 immediately if the run already passed; exits 1 if the failure was
# not a test failure, or if more than 10 test files failed (too broad to be
# flakiness).
rerun_flaky_tests() {
    if tail -1 "$1" | grep "Result: PASS"; then
        exit 0
    fi

    if ! tail -1 "$1" | grep "Result: FAIL"; then
        # CI failure not caused by failed test
        exit 1
    fi

    local tests
    local n_test
    # lines like "t/foo/bar.t  (… Failed: …)" name the failing test files
    tests="$(awk '/^t\/.*.t\s+\(.+ Failed: .+\)/{ print $1 }' "$1")"
    n_test="$(echo "$tests" | wc -l)"
    if [ "$n_test" -gt 10 ]; then
        # too many tests failed
        exit 1
    fi

    echo "Rerun $(echo "$tests" | xargs)"
    # FLUSH_ETCD=1 gives each rerun a clean etcd state
    FLUSH_ETCD=1 prove --timer -I./test-nginx/lib -I./ $(echo "$tests" | xargs)
}
# Build and install google/brotli from source into /usr/local/brotli and
# register its library directory with the dynamic linker.
install_brotli () {
    local BORTLI_VERSION="1.1.0"
    wget -q https://github.com/google/brotli/archive/refs/tags/v${BORTLI_VERSION}.zip
    unzip v${BORTLI_VERSION}.zip && cd ./brotli-${BORTLI_VERSION} && mkdir build && cd build
    # prefer cmake3 where it exists (older RHEL-family hosts), else cmake
    local CMAKE=$(command -v cmake3 > /dev/null 2>&1 && echo cmake3 || echo cmake)
    ${CMAKE} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local/brotli ..
    sudo ${CMAKE} --build . --config Release --target install
    # lib64 vs lib depends on the distro's install layout
    if [ -d "/usr/local/brotli/lib64" ]; then
        echo /usr/local/brotli/lib64 | sudo tee /etc/ld.so.conf.d/brotli.conf
    else
        echo /usr/local/brotli/lib | sudo tee /etc/ld.so.conf.d/brotli.conf
    fi
    sudo ldconfig
    # clean up the source tree (the downloaded zip is left in place)
    cd ../..
    rm -rf brotli-${BORTLI_VERSION}
}
# Start the example gRPC server used by the test suite, with plaintext,
# TLS, mTLS and gRPC-over-HTTP listeners, then poll until the plaintext
# port (10051) is accepting connections. Exits 1 with diagnostics if the
# process dies or fails to listen within ~5 seconds.
function start_grpc_server_example() {
    ./t/grpc_server_example/grpc_server_example \
        -grpc-address :10051 -grpcs-address :10052 -grpcs-mtls-address :10053 -grpc-http-address :10054 \
        -crt ./t/certs/apisix.crt -key ./t/certs/apisix.key -ca ./t/certs/mtls_ca.crt \
        > grpc_server_example.log 2>&1 &

    for (( i = 0; i <= 10; i++ )); do
        sleep 0.5
        # "none" when the background process has already exited
        GRPC_PROC=`ps -ef | grep grpc_server_example | grep -v grep || echo "none"`
        if [[ $GRPC_PROC == "none" || "$i" -eq 10 ]]; then
            echo "failed to start grpc_server_example"
            ss -antp | grep 1005 || echo "no proc listen port 1005x"
            cat grpc_server_example.log

            exit 1
        fi

        # success once the plaintext listener is up
        ss -lntp | grep 10051 | grep grpc_server && break
    done
}
listen port 7737" + exit 1 + else + break + fi + done + popd +} diff --git a/CloudronPackages/APISIX/apisix-source/ci/free_disk_space.sh b/CloudronPackages/APISIX/apisix-source/ci/free_disk_space.sh new file mode 100644 index 0000000..462258e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/free_disk_space.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# GitHub Action CI runner comes with a limited disk space, due to several reasons +# it may become full. For example, caching docker images creates an archive of +# several GBs of size, this sometimes leads to disk usage becoming full. +# To keep CI functional, we delete large directories that we do not need. + +echo "==============================================================================" +echo "Freeing up disk space on CI system" +echo "==============================================================================" + +echo "Initial disk usage:" +df -h + +echo "Removing large directories and runtimes..." +sudo rm -rf /usr/local/lib/android /usr/share/dotnet /opt/ghc /usr/local/.ghcup /usr/share/swift + +echo "Removing large packages and performing clean-up..." 
+sudo apt-get remove -y '^aspnetcore-.*' '^dotnet-.*' '^llvm-.*' 'php.*' '^mongodb-.*' '^mysql-.*' \ +azure-cli google-chrome-stable firefox powershell mono-devel libgl1-mesa-dri google-cloud-sdk google-cloud-cli --fix-missing +sudo apt-get autoremove -y +sudo apt-get clean + +echo "Removing Docker images..." +sudo docker image prune --all --force + +echo "Removing and Swap storage..." +sudo swapoff -a +sudo rm -f /mnt/swapfile + +echo "Final disk usage:" +df -h diff --git a/CloudronPackages/APISIX/apisix-source/ci/init-common-test-service.sh b/CloudronPackages/APISIX/apisix-source/ci/init-common-test-service.sh new file mode 100755 index 0000000..602f01a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/init-common-test-service.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# Seed the shared test services after docker-compose brings them up.
# The sleeps give each container time to finish booting before it is
# configured; there is no readiness probe here.

# prepare vault kv engine
sleep 3s
docker exec -i vault sh -c "VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault secrets enable -path=kv -version=1 kv"

# prepare localstack
sleep 3s
# secret with JSON payload, used by the secret-manager tests
docker exec -i localstack sh -c "awslocal secretsmanager create-secret --name apisix-key --description 'APISIX Secret' --secret-string '{\"jack\":\"value\"}'"
sleep 3s
# plain-string secret
docker exec -i localstack sh -c "awslocal secretsmanager create-secret --name apisix-mysql --description 'APISIX Secret' --secret-string 'secret'"
# Pre-compose setup: self-signed keystore for the Kafka TLS listener.
before() {
    # generating SSL certificates for Kafka
    sudo keytool -genkeypair -keyalg RSA -dname "CN=127.0.0.1" -alias 127.0.0.1 -keystore ./ci/pod/kafka/kafka-server/selfsigned.jks -validity 365 -keysize 2048 -storepass changeit
}

# Post-compose setup: create the Kafka topics the tests expect and seed
# the consumer topic with 30 messages.
after() {
    docker exec -i apache-apisix-kafka-server1-1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test2
    docker exec -i apache-apisix-kafka-server1-1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 3 --topic test3
    docker exec -i apache-apisix-kafka-server2-1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server2:2181 --replication-factor 1 --partitions 1 --topic test4
    docker exec -i apache-apisix-kafka-server1-1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test-consumer
    # create messages for test-consumer
    for i in `seq 30`
    do
        # NOTE(review): the inner double quotes around testmsg$i close and
        # reopen the outer string, so this works only because the message has
        # no spaces — confirm before changing the payload.
        docker exec -i apache-apisix-kafka-server1-1 bash -c "echo "testmsg$i" | /opt/bitnami/kafka/bin/kafka-console-producer.sh --bootstrap-server 127.0.0.1:9092 --topic test-consumer"
        echo "Produces messages to the test-consumer topic, msg: testmsg$i"
    done
    echo "Kafka service initialization completed"
}

# dispatch: "$0 before" or "$0 after"
case $1 in
    'after')
        after
        ;;
    'before')
        before
        ;;
esac
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +after() { + docker exec -i apache-apisix-kafka-server1-1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 1 --topic test2 + docker exec -i apache-apisix-kafka-server1-1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server1:2181 --replication-factor 1 --partitions 3 --topic test3 + docker exec -i apache-apisix-kafka-server2-1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server2:2181 --replication-factor 1 --partitions 1 --topic test4 + + # prepare openwhisk env + docker pull openwhisk/action-nodejs-v14:1.20.0 + docker run --rm -d --name openwhisk -p 3233:3233 -p 3232:3232 -v /var/run/docker.sock:/var/run/docker.sock openwhisk/standalone:1.0.0 + docker exec -i openwhisk waitready + docker exec -i openwhisk bash -c "wsk package create pkg" + docker exec -i openwhisk bash -c "wsk action update /guest/pkg/testpkg <(echo 'function main(args){return {\"hello\": \"world\"}}') --kind nodejs:14" + docker exec -i openwhisk bash -c "wsk action update test <(echo 'function main(args){return {\"hello\": \"test\"}}') --kind nodejs:14" + docker exec -i openwhisk bash -c "wsk action update test-params <(echo 'function main(args){return {\"hello\": args.name || \"test\"}}') --kind nodejs:14" + docker exec -i openwhisk bash -c "wsk action update test-statuscode <(echo 'function main(args){return 
{\"statusCode\": 407}}') --kind nodejs:14" + docker exec -i openwhisk bash -c "wsk action update test-headers <(echo 'function main(args){return {\"headers\": {\"test\":\"header\"}}}') --kind nodejs:14" + docker exec -i openwhisk bash -c "wsk action update test-body <(echo 'function main(args){return {\"body\": {\"test\":\"body\"}}}') --kind nodejs:14" + + + docker exec -i rmqnamesrv rm /home/rocketmq/rocketmq-4.6.0/conf/tools.yml + docker exec -i rmqnamesrv /home/rocketmq/rocketmq-4.6.0/bin/mqadmin updateTopic -n rocketmq_namesrv:9876 -t test -c DefaultCluster + docker exec -i rmqnamesrv /home/rocketmq/rocketmq-4.6.0/bin/mqadmin updateTopic -n rocketmq_namesrv:9876 -t test2 -c DefaultCluster + docker exec -i rmqnamesrv /home/rocketmq/rocketmq-4.6.0/bin/mqadmin updateTopic -n rocketmq_namesrv:9876 -t test3 -c DefaultCluster + docker exec -i rmqnamesrv /home/rocketmq/rocketmq-4.6.0/bin/mqadmin updateTopic -n rocketmq_namesrv:9876 -t test4 -c DefaultCluster + + # wait for keycloak ready + bash -c 'while true; do curl -s localhost:8080 &>/dev/null; ret=$?; [[ $ret -eq 0 ]] && break; sleep 3; done' + + # install jq + wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 -O jq + chmod +x jq + docker cp jq apisix_keycloak:/usr/bin/ + + # configure keycloak + docker exec apisix_keycloak bash /tmp/kcadm_configure_cas.sh + docker exec apisix_keycloak bash /tmp/kcadm_configure_university.sh + docker exec apisix_keycloak bash /tmp/kcadm_configure_basic.sh + + # configure clickhouse + echo 'CREATE TABLE default.test (`host` String, `client_ip` String, `route_id` String, `service_id` String, `@timestamp` String, PRIMARY KEY(`@timestamp`)) ENGINE = MergeTree()' | curl 'http://localhost:8123/' --data-binary @- + echo 'CREATE TABLE default.test (`host` String, `client_ip` String, `route_id` String, `service_id` String, `@timestamp` String, PRIMARY KEY(`@timestamp`)) ENGINE = MergeTree()' | curl 'http://localhost:8124/' --data-binary @- +} + +before() { + # 
download keycloak cas provider + sudo wget -q https://github.com/jacekkow/keycloak-protocol-cas/releases/download/18.0.2/keycloak-protocol-cas-18.0.2.jar -O /opt/keycloak-protocol-cas-18.0.2.jar +} + +case $1 in + 'after') + after + ;; + 'before') + before + ;; +esac diff --git a/CloudronPackages/APISIX/apisix-source/ci/kubernetes-ci.sh b/CloudronPackages/APISIX/apisix-source/ci/kubernetes-ci.sh new file mode 100755 index 0000000..c40b8c7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/kubernetes-ci.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./ci/common.sh + +run_case() { + export_or_prefix + export PERL5LIB=.:$PERL5LIB + prove -Itest-nginx/lib -I./ -r t/kubernetes | tee test-result + rerun_flaky_tests test-result +} + +case_opt=$1 +case $case_opt in + (run_case) + run_case + ;; +esac diff --git a/CloudronPackages/APISIX/apisix-source/ci/linux-install-etcd-client.sh b/CloudronPackages/APISIX/apisix-source/ci/linux-install-etcd-client.sh new file mode 100755 index 0000000..33e5b8a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/linux-install-etcd-client.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +ETCD_ARCH="amd64" +ETCD_VERSION=${ETCD_VERSION:-'3.5.4'} +ARCH=${ARCH:-`(uname -m | tr '[:upper:]' '[:lower:]')`} + +if [[ $ARCH == "arm64" ]] || [[ $ARCH == "aarch64" ]]; then + ETCD_ARCH="arm64" +fi + +wget -q https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}.tar.gz +tar xf etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}.tar.gz +sudo cp etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH}/etcdctl /usr/local/bin/ +rm -rf etcd-v${ETCD_VERSION}-linux-${ETCD_ARCH} diff --git a/CloudronPackages/APISIX/apisix-source/ci/linux-install-openresty.sh b/CloudronPackages/APISIX/apisix-source/ci/linux-install-openresty.sh new file mode 100755 index 0000000..465df32 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/linux-install-openresty.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +set -euo pipefail + +source ./ci/common.sh + +export_version_info + +ARCH=${ARCH:-`(uname -m | tr '[:upper:]' '[:lower:]')`} +arch_path="" +if [[ $ARCH == "arm64" ]] || [[ $ARCH == "aarch64" ]]; then + arch_path="arm64/" +fi + +wget -qO - https://openresty.org/package/pubkey.gpg | sudo apt-key add - +wget -qO - http://repos.apiseven.com/pubkey.gpg | sudo apt-key add - +sudo apt-get -y update --fix-missing +sudo apt-get -y install software-properties-common +sudo add-apt-repository -y "deb https://openresty.org/package/${arch_path}ubuntu $(lsb_release -sc) main" +sudo add-apt-repository -y "deb http://repos.apiseven.com/packages/${arch_path}debian bullseye main" + +sudo apt-get update +sudo apt-get install -y openresty-pcre-dev openresty-zlib-dev build-essential gcc g++ cpanminus + +SSL_LIB_VERSION=${SSL_LIB_VERSION-openssl} +ENABLE_FIPS=${ENABLE_FIPS:-"false"} + +if [ "$OPENRESTY_VERSION" == "source" ]; then + if [ "$SSL_LIB_VERSION" == "tongsuo" ]; then + export openssl_prefix=/usr/local/tongsuo + export zlib_prefix=$OPENRESTY_PREFIX/zlib + export pcre_prefix=$OPENRESTY_PREFIX/pcre + + export cc_opt="-DNGX_LUA_ABORT_AT_PANIC -I${zlib_prefix}/include -I${pcre_prefix}/include -I${openssl_prefix}/include" + export ld_opt="-L${zlib_prefix}/lib -L${pcre_prefix}/lib -L${openssl_prefix}/lib64 -Wl,-rpath,${zlib_prefix}/lib:${pcre_prefix}/lib:${openssl_prefix}/lib64" + fi +fi + +install_apisix_runtime + +if [ ! 
"$ENABLE_FIPS" == "true" ]; then +curl -o /usr/local/openresty/openssl3/ssl/openssl.cnf \ + https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/conf/openssl3/openssl.cnf +fi + +# patch lua-resty-events +sed -i 's/log(ERR, "event worker failed: ", perr)/log(ngx.WARN, "event worker failed: ", perr)/' /usr/local/openresty/lualib/resty/events/worker.lua diff --git a/CloudronPackages/APISIX/apisix-source/ci/linux_apisix_current_luarocks_in_customed_nginx_runner.sh b/CloudronPackages/APISIX/apisix-source/ci/linux_apisix_current_luarocks_in_customed_nginx_runner.sh new file mode 100755 index 0000000..3eaba07 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/linux_apisix_current_luarocks_in_customed_nginx_runner.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +export OPENRESTY_VERSION=source +. 
./ci/linux_apisix_current_luarocks_runner.sh diff --git a/CloudronPackages/APISIX/apisix-source/ci/linux_apisix_current_luarocks_runner.sh b/CloudronPackages/APISIX/apisix-source/ci/linux_apisix_current_luarocks_runner.sh new file mode 100755 index 0000000..39b9df8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/linux_apisix_current_luarocks_runner.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./ci/common.sh + +do_install() { + linux_get_dependencies + install_brotli + + export_or_prefix + + ./ci/linux-install-openresty.sh + ./utils/linux-install-luarocks.sh + ./ci/linux-install-etcd-client.sh +} + +script() { + export_or_prefix + openresty -V + + sudo rm -rf /usr/local/share/lua/5.1/apisix + + # install APISIX with local version + luarocks install apisix-master-0.rockspec --only-deps > build.log 2>&1 || (cat build.log && exit 1) + luarocks make apisix-master-0.rockspec > build.log 2>&1 || (cat build.log && exit 1) + # ensure all files under apisix is installed + diff -rq apisix /usr/local/share/lua/5.1/apisix + + mkdir cli_tmp && cd cli_tmp + + # show install file + luarocks show apisix + + sudo PATH=$PATH apisix help + sudo PATH=$PATH apisix init + sudo PATH=$PATH apisix start + sudo PATH=$PATH apisix stop + + grep '\[error\]' /usr/local/apisix/logs/error.log > /tmp/error.log | true + if [ -s /tmp/error.log ]; then + echo "=====found error log=====" + cat /usr/local/apisix/logs/error.log + exit 1 + fi + + cd .. + + # apisix cli test + set_coredns + + # install test dependencies + sudo pip install requests + + # dismiss "maximum number of open file descriptors too small" warning + ulimit -n 10240 + ulimit -n -S + ulimit -n -H + + for f in ./t/cli/test_*.sh; do + PATH="$PATH" "$f" + done +} + +case_opt=$1 +shift + +case ${case_opt} in +do_install) + do_install "$@" + ;; +script) + script "$@" + ;; +esac diff --git a/CloudronPackages/APISIX/apisix-source/ci/linux_openresty_common_runner.sh b/CloudronPackages/APISIX/apisix-source/ci/linux_openresty_common_runner.sh new file mode 100755 index 0000000..afaf948 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/linux_openresty_common_runner.sh @@ -0,0 +1,127 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./ci/common.sh + +before_install() { + linux_get_dependencies + + sudo cpanm --notest Test::Nginx >build.log 2>&1 || (cat build.log && exit 1) +} + +do_install() { + export_or_prefix + + ./ci/linux-install-openresty.sh + + ./utils/linux-install-luarocks.sh + + ./ci/linux-install-etcd-client.sh + + create_lua_deps + + # sudo apt-get install tree -y + # tree deps + + # The latest version of test-nginx is not compatible with the current set of tests with ---http2 + # due to this commit: https://github.com/openresty/test-nginx/commit/0ccd106cbe6878318e5a591634af8f1707c411a6 + # This change pins test-nginx to a commit before this one. + git clone --depth 1 https://github.com/openresty/test-nginx.git test-nginx + cd test-nginx + git fetch --depth=1 origin ced30a31bafab6c68873efb17b6d80f39bcd95f5 + git checkout ced30a31bafab6c68873efb17b6d80f39bcd95f5 + cd .. 
+ + make utils + + mkdir -p build-cache + # install and start grpc_server_example + cd t/grpc_server_example + + CGO_ENABLED=0 go build + cd ../../ + + # install grpcurl + install_grpcurl + + # install nodejs + install_nodejs + + # grpc-web server && client + cd t/plugin/grpc-web + ./setup.sh + # back to home directory + cd ../../../ + + # install mcp test suite + pushd t/plugin/mcp + pnpm install + popd + + # install common jest test suite + pushd t + pnpm install + popd + + # install vault cli capabilities + install_vault_cli + + # install brotli + install_brotli +} + +script() { + export_or_prefix + openresty -V + + make init + + set_coredns + + start_grpc_server_example + + start_sse_server_example + + # APISIX_ENABLE_LUACOV=1 PERL5LIB=.:$PERL5LIB prove -Itest-nginx/lib -r t + FLUSH_ETCD=1 TEST_EVENTS_MODULE=$TEST_EVENTS_MODULE prove --timer -Itest-nginx/lib -I./ -r $TEST_FILE_SUB_DIR | tee /tmp/test.result + rerun_flaky_tests /tmp/test.result +} + +after_success() { + # cat luacov.stats.out + # luacov-coveralls + echo "done" +} + +case_opt=$1 +shift + +case ${case_opt} in +before_install) + before_install "$@" + ;; +do_install) + do_install "$@" + ;; +script) + script "$@" + ;; +after_success) + after_success "$@" + ;; +esac diff --git a/CloudronPackages/APISIX/apisix-source/ci/linux_openresty_runner.sh b/CloudronPackages/APISIX/apisix-source/ci/linux_openresty_runner.sh new file mode 100755 index 0000000..2e39224 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/linux_openresty_runner.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +export OPENRESTY_VERSION=source +. ./ci/linux_openresty_common_runner.sh diff --git a/CloudronPackages/APISIX/apisix-source/ci/linux_openresty_tongsuo_runner.sh b/CloudronPackages/APISIX/apisix-source/ci/linux_openresty_tongsuo_runner.sh new file mode 100755 index 0000000..2afdcea --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/linux_openresty_tongsuo_runner.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +export OPENRESTY_VERSION=source +export SSL_LIB_VERSION=tongsuo + + +before_install() { + if [ -n "$COMPILE_TONGSUO" ]; then + git clone https://github.com/api7/tongsuo --depth 1 + pushd tongsuo + # build binary + ./config enable-ntls -static + make -j2 + mv apps/openssl apps/static-openssl + ./config shared enable-ntls -g --prefix=/usr/local/tongsuo + make -j2 + popd + fi + + pushd tongsuo + sudo make install_sw + sudo cp apps/static-openssl /usr/local/tongsuo/bin/openssl + export PATH=/usr/local/tongsuo/bin:$PATH + openssl version + popd +} + + +case_opt=$1 + +case ${case_opt} in +before_install) + # shellcheck disable=SC2218 + before_install + ;; +esac + +. ./ci/linux_openresty_common_runner.sh diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.common.yml b/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.common.yml new file mode 100644 index 0000000..67504cb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.common.yml @@ -0,0 +1,113 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +version: "3.8" + +services: + ## Etcd + etcd_old: + image: bitnami/etcd:3.3.8 + restart: unless-stopped + env_file: + - ci/pod/etcd/env/common.env + environment: + ETCD_ADVERTISE_CLIENT_URLS: http://0.0.0.0:2379 + ports: + - "3379:2379" + - "3380:2380" + + etcd: + image: bitnami/etcd:3.5.4 + restart: unless-stopped + env_file: + - ci/pod/etcd/env/common.env + environment: + ETCD_ADVERTISE_CLIENT_URLS: http://0.0.0.0:2379 + ports: + - "2379:2379" + - "2380:2380" + + etcd_tls: + image: bitnami/etcd:3.5.4 + restart: unless-stopped + env_file: + - ci/pod/etcd/env/common.env + environment: + ETCD_ADVERTISE_CLIENT_URLS: https://0.0.0.0:12379 + ETCD_LISTEN_CLIENT_URLS: https://0.0.0.0:12379 + ETCD_CERT_FILE: /certs/etcd.pem + ETCD_KEY_FILE: /certs/etcd.key + ports: + - "12379:12379" + - "12380:12380" + volumes: + - ./t/certs:/certs + + etcd_mtls: + image: bitnami/etcd:3.5.4 + restart: unless-stopped + env_file: + - ci/pod/etcd/env/common.env + environment: + ETCD_ADVERTISE_CLIENT_URLS: https://0.0.0.0:22379 + ETCD_LISTEN_CLIENT_URLS: https://0.0.0.0:22379 + ETCD_CERT_FILE: /certs/mtls_server.crt + ETCD_KEY_FILE: /certs/mtls_server.key + ETCD_CLIENT_CERT_AUTH: "true" + ETCD_TRUSTED_CA_FILE: /certs/mtls_ca.crt + ports: + - "22379:22379" + - "22380:22380" + volumes: + - ./t/certs:/certs + + + ## Redis cluster + redis-cluster: + image: vishnunair/docker-redis-cluster:latest + restart: unless-stopped + ports: + - "5000:6379" + - "5002:6380" + - "5003:6381" + - "5004:6382" + - "5005:6383" + - "5006:6384" + + + ## HashiCorp Vault + vault: + image: vault:1.9.0 + container_name: vault + restart: unless-stopped + ports: + - "8200:8200" + cap_add: + - IPC_LOCK + environment: + VAULT_DEV_ROOT_TOKEN_ID: root + VAULT_DEV_LISTEN_ADDRESS: 0.0.0.0:8200 + command: [ "vault", "server", "-dev" ] + + + ## LocalStack + localstack: + image: localstack/localstack + container_name: localstack + restart: unless-stopped + ports: + - "127.0.0.1:4566:4566" # LocalStack Gateway diff --git 
a/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.first.yml b/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.first.yml new file mode 100644 index 0000000..d203a96 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.first.yml @@ -0,0 +1,304 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +version: "3.8" + +services: + ## Eureka + eureka: + image: bitinit/eureka + env_file: + - ci/pod/eureka/env/common.env + restart: unless-stopped + ports: + - "8761:8761" + + ## Consul + consul_1: + image: consul:1.7 + restart: unless-stopped + ports: + - "8500:8500" + command: [ "consul", "agent", "-server", "-bootstrap-expect=1", "-client", "0.0.0.0", "-log-level", "info", "-data-dir=/consul/data", "-enable-script-checks" ] + networks: + consul_net: + + consul_2: + image: consul:1.7 + restart: unless-stopped + ports: + - "8600:8500" + command: [ "consul", "agent", "-server", "-bootstrap-expect=1", "-client", "0.0.0.0", "-log-level", "info", "-data-dir=/consul/data", "-enable-script-checks" ] + networks: + consul_net: + + consul_3: + image: hashicorp/consul:1.16.2 + restart: unless-stopped + ports: + - "8502:8500" + command: [ "consul", "agent", "-server", "-bootstrap-expect=1", "-client", "0.0.0.0", "-log-level", "info", "-data-dir=/consul/data", "-enable-script-checks", "-ui", "-hcl", "acl = {\nenabled = true\ndefault_policy = \"deny\"\nenable_token_persistence = true\ntokens = {\nagent = \"2b778dd9-f5f1-6f29-b4b4-9a5fa948757a\"\n}}" ] + networks: + consul_net: + + ## Consul cluster + consul_node_1: + image: consul:1.7 + restart: unless-stopped + ports: + - "9500:8500" + - "8300:8300" + - "8301:8301" + - "8302:8302" + - "9600:8600" + command: [ "consul", "agent", "-server", "-bootstrap-expect=1", "-bind", "0.0.0.0", "-client", "0.0.0.0", "-node", "node-1", "-log-level", "info", "-data-dir=/consul/data", "-enable-script-checks" ] + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8500/"] + interval: 10s + timeout: 10s + retries: 5 + networks: + consul_cluster_net: + aliases: + - consul.cluster + + consul_node_2: + image: consul:1.7 + restart: unless-stopped + environment: + - CONSUL_BIND_INTERFACE=eth0 + ports: + - "9501:8500" + command: [ "consul", "agent", "-server", "-bind", "0.0.0.0", "-client", "0.0.0.0", "-retry-join", "consul.cluster", 
"-node", "node-2", "-log-level", "info", "-data-dir=/consul/data", "-enable-script-checks" ] + depends_on: + consul_node_1: + condition: service_healthy + networks: + consul_cluster_net: + aliases: + - consul.cluster + + consul_node_3: + image: consul:1.7 + restart: unless-stopped + environment: + - CONSUL_BIND_INTERFACE=eth0 + ports: + - "9502:8500" + command: [ "consul", "agent", "-server", "-bind", "0.0.0.0", "-client", "0.0.0.0", "-retry-join", "consul.cluster", "-node", "node-3", "-log-level", "info", "-data-dir=/consul/data", "-enable-script-checks" ] + depends_on: + consul_node_1: + condition: service_healthy + networks: + consul_cluster_net: + aliases: + - consul.cluster + + ## Nacos cluster + nacos_auth: + hostname: nacos1 + image: nacos/nacos-server:1.4.1 + env_file: + - ci/pod/nacos/env/common.env + environment: + NACOS_AUTH_ENABLE: "true" + restart: unless-stopped + ports: + - "8848:8848" + networks: + nacos_net: + + nacos_no_auth: + hostname: nacos2 + image: nacos/nacos-server:1.4.1 + env_file: + - ci/pod/nacos/env/common.env + restart: unless-stopped + ports: + - "8858:8848" + networks: + nacos_net: + + nacos_server_health_check: + build: + context: ci/pod/nacos/healthcheck + dockerfile: Dockerfile + environment: + CHECK_URI: "http://nacos2:8848/nacos/v1/ns/service/list?pageNo=1&pageSize=2" + tty: true + # debug healthcheck script +# volumes: +# - ./ci/pod/nacos/healthcheck/nacos-server-healthcheck.sh:/nacos-server-healthcheck.sh + healthcheck: + test: [ "CMD", "bash", "/nacos-server-healthcheck.sh" ] + interval: 5s + timeout: 5s + retries: 60 + start_period: 10s + networks: + nacos_net: + + nacos_service_health_check: + build: + context: ci/pod/nacos/healthcheck + dockerfile: Dockerfile + # debug healthcheck script +# volumes: +# - ./ci/pod/nacos/healthcheck/nacos-service-healthcheck.sh:/nacos-service-healthcheck.sh + tty: true + healthcheck: + test: [ "CMD", "bash", "/nacos-service-healthcheck.sh" ] + interval: 5s + timeout: 30s + retries: 60 + 
start_period: 10s + networks: + nacos_net: + + ### Nacos services + nacos-service1: + build: + context: ci/pod/nacos/service + dockerfile: Dockerfile + env_file: + - ci/pod/nacos/env/service.env + environment: + SUFFIX_NUM: 1 + restart: unless-stopped + ports: + - "18001:18001" + depends_on: + nacos_server_health_check: + condition: service_healthy + networks: + nacos_net: + + nacos-service2: + build: + context: ci/pod/nacos/service + dockerfile: Dockerfile + env_file: + - ci/pod/nacos/env/service.env + environment: + SUFFIX_NUM: 2 + restart: unless-stopped + ports: + - "18002:18001" + depends_on: + nacos_server_health_check: + condition: service_healthy + networks: + nacos_net: + + nacos-service3: + build: + context: ci/pod/nacos/service + dockerfile: Dockerfile + env_file: + - ci/pod/nacos/env/service.env + environment: + SUFFIX_NUM: 1 + NAMESPACE: test_ns + restart: unless-stopped + ports: + - "18003:18001" + depends_on: + nacos_server_health_check: + condition: service_healthy + networks: + nacos_net: + + nacos-service4: + build: + context: ci/pod/nacos/service + dockerfile: Dockerfile + env_file: + - ci/pod/nacos/env/service.env + environment: + SUFFIX_NUM: 1 + GROUP: test_group + restart: unless-stopped + ports: + - "18004:18001" + depends_on: + nacos_server_health_check: + condition: service_healthy + networks: + nacos_net: + + nacos-service5: + build: + context: ci/pod/nacos/service + dockerfile: Dockerfile + env_file: + - ci/pod/nacos/env/service.env + environment: + SUFFIX_NUM: 1 + GROUP: test_group + NAMESPACE: test_ns + restart: unless-stopped + ports: + - "18005:18001" + depends_on: + nacos_server_health_check: + condition: service_healthy + networks: + nacos_net: + + nacos-service6: + build: + context: ci/pod/nacos/service + dockerfile: Dockerfile + env_file: + - ci/pod/nacos/env/service.env + environment: + SUFFIX_NUM: 3 + GROUP: test_group2 + NAMESPACE: test_ns + restart: unless-stopped + ports: + - "18006:18001" + depends_on: + 
nacos_server_health_check: + condition: service_healthy + networks: + nacos_net: + + nacos-service7: + build: + context: ci/pod/nacos/service + dockerfile: Dockerfile + env_file: + - ci/pod/nacos/env/service.env + environment: + SUFFIX_NUM: 4 + GROUP: test_group + NAMESPACE: test_ns2 + restart: unless-stopped + ports: + - "18007:18001" + depends_on: + nacos_server_health_check: + condition: service_healthy + networks: + nacos_net: + + +networks: + consul_cluster_net: + consul_net: + nacos_net: diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.last.yml b/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.last.yml new file mode 100644 index 0000000..dbc835f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.last.yml @@ -0,0 +1,97 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +version: "3.8" + +services: + ## Redis + apisix_redis: + # The latest image is the latest stable version + image: redis:latest + restart: unless-stopped + ports: + - "6379:6379" + networks: + apisix_net: + + ## kafka-cluster + zookeeper-server1: + image: bitnami/zookeeper:3.6.0 + env_file: + - ci/pod/kafka/zookeeper-server/env/common.env + restart: unless-stopped + ports: + - "2181:2181" + networks: + kafka_net: + + zookeeper-server2: + image: bitnami/zookeeper:3.6.0 + env_file: + - ci/pod/kafka/zookeeper-server/env/common.env + restart: unless-stopped + ports: + - "12181:12181" + networks: + kafka_net: + + kafka-server1: + image: bitnami/kafka:2.8.1 + env_file: + - ci/pod/kafka/kafka-server/env/last.env + environment: + KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server1:2181 + restart: unless-stopped + ports: + - "9092:9092" + - "9093:9093" + - "9094:9094" + depends_on: + - zookeeper-server1 + - zookeeper-server2 + networks: + kafka_net: + volumes: + - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro + - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro + - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro + + kafka-server2: + image: bitnami/kafka:2.8.1 + env_file: + - ci/pod/kafka/kafka-server/env/last.env + environment: + KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server2:2181 + restart: unless-stopped + ports: + - "19092:9092" + - "19093:9093" + - "19094:9094" + depends_on: + - zookeeper-server1 + - zookeeper-server2 + networks: + kafka_net: + volumes: + - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro + - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.keystore.jks:ro + - ./ci/pod/kafka/kafka-server/selfsigned.jks:/opt/bitnami/kafka/config/certs/kafka.truststore.jks:ro + + +networks: + apisix_net: + kafka_net: diff --git 
a/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.plugin.yml b/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.plugin.yml new file mode 100644 index 0000000..c0a598c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/docker-compose.plugin.yml @@ -0,0 +1,400 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +version: "3.8" + +services: + ## Redis + apisix_redis: + # The latest image is the latest stable version + image: redis:latest + restart: unless-stopped + volumes: + - ./t/certs:/certs + command: "--tls-port 6380 \ + --tls-cert-file /certs/mtls_server.crt \ + --tls-key-file /certs/mtls_server.key \ + --tls-ca-cert-file /certs/mtls_ca.crt \ + --tls-auth-clients no \ + --user alice on +@all ~* \\&* \\>somepassword" + ports: + - "6379:6379" + - "6380:6380" + networks: + apisix_net: + + ## keycloak + apisix_keycloak: + container_name: apisix_keycloak + image: quay.io/keycloak/keycloak:18.0.2 + # use host network because in CAS auth, + # keycloak needs to send back-channel POST to apisix. 
+ network_mode: host + environment: + KEYCLOAK_ADMIN: admin + KEYCLOAK_ADMIN_PASSWORD: admin + KC_HTTPS_CERTIFICATE_FILE: /opt/keycloak/conf/server.crt.pem + KC_HTTPS_CERTIFICATE_KEY_FILE: /opt/keycloak/conf/server.key.pem + restart: unless-stopped + command: ["start-dev"] + volumes: + - /opt/keycloak-protocol-cas-18.0.2.jar:/opt/keycloak/providers/keycloak-protocol-cas-18.0.2.jar + - ./ci/pod/keycloak/server.crt.pem:/opt/keycloak/conf/server.crt.pem + - ./ci/pod/keycloak/server.key.pem:/opt/keycloak/conf/server.key.pem + - ./ci/pod/keycloak/kcadm_configure_cas.sh:/tmp/kcadm_configure_cas.sh + - ./ci/pod/keycloak/kcadm_configure_university.sh:/tmp/kcadm_configure_university.sh + - ./ci/pod/keycloak/kcadm_configure_basic.sh:/tmp/kcadm_configure_basic.sh + + ## kafka-cluster + zookeeper-server1: + image: bitnami/zookeeper:3.6.0 + env_file: + - ci/pod/kafka/zookeeper-server/env/common.env + restart: unless-stopped + ports: + - "2181:2181" + networks: + kafka_net: + + zookeeper-server2: + image: bitnami/zookeeper:3.6.0 + env_file: + - ci/pod/kafka/zookeeper-server/env/common.env + restart: unless-stopped + ports: + - "12181:12181" + networks: + kafka_net: + + kafka-server1: + image: bitnami/kafka:2.8.1 + env_file: + - ci/pod/kafka/kafka-server/env/common.env + environment: + KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server1:2181 + restart: unless-stopped + ports: + - "9092:9092" + depends_on: + - zookeeper-server1 + - zookeeper-server2 + networks: + kafka_net: + + kafka-server2: + image: bitnami/kafka:2.8.1 + env_file: + - ci/pod/kafka/kafka-server/env/common2.env + environment: + KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper-server2:2181 + restart: unless-stopped + ports: + - "19092:19092" + - "19094:19094" + depends_on: + - zookeeper-server1 + - zookeeper-server2 + networks: + kafka_net: + volumes: + - ./ci/pod/kafka/kafka-server/kafka_jaas.conf:/opt/bitnami/kafka/config/kafka_jaas.conf:ro + + ## SkyWalking + skywalking: + image: apache/skywalking-oap-server:8.7.0-es6 + 
restart: unless-stopped
+    ports:
+      - "1234:1234"
+      - "11800:11800"
+      - "12800:12800"
+    networks:
+      skywalk_net:
+
+
+  ## OpenLDAP
+  openldap:
+    image: bitnami/openldap:2.5.8
+    environment:
+      - LDAP_ADMIN_USERNAME=admin
+      - LDAP_ADMIN_PASSWORD=adminpassword
+      - LDAP_USERS=user01,user02
+      - LDAP_PASSWORDS=password1,password2
+      - LDAP_ENABLE_TLS=yes
+      - LDAP_TLS_CERT_FILE=/certs/localhost_slapd_cert.pem
+      - LDAP_TLS_KEY_FILE=/certs/localhost_slapd_key.pem
+      - LDAP_TLS_CA_FILE=/certs/apisix.crt
+    ports:
+      - "1389:1389"
+      - "1636:1636"
+    volumes:
+      - ./t/certs:/certs
+
+
+  ## Grafana Loki
+  loki:
+    image: grafana/loki:2.8.0
+    command: -config.file=/etc/loki/local-config.yaml -auth.enabled -querier.multi-tenant-queries-enabled
+    ports:
+      - "3100:3100"
+    networks:
+      - loki_net
+
+  rocketmq_namesrv:
+    image: apacherocketmq/rocketmq:4.6.0
+    container_name: rmqnamesrv
+    restart: unless-stopped
+    ports:
+      - "9876:9876"
+    command: sh mqnamesrv
+    networks:
+      rocketmq_net:
+
+  rocketmq_broker:
+    image: apacherocketmq/rocketmq:4.6.0
+    container_name: rmqbroker
+    restart: unless-stopped
+    ports:
+      - "10909:10909"
+      - "10911:10911"
+      - "10912:10912"
+    depends_on:
+      - rocketmq_namesrv
+    command: sh mqbroker -n rocketmq_namesrv:9876 -c ../conf/broker.conf
+    networks:
+      rocketmq_net:
+
+  # Open Policy Agent
+  opa:
+    image: openpolicyagent/opa:0.35.0
+    restart: unless-stopped
+    ports:
+      - 8181:8181
+    command: run -s /example.rego /echo.rego /data.json /with_route.rego
+    volumes:
+      - type: bind
+        source: ./ci/pod/opa/with_route.rego
+        target: /with_route.rego
+      - type: bind
+        source: ./ci/pod/opa/example.rego
+        target: /example.rego
+      - type: bind
+        source: ./ci/pod/opa/echo.rego
+        target: /echo.rego
+      - type: bind
+        source: ./ci/pod/opa/data.json
+        target: /data.json
+    networks:
+      opa_net:
+
+  # Elasticsearch Logger Service
+  elasticsearch-noauth:
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0
+    restart: unless-stopped
+    ports:
+      - "9200:9200"
+      - "9300:9300"
+    
environment: + ES_JAVA_OPTS: -Xms512m -Xmx512m + discovery.type: single-node + xpack.security.enabled: 'false' + + elasticsearch-auth: + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.0 + restart: unless-stopped + ports: + - "9201:9201" + environment: + ES_JAVA_OPTS: -Xms512m -Xmx512m + discovery.type: single-node + ELASTIC_USERNAME: elastic + ELASTIC_PASSWORD: 123456 + http.port: 9201 + xpack.security.enabled: 'true' + + elasticsearch-auth-2: + image: docker.elastic.co/elasticsearch/elasticsearch:9.0.2 + restart: unless-stopped + ports: + - "9301:9201" + environment: + ES_JAVA_OPTS: -Xms512m -Xmx512m + discovery.type: single-node + ELASTIC_USERNAME: elastic + ELASTIC_PASSWORD: 123456 + http.port: 9201 + xpack.security.enabled: 'true' + + elasticsearch-auth-3: + image: docker.elastic.co/elasticsearch/elasticsearch:7.0.0 + restart: unless-stopped + ports: + - "9401:9201" + environment: + ES_JAVA_OPTS: -Xms512m -Xmx512m + discovery.type: single-node + ELASTIC_USERNAME: elastic + ELASTIC_PASSWORD: 123456 + http.port: 9201 + xpack.security.enabled: 'true' + + elasticsearch-auth-4: + image: docker.elastic.co/elasticsearch/elasticsearch:6.7.0 + restart: unless-stopped + ports: + - "9501:9201" + environment: + ES_JAVA_OPTS: -Xms512m -Xmx512m + discovery.type: single-node + ELASTIC_USERNAME: elastic + ELASTIC_PASSWORD: 123456 + http.port: 9201 + xpack.security.enabled: 'true' + + # The function services of OpenFunction + test-header: + image: test-header-image:latest + restart: unless-stopped + ports: + - "30583:8080" + environment: + CONTEXT_MODE: "self-host" + FUNC_CONTEXT: "{\"name\":\"HelloWorld\",\"version\":\"v1.0.0\",\"port\":\"8080\",\"runtime\":\"Knative\"}" + + test-uri: + image: test-uri-image:latest + restart: unless-stopped + ports: + - "30584:8080" + environment: + CONTEXT_MODE: "self-host" + FUNC_CONTEXT: "{\"name\":\"HelloWorld\",\"version\":\"v1.0.0\",\"port\":\"8080\",\"runtime\":\"Knative\"}" + + test-body: + image: test-body-image:latest + 
restart: unless-stopped + ports: + - "30585:8080" + environment: + CONTEXT_MODE: "self-host" + FUNC_CONTEXT: "{\"name\":\"HelloWorld\",\"version\":\"v1.0.0\",\"port\":\"8080\",\"runtime\":\"Knative\"}" + + ## RedisCluster Enable TLS + redis-node-0: + image: docker.io/bitnami/redis-cluster:7.0 + volumes: + - ./t/certs:/certs + environment: + - 'ALLOW_EMPTY_PASSWORD=yes' + - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2' + - 'REDIS_TLS_ENABLED=yes' + - 'REDIS_TLS_CERT_FILE=/certs/mtls_server.crt' + - 'REDIS_TLS_KEY_FILE=/certs/mtls_server.key' + - 'REDIS_TLS_CA_FILE=/certs/mtls_ca.crt' + - 'REDIS_TLS_AUTH_CLIENTS=no' + ports: + - '7000:6379' + + redis-node-1: + image: docker.io/bitnami/redis-cluster:7.0 + volumes: + - ./t/certs:/certs + environment: + - 'ALLOW_EMPTY_PASSWORD=yes' + - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2' + - 'REDIS_TLS_ENABLED=yes' + - 'REDIS_TLS_CERT_FILE=/certs/mtls_server.crt' + - 'REDIS_TLS_KEY_FILE=/certs/mtls_server.key' + - 'REDIS_TLS_CA_FILE=/certs/mtls_ca.crt' + - 'REDIS_TLS_AUTH_CLIENTS=no' + ports: + - '7001:6379' + + redis-node-2: + image: docker.io/bitnami/redis-cluster:7.0 + volumes: + - ./t/certs:/certs + depends_on: + - redis-node-0 + - redis-node-1 + environment: + - 'ALLOW_EMPTY_PASSWORD=yes' + - 'REDIS_CLUSTER_REPLICAS=0' + - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2' + - 'REDIS_CLUSTER_CREATOR=yes' + - 'REDIS_TLS_ENABLED=yes' + - 'REDIS_TLS_CERT_FILE=/certs/mtls_server.crt' + - 'REDIS_TLS_KEY_FILE=/certs/mtls_server.key' + - 'REDIS_TLS_CA_FILE=/certs/mtls_ca.crt' + - 'REDIS_TLS_AUTH_CLIENTS=no' + ports: + - '7002:6379' + + graphql-demo: + # the owner doesn't provide a semver tag + image: npalm/graphql-java-demo:latest + ports: + - '8888:8080' + + vector: + image: timberio/vector:0.29.1-debian + container_name: vector + volumes: + - ./ci/pod/vector:/etc/vector/ + - ./t/certs:/certs + ports: + - '3000:3000' #tcp logger + - '8127:8127/udp' + - '43000:43000' + - '5140:5140' + - "18088:18088" # For splunk 
logging tests + - '5150:5150/udp' + - "3001:3001" #http logger + networks: + vector_net: + + clickhouse: + image: clickhouse/clickhouse-server:23.4.2-alpine + container_name: clickhouse + ports: + - '8123:8123' + networks: + clickhouse_net: + + clickhouse2: + image: clickhouse/clickhouse-server:23.4.2-alpine + container_name: clickhouse2 + ports: + - '8124:8123' + networks: + clickhouse_net: + otel-collector: + image: otel/opentelemetry-collector-contrib + volumes: + - ./ci/pod/otelcol-contrib:/etc/otelcol-contrib:rw + ports: + - '4318:4318' + + +networks: + apisix_net: + kafka_net: + skywalk_net: + rocketmq_net: + opa_net: + vector_net: + clickhouse_net: + loki_net: diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/etcd/env/common.env b/CloudronPackages/APISIX/apisix-source/ci/pod/etcd/env/common.env new file mode 100644 index 0000000..24ba47f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/etcd/env/common.env @@ -0,0 +1 @@ +ALLOW_NONE_AUTHENTICATION=yes diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/eureka/env/common.env b/CloudronPackages/APISIX/apisix-source/ci/pod/eureka/env/common.env new file mode 100644 index 0000000..11169cb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/eureka/env/common.env @@ -0,0 +1,7 @@ +ENVIRONMENT=apisix +spring.application.name=apisix-eureka +server.port=8761 +eureka.instance.ip-address=127.0.0.1 +eureka.client.registerWithEureka=true +eureka.client.fetchRegistry=false +eureka.client.serviceUrl.defaultZone=http://127.0.0.1:8761/eureka/ diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/env/common.env b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/env/common.env new file mode 100644 index 0000000..06200b9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/env/common.env @@ -0,0 +1,3 @@ +ALLOW_PLAINTEXT_LISTENER=yes +KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true 
+KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092 diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/env/common2.env b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/env/common2.env new file mode 100644 index 0000000..c0fb5f5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/env/common2.env @@ -0,0 +1,8 @@ +ALLOW_PLAINTEXT_LISTENER=yes +KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false +KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:19092,SASL_PLAINTEXT://0.0.0.0:19094 +KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:19092,SASL_PLAINTEXT://127.0.0.1:19094 +KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM= +KAFKA_CFG_SSL_KEYSTORE_LOCATION=/opt/bitnami/kafka/config/certs/kafka.keystore.jks +KAFKA_CFG_SSL_KEYSTORE_PASSWORD=changeit +KAFKA_CFG_SSL_KEY_PASSWORD=changeit diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/env/last.env b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/env/last.env new file mode 100644 index 0000000..adc9d7c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/env/last.env @@ -0,0 +1,8 @@ +ALLOW_PLAINTEXT_LISTENER=yes +KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=false +KAFKA_CFG_LISTENERS=PLAINTEXT://0.0.0.0:9092,SSL://0.0.0.0:9093,SASL_PLAINTEXT://0.0.0.0:9094 +KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9093,SASL_PLAINTEXT://127.0.0.1:9094 +KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM= +KAFKA_CFG_SSL_KEYSTORE_LOCATION=/opt/bitnami/kafka/config/certs/kafka.keystore.jks +KAFKA_CFG_SSL_KEYSTORE_PASSWORD=changeit +KAFKA_CFG_SSL_KEY_PASSWORD=changeit diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/kafka_jaas.conf b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/kafka_jaas.conf new file mode 100644 index 0000000..4bc1938 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/kafka-server/kafka_jaas.conf @@ -0,0 
+1,23 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +KafkaServer { + org.apache.kafka.common.security.plain.PlainLoginModule required + username="admin" + password="admin-secret" + user_admin="admin-secret"; +}; diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/zookeeper-server/env/common.env b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/zookeeper-server/env/common.env new file mode 100644 index 0000000..aa3cf9f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/kafka/zookeeper-server/env/common.env @@ -0,0 +1 @@ +ALLOW_ANONYMOUS_LOGIN=yes diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/kcadm_configure_basic.sh b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/kcadm_configure_basic.sh new file mode 100755 index 0000000..9c2a7b1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/kcadm_configure_basic.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +export PATH=/opt/keycloak/bin:$PATH + +kcadm.sh config credentials --server http://127.0.0.1:8080 --realm master --user admin --password admin + +# create realm +kcadm.sh create realms -s realm=basic -s enabled=true + +# set realm keys with specific private key, reuse tls cert and key +PRIVATE_KEY=$(awk 'NF {sub(/\r/, ""); printf "%s\\n", $0}' /opt/keycloak/conf/server.key.pem) +CERTIFICATE=$(awk 'NF {sub(/\r/, ""); printf "%s\\n", $0}' /opt/keycloak/conf/server.crt.pem) +kcadm.sh create components -r basic -s name=rsa-apisix -s providerId=rsa \ + -s providerType=org.keycloak.keys.KeyProvider \ + -s 'config.priority=["1000"]' \ + -s 'config.enabled=["true"]' \ + -s 'config.active=["true"]' \ + -s "config.privateKey=[\"$PRIVATE_KEY\"]" \ + -s "config.certificate=[\"$CERTIFICATE\"]" \ + -s 'config.algorithm=["RS256"]' + +# create client apisix +kcadm.sh create clients \ + -r basic \ + -s clientId=apisix \ + -s enabled=true \ + -s clientAuthenticatorType=client-secret \ + -s secret=secret \ + -s 'redirectUris=["*"]' \ + -s 'directAccessGrantsEnabled=true' + +# add audience to client apisix, so that the access token will contain the client id ("apisix") as audience +APISIX_CLIENT_UUID=$(kcadm.sh get clients -r basic -q clientId=apisix | jq -r '.[0].id') +kcadm.sh create clients/$APISIX_CLIENT_UUID/protocol-mappers/models \ + -r basic \ + -s protocol=openid-connect \ + -s name=aud \ + -s 
protocolMapper=oidc-audience-mapper \ + -s 'config."id.token.claim"=false' \ + -s 'config."access.token.claim"=true' \ + -s 'config."included.client.audience"=apisix' + +# create client apisix +kcadm.sh create clients \ + -r basic \ + -s clientId=apisix \ + -s enabled=true \ + -s clientAuthenticatorType=client-secret \ + -s secret=secret \ + -s 'redirectUris=["*"]' \ + -s 'directAccessGrantsEnabled=true' + +# create client apisix-no-aud, without client id audience +# according to Keycloak's default implementation, when unconfigured, +# only the account is listed as an audience, not the client id + +kcadm.sh create clients \ + -r basic \ + -s clientId=apisix-no-aud \ + -s enabled=true \ + -s clientAuthenticatorType=client-secret \ + -s secret=secret \ + -s 'redirectUris=["*"]' \ + -s 'directAccessGrantsEnabled=true' + +# create user jack +kcadm.sh create users -r basic -s username=jack -s enabled=true +kcadm.sh set-password -r basic --username jack --new-password jack diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/kcadm_configure_cas.sh b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/kcadm_configure_cas.sh new file mode 100644 index 0000000..3486667 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/kcadm_configure_cas.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -ex + +export PATH=/opt/keycloak/bin:$PATH + +kcadm.sh config credentials --server http://localhost:8080 --realm master --user admin --password admin + +kcadm.sh create realms -s realm=test -s enabled=true + +kcadm.sh create users -r test -s username=test -s enabled=true +kcadm.sh set-password -r test --username test --new-password test + +clients=("cas1" "cas2") +rootUrls=("http://127.0.0.1:1984" "http://127.0.0.2:1984") + +for i in ${!clients[@]}; do + kcadm.sh create clients -r test -s clientId=${clients[$i]} -s enabled=true \ + -s protocol=cas -s frontchannelLogout=false -s rootUrl=${rootUrls[$i]} -s 'redirectUris=["/*"]' +done diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/kcadm_configure_university.sh b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/kcadm_configure_university.sh new file mode 100644 index 0000000..162b624 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/kcadm_configure_university.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +export PATH=/opt/keycloak/bin:$PATH + +kcadm.sh config credentials --server http://localhost:8080 --realm master --user admin --password admin + +# create realm University +kcadm.sh create realms -s realm=University -s enabled=true + +# create roles `Teacher, Student` +kcadm.sh create roles -r University -s name=Teacher +kcadm.sh create roles -r University -s name=Student + +# create users `teacher@gmail.com, student@gmail.com` +kcadm.sh create users -r University -s username=teacher@gmail.com -s enabled=true +kcadm.sh create users -r University -s username=student@gmail.com -s enabled=true + +# set password +kcadm.sh set-password -r University --username teacher@gmail.com --new-password 123456 +kcadm.sh set-password -r University --username student@gmail.com --new-password 123456 + +# bind roles to users +kcadm.sh add-roles -r University --uusername teacher@gmail.com --rolename Teacher +kcadm.sh add-roles -r University --uusername student@gmail.com --rolename Student + +# create client course_management +kcadm.sh create clients -r University -s clientId=course_management -s enabled=true -s clientAuthenticatorType=client-secret -s secret=d1ec69e9-55d2-4109-a3ea-befa071579d5 + +client_id=$(kcadm.sh get clients -r University --fields id,clientId 2>/dev/null | jq -r '.[] | select(.clientId=='\"course_management\"') | .id') +teacher_id=$(kcadm.sh get roles -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"Teacher\"') | .id') +student_id=$(kcadm.sh get roles -r University --fields id,name 2>/dev/null | jq -r '.[] | 
select(.name=='\"Student\"') | .id') + +# update client course_management +kcadm.sh update clients/${client_id} -r University -s protocol=openid-connect -s standardFlowEnabled=true \ + -s implicitFlowEnabled=true -s directAccessGrantsEnabled=true -s serviceAccountsEnabled=true \ + -s authorizationServicesEnabled=true -s 'redirectUris=["*"]' -s 'webOrigins=["*"]' + +kcadm.sh update clients/${client_id}/authz/resource-server -r University -s allowRemoteResourceManagement=false -s policyEnforcementMode="ENFORCING" + +# create authz-resource with name `course_resource`, uri `/course/*`, scope `DELETE, delete, view, GET` +kcadm.sh create clients/${client_id}/authz/resource-server/resource -r University -s name=course_resource \ + -s ownerManagedAccess=false -s uris='["/course/*"]' -s scopes='[{"name": "DELETE"},{"name": "view"},{"name": "GET"},{"name": "delete"}]' + +course_resource_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/resource -r University --fields _id,name 2>/dev/null | jq -r '.[] | select(.name=='\"course_resource\"') | ._id') +DELETE_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"DELETE\"') | .id') +delete_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"delete\"') | .id') +GET_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"GET\"') | .id') +view_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"view\"') | .id') + +# create authz-policy `AllowTeacherPolicy, AllowStudentPolicy` +kcadm.sh create clients/${client_id}/authz/resource-server/policy/role -r University \ + -s name="AllowTeacherPolicy" -s logic="POSITIVE" -s decisionStrategy="UNANIMOUS" \ + -s 
roles='[{"id": '\"${teacher_id}\"'}]' + +kcadm.sh create clients/${client_id}/authz/resource-server/policy/role -r University \ + -s name="AllowStudentPolicy" -s logic="POSITIVE" -s decisionStrategy="UNANIMOUS" \ + -s roles='[{"id": '\"${student_id}\"'}]' + +allow_teacher_policy_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/policy -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"AllowTeacherPolicy\"') | .id') +allow_student_policy_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/policy -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"AllowStudentPolicy\"') | .id') + +# create authz-permission `Delete Course Permission` and `View Course Permission` +kcadm.sh create clients/${client_id}/authz/resource-server/permission/scope -r University \ + -s name="Delete Course Permission" -s logic="POSITIVE" -s decisionStrategy="UNANIMOUS" \ + -s policies='['\"${allow_teacher_policy_id}\"']' \ + -s scopes='['\"${DELETE_scope_id}\"', '\"${delete_scope_id}\"']' \ + -s resources='['\"${course_resource_id}\"']' + +kcadm.sh create clients/${client_id}/authz/resource-server/permission/scope -r University \ + -s name="View Course Permission" -s logic="POSITIVE" -s decisionStrategy="AFFIRMATIVE" \ + -s policies='['\"${allow_teacher_policy_id}\"', '\"${allow_student_policy_id}\"']' \ + -s scopes='['\"${GET_scope_id}\"', '\"${view_scope_id}\"']' \ + -s resources='['\"${course_resource_id}\"']' diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/server.crt.pem b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/server.crt.pem new file mode 100644 index 0000000..9c7bde3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/server.crt.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIUbZfnhty/ZiHPz5Aq8kK5Kr8kcSQwDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM 
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA0MTgxMTQzNDJaFw0zMzA0 +MTUxMTQzNDJaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQC/F4wK7eMTVAKGDMLCXE+Y6REdA5GU6/AakJf3NEKQ +wCrtrqO+VBPIz445+edf3EEXhjFFGPdU6p0EkF0SMLaMsVBQQJ2qcP6FloIYiyT3 +WCs/gbtdoWq53ucAfWueIyHWsovLc0VhOXm0rhTYg88nMjJ7y6vYkfLMT6qlwASn +9Tozgjat09fWATbN7yBi4ivVVsKDo2S3jkOyVnYYMjzZO3CSkyUSMl+ZsSesseSK +A9c2zogfKIU833njraA8blMFfdinEMI/9yceEx57IUjnpY1iWHLSItiZF+LKEpeL +vp9gpr88ghR85ISusqAqwcmnsdAqjjw7gbPm1DIvUgVBAgMBAAGjUzBRMB0GA1Ud +DgQWBBRvlz5ZiE2fD9ikPRqpYwsVrxZfxTAfBgNVHSMEGDAWgBRvlz5ZiE2fD9ik +PRqpYwsVrxZfxTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCX +5fOeFnX67eHI5dJB8p3U2GS21qykDVLV5ZV+JZfZwXJEygIvr/T9vs772EPxv+0/ +TO0+pGdcVswXq/6BoUFCV0rWWTDP5wTS3sV1ZsSSHil5zEutXuAI1LQGlit6w5xn +iDURFZw3ZmOFytXKXNbca1ma4yaCZtOwVe3O36GZeOiZFzBYE2DELqy77Nz1E5+3 +jZaDnx0vonV8/hhX6FAPRPQnIXkaEH3BnVQZGD1jxipbFQQtmeeNPELy18MQo30N +W1wOsbMMouniKUjdT16tdtzJzC+l9pVqRC+8df5PJfN56Uv9Ed6pjytkSF1SvHyJ +iTWmyxJL9AonUkc5Oiri +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/server.key.pem b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/server.key.pem new file mode 100755 index 0000000..f2bc2d9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/keycloak/server.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC/F4wK7eMTVAKG +DMLCXE+Y6REdA5GU6/AakJf3NEKQwCrtrqO+VBPIz445+edf3EEXhjFFGPdU6p0E +kF0SMLaMsVBQQJ2qcP6FloIYiyT3WCs/gbtdoWq53ucAfWueIyHWsovLc0VhOXm0 +rhTYg88nMjJ7y6vYkfLMT6qlwASn9Tozgjat09fWATbN7yBi4ivVVsKDo2S3jkOy +VnYYMjzZO3CSkyUSMl+ZsSesseSKA9c2zogfKIU833njraA8blMFfdinEMI/9yce +Ex57IUjnpY1iWHLSItiZF+LKEpeLvp9gpr88ghR85ISusqAqwcmnsdAqjjw7gbPm +1DIvUgVBAgMBAAECggEBAKUrrkGYI2mGePPzPbiP38E02zTv67sEQLJFfwUOp+bE +I5b0F9agh8VQGghkyKgkEiNKO3YVQVuluvjB66CYeIGdleT4JQ+4wVcoo+ShCN++ 
+1wr6kMA6kKx+Tb8vqYCzr0ELbSf6x+Jksp0Ixz3qmHixu88jWbNFW89boQ3JrnyZ +TUgRSRdPoXcxspwcbhy6mMhwUfUSy8Zcck81dBEAjokvzbYh4jtFYMipWqro66KJ +B9uqQme2J/rN/2PSrA6chI85Wa+JaGOSPDaGNp+DrADjoVZf1tXgzGCsA/lmVtQ0 +8YN4Dh21EjLxz4Dj5GE7RWET4Ejvv1XEih1p+zKne00CgYEA327raCD5Fnr1nGTb +Q4ZWkcDR6EGSD6JGD0ur+UqqJhirM/5b4iGcsVK5uufb5dwk9+9z0EucXOVq/il0 +vgG2FbgRYM8kx3CDLvMYAqKJ8e5NsGJWwJVq6DsmsO1SaEId+SVFH83RHfG5/ksq +/DgRg0Wl9FoL7sHchuSIP2QiLrMCgYEA2vHcKsMZk/KGMBHVffY3PUckirIM6vLa +idMmm0T0HSAdviZRxQGyOnjd93ZhMqFJPTrmHOq0uAxfdFt+oRoHk/pGarBCv76L +NnPrSnVe1pJOh7Mm7LHLgrAgeM2WW7xz6jZwc8On+9qHK97I/wAnJB8J7DvQJ2hR +sWCDSbfKtjsCgYEAnVE77tVIjMuGo9dfiuvLiFR7d0yzys43Bg4ByEUKCEjWQoWV +rGJ+MVxN6YvXCME4RloS8VZLgh0GeG44BJCv5Br2IXO4MbTGqQgAn9pRxkZD7S1Q +Z8jMvTboxypSG5ZyBDp5sSr5Ulwg2SuT2IKh0gv4DVRZkoJtA41lYTzf1IECgYBd +3NJGgt20T4S3lu2v0p5b5uQDkdF36CVIcP1cE3OUCPC3VDY5/0ApUSfXryh8TCjZ +1yZPv086mBNUDuV6q24UQndtxaLYERgdgBSfFzJRSuffxS4qyw40OM2y/HA5Y9FN +14jeGEMr9cN9S0VgDPC6y5O1cu8J9e8P3BBsyh5dgQKBgHMlIhOJDO/neVnax79X +d3+5GaiggUnkd27OkYC4LhXEc/QWeHE0ByA0bDhhnsE7IVK2CVC18axOLmEJVy2g +F6ZtxcpNrlVtF4YaOiRVUcDNnz9gX48efrpdoX2iBSFEd1NRDo/bjkVXI1L08LNf +BbMB104PadChoGpl5R3NQQsP +-----END PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/env/common.env b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/env/common.env new file mode 100644 index 0000000..1a549cb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/env/common.env @@ -0,0 +1,6 @@ +EMBEDDED_STORAGE=embedded +PREFER_HOST_MODE=hostname +MODE=cluster +NACOS_SERVERS="nacos1:8848 nacos2:8848" +JVM_XMS=512m +JVM_XMX=512m diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/env/service.env b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/env/service.env new file mode 100644 index 0000000..d09eaba --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/env/service.env @@ -0,0 +1,2 @@ +SERVICE_NAME=APISIX-NACOS +NACOS_ADDR=nacos2:8848 diff --git 
a/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/healthcheck/Dockerfile b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/healthcheck/Dockerfile new file mode 100644 index 0000000..e4109df --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/healthcheck/Dockerfile @@ -0,0 +1,30 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +FROM alpine:latest + +# change workdir to / +WORKDIR / + +# install curl +RUN apk --no-cache add bash curl + +# add healthcheck script +COPY *.sh / + +# add hosted process +CMD ["cat"] diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/healthcheck/nacos-server-healthcheck.sh b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/healthcheck/nacos-server-healthcheck.sh new file mode 100644 index 0000000..232f457 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/healthcheck/nacos-server-healthcheck.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +set -ex + +# nacos server healthcheck +REQ_STATUS=$(curl -s -o /dev/null -w '%{http_code}' "${CHECK_URI}") + +if [ "${REQ_STATUS}" -ne "200" ]; then + exit 1; +fi diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/healthcheck/nacos-service-healthcheck.sh b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/healthcheck/nacos-service-healthcheck.sh new file mode 100644 index 0000000..bd540d7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/healthcheck/nacos-service-healthcheck.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -ex + +# nacos service healthcheck +URI_LIST=( + "http://nacos2:8848/nacos/v1/ns/service/list?pageNo=1&pageSize=2" + "http://nacos2:8848/nacos/v1/ns/service/list?groupName=test_group&pageNo=1&pageSize=2" + "http://nacos2:8848/nacos/v1/ns/service/list?groupName=DEFAULT_GROUP&namespaceId=test_ns&pageNo=1&pageSize=2" + "http://nacos2:8848/nacos/v1/ns/service/list?groupName=test_group&namespaceId=test_ns&pageNo=1&pageSize=2" +) + +for URI in "${URI_LIST[@]}"; do + if [[ $(curl -s "${URI}" | grep "APISIX-NACOS") ]]; then + continue + else + exit 1; + fi +done + + +for IDX in {1..7..1}; do + REQ_STATUS=$(curl -s -o /dev/null -w '%{http_code}' "http://nacos-service${IDX}:18001/hello") + if [ "${REQ_STATUS}" -ne "200" ]; then + exit 1; + fi +done diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/service/Dockerfile b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/service/Dockerfile new file mode 100644 index 0000000..d279c74 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/nacos/service/Dockerfile @@ -0,0 +1,32 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +FROM eclipse-temurin:8 + +ENV SUFFIX_NUM=${SUFFIX_NUM:-1} +ENV NACOS_ADDR=${NACOS_ADDR:-127.0.0.1:8848} +ENV SERVICE_NAME=${SERVICE_NAME:-gateway-service} +ENV NAMESPACE=${NAMESPACE} +ENV GROUP=${GROUP:-DEFAULT_GROUP} + +ADD https://raw.githubusercontent.com/api7/nacos-test-service/main/spring-nacos-1.0-SNAPSHOT.jar /app.jar + +ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","/app.jar",\ + "--suffix.num=${SUFFIX_NUM}","--spring.cloud.nacos.discovery.server-addr=${NACOS_ADDR}",\ + "--spring.application.name=${SERVICE_NAME}","--spring.cloud.nacos.discovery.group=${GROUP}",\ + "--spring.cloud.nacos.discovery.namespace=${NAMESPACE}"] +EXPOSE 18001 diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/opa/data.json b/CloudronPackages/APISIX/apisix-source/ci/pod/opa/data.json new file mode 100644 index 0000000..b1652ed --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/opa/data.json @@ -0,0 +1,30 @@ +{ + "users": { + "alice": { + "headers": { + "Location": "http://example.com/auth" + }, + "status_code": 302 + }, + "bob": { + "headers": { + "test": "abcd", + "abcd": "test" + } + }, + "carla": { + "reason": "Give you a string reason" + }, + "dylon": { + "reason": { + "code": 40001, + "desc": "Give you a object reason" + } + }, + "elisa": { + "reason": { + "info": [] + } + } + } +} diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/opa/echo.rego b/CloudronPackages/APISIX/apisix-source/ci/pod/opa/echo.rego new file mode 100644 index 0000000..611f64f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/opa/echo.rego @@ -0,0 +1,20 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +package echo + +allow = false +reason = input diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/opa/example.rego b/CloudronPackages/APISIX/apisix-source/ci/pod/opa/example.rego new file mode 100644 index 0000000..a916104 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/opa/example.rego @@ -0,0 +1,55 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +package example + +import input.request +import data.users + +default allow = false + +allow { + request.headers["test-header"] == "only-for-test" + request.method == "GET" + startswith(request.path, "/hello") + request.query["test"] != "abcd" + request.query["user"] +} + +allow { + request.method == "GET" + startswith(request.path, "/echo") +} + +reason = users[request.query["user"]].reason { + not allow + request.query["user"] +} + +headers = users[request.query["user"]].headers { + not allow + request.query["user"] +} + +headers = {"user": request.query["user"]} { + allow + request.query["user"] +} + +status_code = users[request.query["user"]].status_code { + not allow + request.query["user"] +} diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/opa/with_route.rego b/CloudronPackages/APISIX/apisix-source/ci/pod/opa/with_route.rego new file mode 100644 index 0000000..c6a848e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/opa/with_route.rego @@ -0,0 +1,24 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +package with_route +default allow = false + +allow { + input.route.name == "valid" +} + +status_code = 403 {not allow} diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/build-function-image.sh b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/build-function-image.sh new file mode 100755 index 0000000..dc9f34a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/build-function-image.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +set -xeuo pipefail + +if [ ! 
-f "./pack" ]; then + wget -q https://github.com/buildpacks/pack/releases/download/v0.27.0/pack-v0.27.0-linux.tgz + tar -zxvf pack-v0.27.0-linux.tgz +fi + +# please update function-example/*/hello.go if you want to update function +./pack build test-uri-image --path ./ci/pod/openfunction/function-example/test-uri --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://proxy.golang.org" +./pack build test-body-image --path ./ci/pod/openfunction/function-example/test-body --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://proxy.golang.org" +./pack build test-header-image --path ./ci/pod/openfunction/function-example/test-header --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://proxy.golang.org" diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-body/go.mod b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-body/go.mod new file mode 100644 index 0000000..bf571e0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-body/go.mod @@ -0,0 +1,31 @@ +module example.com/hello + +go 1.17 + +require github.com/OpenFunction/functions-framework-go v0.3.0 + +require ( + github.com/SkyAPM/go2sky v1.4.1 // indirect + github.com/cloudevents/sdk-go/v2 v2.4.1 // indirect + github.com/dapr/dapr v1.6.0 // indirect + github.com/dapr/go-sdk v1.3.1 // indirect + github.com/go-logr/logr v1.2.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/json-iterator/go v1.1.11 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/pkg/errors v0.9.1 // indirect + go.uber.org/atomic 
v1.9.0 // indirect + go.uber.org/multierr v1.7.0 // indirect + go.uber.org/zap v1.19.1 // indirect + golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect + golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect + google.golang.org/grpc v1.40.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/yaml.v3 v3.0.0 // indirect + k8s.io/klog/v2 v2.30.0 // indirect + skywalking.apache.org/repo/goapi v0.0.0-20220401015832-2c9eee9481eb // indirect +) diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-body/go.sum b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-body/go.sum new file mode 100644 index 0000000..f51a530 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-body/go.sum @@ -0,0 +1,1760 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= 
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.86.0/go.mod h1:YG2MRW8zzPSZaztnTZtxbMPK2VYaHg4NTDYZMG+5ZqQ= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod 
h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.12.2/go.mod h1:BmI/dqa6eXfm8WTp+JIN6d6vtVGq+vcsnglFKn/aVkY= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/zipkin v0.1.1/go.mod h1:GMvdSl3eJ2gapOaLKzTKE3qDgUkJ86k9k3yY2eqwkzc= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/99designs/keyring v1.1.5/go.mod h1:7hsVvt2qXgtadGevGJ4ujg+u8m6SpJ5TpHqTozIPqf0= +github.com/AdhityaRamadhanus/fasthttpcors v0.0.0-20170121111917-d4c07198763a/go.mod h1:C0A1KeiVHs+trY6gUTPhhGammbrZ30ZfXRW/nuT7HLw= +github.com/AthenZ/athenz v1.10.15/go.mod h1:7KMpEuJ9E4+vMCMI3UQJxwWs0RZtQq7YXZ1IteUjdsc= +github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= +github.com/Azure/azure-amqp-common-go/v3 v3.1.0/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= +github.com/Azure/azure-event-hubs-go/v3 v3.3.10/go.mod h1:sszMsQpFy8Au2s2NColbnJY8lRVm1koW0XxBJ3rN5TY= +github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod 
h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v59.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.20.0/go.mod h1:ZPW/Z0kLCTdDZaDbYTetxc9Cxl/2lNqxYHYNOF2bti0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.12.0/go.mod h1:GJzjM4SR9T0KyX5gKCVyz1ytD8FeWeUPCwtFCt1AyfE= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.1/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.3.0/go.mod h1:aJ4Pej3ivJnoNJ4UPgh/snHVLSSV2Mcc62srBQZ4TWE= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.1.0/go.mod h1:qKJHexVLI0iqKFeV/2WnqbRBQtJTPOMeBdmHOxs+E88= +github.com/Azure/azure-service-bus-go v0.10.10/go.mod h1:o5z/3lDG1iT/T/G7vgIwIqVDTx9Qa2wndf5OdzSzpF8= +github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= +github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= +github.com/Azure/go-amqp v0.13.1/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= 
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.7/go.mod h1:V6p3pKZx1KKkJubbxnDWrzNhEIfOy/pTGasLqzHIPHs= +github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.23/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.4/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod 
h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing 
v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.6-0.20210211175136-c6db21d202f4/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.7/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/OpenFunction/functions-framework-go v0.3.0 h1:yiVwk7IysrMPnG3eCOgRLZbpsCUaYU3gRYA7dqIPREo= +github.com/OpenFunction/functions-framework-go v0.3.0/go.mod h1:DbssgwZJRVd8VOls6aLpQwqBWu6gbDM4G+7RwwCJMEQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/SkyAPM/go2sky v1.4.1 h1:FV0jUB8UeC5CW0Z12j8xgrK0LoVV85Z92ShQU0G3Xfo= +github.com/SkyAPM/go2sky v1.4.1/go.mod h1:cebzbFtq5oc9VrgJy0Sv7oePj/TjIlXPdj2ntHdCXd0= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/a8m/documentdb v1.3.1-0.20211026005403-13c3593b3c3a/go.mod h1:4Z0mpi7fkyqjxUdGiNMO3vagyiUoiwLncaIX6AsW5z0= +github.com/aerospike/aerospike-client-go v4.5.0+incompatible/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/agrea/ptr v0.0.0-20180711073057-77a518d99b7b/go.mod h1:Tie46d3UWzXpj+Fh9+DQTyaUxEpFBPOLXrnx7nxlKRo= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units 
v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alibaba/sentinel-golang v1.0.3/go.mod h1:Lag5rIYyJiPOylK8Kku2P+a23gdKMMqzQS7wTnjWEpk= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.2/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/darabonba-openapi v0.1.4/go.mod h1:j03z4XUkIC9aBj/w5Bt7H0cygmPNt5sug8NXle68+Og= +github.com/alibabacloud-go/darabonba-openapi v0.1.14/go.mod h1:w4CosR7O/kapCtEEMBm3JsQqWBU/CnZ2o0pHorsTWDI= +github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY= +github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/oos-20190601 v1.0.1/go.mod h1:t7g1ubvGwLe0cP+uLSrTza2S6xthOFZw43h9Zajt+Kw= +github.com/alibabacloud-go/openapi-util v0.0.7/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg= +github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.11/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.15/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= 
+github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis/v2 v2.13.3/go.mod h1:uS970Sw5Gs9/iK3yBg0l9Uj9s25wXxSpQUE9EaJ/Blg= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.18/go.mod h1:v8ESoHo4SyHmuB4b1tJqDHxfTGEciD+yhvOU/5s1Rfk= +github.com/aliyun/aliyun-oss-go-sdk v2.0.7+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-tablestore-go-sdk v1.6.0/go.mod h1:jixoiNNRR/4ziq0yub1fTlxmDcQwlpkaujpaWIATQWM= +github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= +github.com/aliyunmq/mq-http-go-sdk v1.0.3/go.mod h1:JYfRMQoPexERvnNNBcal0ZQ2TVQ5ialDiW9ScjaadEM= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/apache/pulsar-client-go v0.6.1-0.20211027182823-171ef578e91a/go.mod h1:EauTUv9sTmP9QRznRgK9hxnzCsIVfS8fyhTfGcuJBrE= +github.com/apache/pulsar-client-go/oauth2 v0.0.0-20201120111947-b8bd55bc02bd/go.mod 
h1:0UtvvETGDdvXNDCHa8ZQpxl+w3HbdFtfYZvDHLgWGTY= +github.com/apache/rocketmq-client-go v1.2.5/go.mod h1:Kap8oXIVLlHF50BGUbN9z97QUp1GaK1nOoCfsZnR2bw= +github.com/apache/rocketmq-client-go/v2 v2.1.0/go.mod h1:oEZKFDvS7sz/RWU0839+dQBupazyBV7WX5cP6nrio0Q= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/ardielle/ardielle-go v1.5.2/go.mod h1:I4hy1n795cUhaVt/ojz83SNVCYIGsAFAONtv2Dr7HUI= +github.com/ardielle/ardielle-tools v1.5.4/go.mod h1:oZN+JRMnqGiIhrzkRN9l26Cej9dEx4jeNG6A+AdkShk= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef/go.mod h1:JS7hed4L1fj0hXcyEejnW57/7LCetXggd+vwrRnYeII= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/aws/aws-lambda-go v1.13.3/go.mod 
h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.32.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.41.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/beefsack/go-rate v0.0.0-20180408011153-efa7637bb9b6/go.mod h1:6YNgTHLutezwnBvyneBbwvB8C82y3dcoOj5EQJIdGXA= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver 
v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/camunda-cloud/zeebe/clients/go v1.0.1/go.mod h1:slW2ZP0pMmiZdxBLJHjGxax+E2AjjLFB608DRhounJI= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cinience/go_rocketmq v0.0.2/go.mod h1:2YNY7emT546dcFpMEWLesmAEi4ndW7+tX5VfNf1Zsgs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudevents/sdk-go/v2 v2.4.1 h1:rZJoz9QVLbWQmnvLPDFEmv17Czu+CfSPwMO6lhJ72xQ= +github.com/cloudevents/sdk-go/v2 v2.4.1/go.mod h1:MZiMwmAh5tGj+fPFvtHv9hKurKqXtdB9haJYMJ/7GJY= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= 
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/dancannon/gorethink v4.0.0+incompatible/go.mod h1:BLvkat9KmZc1efyYwhz3WnybhRZtgF1K929FD8z1avU= 
+github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U= +github.com/dapr/components-contrib v1.6.0-rc.2/go.mod h1:30BaLseZXoK+UPD5E93dCTGZwlG3nWLNJazoJ9bKGlU= +github.com/dapr/dapr v1.6.0 h1:zc6/jHVkD4LkNosVM+PNVDPBnmwYqnXXPD7knvE9etU= +github.com/dapr/dapr v1.6.0/go.mod h1:ilH7anASii1b6hBRy2GTmf63Kj1/ejjaN9GcQJ2z5R8= +github.com/dapr/go-sdk v1.3.1 h1:VI7vp3ZwZu+O8k9vPZ0gTTCRywj+ZsLm7MIQqB9S7FU= +github.com/dapr/go-sdk v1.3.1/go.mod h1:tFH/t0z3qypmk5CXHvYSjf/1dGVi04voXfNnhbGgy/A= +github.com/dapr/kit v0.0.2-0.20210614175626-b9074b64d233/go.mod h1:y8r0VqUNKyd6xBXp7gQjwA59wlCLGfKzL5J8iJsN09w= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deepmap/oapi-codegen v1.3.6/go.mod h1:aBozjEveG+33xPiP55Iw/XbVkhtZHEGLq3nxlX0+hfU= +github.com/deepmap/oapi-codegen v1.8.1/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/denisenkom/go-mssqldb v0.0.0-20210411162248-d9abbec934ba/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dghubble/go-twitter v0.0.0-20190719072343-39e5462e111f/go.mod h1:xfg4uS5LEzOj8PgZV7SQYRHbG7jPUnelEiaAVJxmhJE= +github.com/dghubble/oauth1 v0.6.0/go.mod h1:8pFdfPkv/jr8mkChVbNVuJ0suiHe278BtWI4Tk1ujxk= +github.com/dghubble/sling v1.3.0/go.mod h1:XXShWaBWKzNLhu2OxikSNFrlsvowtz4kyRuXUG7oQKY= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.1-0.20210802184156-9742bd7fca1c+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dgryski/go-sip13 
v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/didip/tollbooth v4.0.2+incompatible/go.mod h1:A9b0665CE6l1KmzpDws2++elm/CsuWBMa5Jv4WY0PEY= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v17.12.0-ce-rc1.0.20200916142827-bd33bbf0497b+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dvsekhvalnov/jose2go v0.0.0-20180829124132-7f401d37b68a/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= 
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/fasthttp-contrib/sessions v0.0.0-20160905201309-74f6ac73d5d5/go.mod h1:MQXNGeXkpojWTxbN7vXoE3f7EmlA11MlJbsrJpVBINA= +github.com/fasthttp/router v1.3.8/go.mod h1:DQBvuHvYbn3SUN6pGjwjPbpCNpWfCFc5Ipn/Fj6XxFc= +github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239/go.mod h1:Gdwt2ce0yfBxPvZrHkprdPPTTS3N5rwmLE8T22KBXlw= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/getkin/kin-openapi v0.2.0/go.mod h1:V1z9xl9oF5Wt7v32ne4FmiF1alpS4dM6mNzoywPOXlk= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= 
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= 
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt 
v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-redis/redis/v8 v8.8.0/go.mod h1:F7resOH5Kdug49Otu24RjHWwgK7u9AmtqWMnCV1iP5Y= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod 
h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod 
h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogap/errors v0.0.0-20200228125012-531a6449b28c/go.mod h1:tbRYYYC7g/H7QlCeX0Z2zaThWKowF4QQCFIsGgAsqRo= +github.com/gogap/stack v0.0.0-20150131034635-fef68dddd4f8/go.mod h1:6q1WEv2BiAO4FSdwLQTJbWQYAn1/qDNJHUGJNXCj9kM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 
+github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod 
h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v0.0.0-20181025225059-d3de96c4c28e/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy 
v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= +github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/gorilla/mux v0.0.0-20181024020800-521ea7b17d02/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket 
v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grandcat/zeroconf v0.0.0-20190424104450-85eadb44205c/go.mod h1:YjKB0WsLXlMkO9p+wGTCoPIDGRJH0mz7E526PxkQVxI= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod 
h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru 
v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hazelcast/hazelcast-go-client v0.0.0-20190530123621-6cf767c2f31a/go.mod h1:VhwtcZ7sg3xq7REqGzEy7ylSWGKz4jZd05eCJropNzI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb-client-go v1.4.0/go.mod h1:S+oZsPivqbcP1S9ur+T+QqXvrYS3NCZeMQtBoH4D1dw= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= 
+github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod 
h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jawher/mow.cli v1.0.4/go.mod h1:5hQj2V8g+qYmLUVWqu4Wuja1pI57M83EChYLVZ0sMKk= +github.com/jawher/mow.cli v1.2.0/go.mod h1:y+pcA3jBAdo/GIZx/0rFjw/K2bVEODP9rfZOfaiq8Ko= +github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath 
v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod 
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kataras/go-errors v0.0.3/go.mod h1:K3ncz8UzwI3bpuksXt5tQLmrRlgxfv+52ARvAu1+I+o= +github.com/kataras/go-serializer v0.0.4/go.mod h1:/EyLBhXKQOJ12dZwpUZZje3lGy+3wnvG7QKaVJtm/no= +github.com/keighl/postmark v0.0.0-20190821160221-28358b1a94e3/go.mod h1:Pz+php+2qQ4fWYwCa5O/rcnovTT2ylkKg3OnMLuFUbg= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d/go.mod h1:JJNrCn9otv/2QP4D7SMJBgaleKpOf66PnW6F5WGNRIc= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lestrrat/go-envload v0.0.0-20180220120943-6ed08b54a570/go.mod h1:BLt8L9ld7wVsvEWQbuLrUZnCMnUmLZ+CGDzKtclrTlE= +github.com/lestrrat/go-file-rotatelogs v0.0.0-20180223000712-d3151e2a480f/go.mod h1:UGmTpUd3rjbtfIpwAPrcfmGf/Z1HS95TATB+m57TPB8= +github.com/lestrrat/go-strftime v0.0.0-20180220042222-ba3bf9c1d042/go.mod h1:TPpsiPUEh0zFL1Snz4crhMlBe60PYxRHr5oFF3rRYg0= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= 
+github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linkedin/goavro/v2 v2.9.8/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/machinebox/graphql v0.2.2/go.mod h1:F+kbVMHuwrQ5tYgU9JXlnskM8nOaFxCAEolaQybkjWA= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/matoous/go-nanoid v1.5.0/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U= +github.com/matoous/go-nanoid/v2 v2.0.0/go.mod h1:FtS4aGPVfEkxKxhdWPAspZpZSh1cOjtM7Ej/So3hR0g= +github.com/matryer/is v1.4.0/go.mod 
h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.0-20181025052659-b20a3daf6a39/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/microcosm-cc/bluemonday v1.0.7/go.mod h1:HOT/6NaBlR0f9XlxD3zolN6Z3N8Lp4pvhp+jLS5ihnI= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/highwayhash v1.0.0/go.mod h1:xQboMTeM9nY9v/LlAOxFctujiv5+Aq2hR5dxBpaMbdc= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nacos-group/nacos-sdk-go v1.0.8/go.mod h1:hlAPn3UdzlxIlSILAyOXKxjFSvDJ9oLzTJ9hLAK1KzA= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= 
+github.com/nats-io/jwt v0.3.3-0.20200519195258-f2bf5ce574c7/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M= +github.com/nats-io/jwt v1.1.0/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.0-20200916203241-1f8ce17dff02/go.mod h1:vs+ZEjP+XKy8szkBmQwCB7RjYdIlMaPsFPs4VdS4bTQ= +github.com/nats-io/jwt/v2 v2.0.0-20201015190852-e11ce317263c/go.mod h1:vs+ZEjP+XKy8szkBmQwCB7RjYdIlMaPsFPs4VdS4bTQ= +github.com/nats-io/jwt/v2 v2.0.0-20210125223648-1c24d462becc/go.mod h1:PuO5FToRL31ecdFqVjc794vK0Bj0CwzveQEDvkb7MoQ= +github.com/nats-io/jwt/v2 v2.0.0-20210208203759-ff814ca5f813/go.mod h1:PuO5FToRL31ecdFqVjc794vK0Bj0CwzveQEDvkb7MoQ= +github.com/nats-io/jwt/v2 v2.0.1/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.1.8-0.20200524125952-51ebd92a9093/go.mod h1:rQnBf2Rv4P9adtAs/Ti6LfFmVtFG6HLhl/H7cVshcJU= +github.com/nats-io/nats-server/v2 v2.1.8-0.20200601203034-f8d6dd992b71/go.mod h1:Nan/1L5Sa1JRW+Thm4HNYcIDcVRFc5zK9OpSZeI2kk4= +github.com/nats-io/nats-server/v2 v2.1.8-0.20200929001935-7f44d075f7ad/go.mod h1:TkHpUIDETmTI7mrHN40D1pzxfzHZuGmtMbtb83TGVQw= +github.com/nats-io/nats-server/v2 v2.1.8-0.20201129161730-ebe63db3e3ed/go.mod h1:XD0zHR/jTXdZvWaQfS5mQgsXj6x12kMjKLyAk/cOGgY= +github.com/nats-io/nats-server/v2 v2.1.8-0.20210205154825-f7ab27f7dad4/go.mod h1:kauGd7hB5517KeSqspW2U1Mz/jhPbTrE8eOXzUPk1m0= +github.com/nats-io/nats-server/v2 v2.1.8-0.20210227190344-51550e242af8/go.mod h1:/QQ/dpqFavkNhVnjvMILSQ3cj5hlmhB66adlgNbjuoA= +github.com/nats-io/nats-server/v2 v2.1.9/go.mod h1:9qVyoewoYXzG1ME9ox0HwkkzyYvnlBDugfR4Gg/8uHU= +github.com/nats-io/nats-server/v2 v2.2.1-0.20210330155036-61cbd74e213d/go.mod h1:eKlAaGmSQHZMFQA6x56AaP5/Bl9N3mWF4awyT2TTpzc= +github.com/nats-io/nats-server/v2 v2.2.1/go.mod 
h1:A+5EOqdnhH7FvLxtAK6SEDx6hyHriVOwf+FT/eEV99c= +github.com/nats-io/nats-streaming-server v0.21.2/go.mod h1:2W8QfNVOtcFpmf0bRiwuLtRb0/hkX4NuOxPOFNOThVQ= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE= +github.com/nats-io/nats.go v1.10.1-0.20200531124210-96f2130e4d55/go.mod h1:ARiFsjW9DVxk48WJbO3OSZ2DG8fjkMi7ecLmXoY/n9I= +github.com/nats-io/nats.go v1.10.1-0.20200606002146-fc6fed82929a/go.mod h1:8eAIv96Mo9QW6Or40jUHejS7e4VwZ3VRYD6Sf0BTDp4= +github.com/nats-io/nats.go v1.10.1-0.20201021145452-94be476ad6e0/go.mod h1:VU2zERjp8xmF+Lw2NH4u2t5qWZxwc7jB3+7HVMWQXPI= +github.com/nats-io/nats.go v1.10.1-0.20210127212649-5b4924938a9a/go.mod h1:Sa3kLIonafChP5IF0b55i9uvGR10I3hPETFbi4+9kOI= +github.com/nats-io/nats.go v1.10.1-0.20210211000709-75ded9c77585/go.mod h1:uBWnCKg9luW1g7hgzPxUjHFRI40EuTSX7RCzgnc74Jk= +github.com/nats-io/nats.go v1.10.1-0.20210228004050-ed743748acac/go.mod h1:hxFvLNbNmT6UppX5B5Tr/r3g+XSwGjJzFn6mxPNJEHc= +github.com/nats-io/nats.go v1.10.1-0.20210330225420-a0b1f60162f8/go.mod h1:Zq9IEHy7zurF0kFbU5aLIknnFI7guh8ijHk+2v+Vf5g= +github.com/nats-io/nats.go v1.12.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nats-io/stan.go v0.8.3/go.mod h1:Ejm8bbHnMTSptU6uNMAVuxeapMJYBB/Ml3ej6z4GoSY= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= 
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/open-policy-agent/opa v0.23.2/go.mod h1:rrwxoT/b011T0cyj+gg2VvxqTtn6N3gp/jzmr3fjW44= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe 
v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/phayes/freeport v0.0.0-20171002181615-b8543db493a5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/errors v0.0.0-20181023235946-059132a15dd0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= 
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20181025174421-f30f42803563/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= +github.com/prometheus/statsd_exporter v0.22.3/go.mod h1:N4Z1+iSqc9rnxlT1N8Qn3l65Vzb5t4Uq0jpg8nxyhio= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.13.0/go.mod 
h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= +github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/savsgio/gotils v0.0.0-20210217112953-d4a072536008/go.mod h1:TWNAOTaVzGOXq8RbEvHnhzA/A2sLZzgn0m6URjnukY8= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sendgrid/rest v2.6.3+incompatible/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV6tsOE70KbHoqJls4lE= +github.com/sendgrid/sendgrid-go v3.5.0+incompatible/go.mod h1:QRQt+LX/NmgVEvmdRw0VT/QgUn499+iza2FnDca9fg8= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil/v3 v3.21.6/go.mod h1:JfVbDpIBLVzT8oKbvMg9P3wEIMDDpVn+LwHTKj0ST88= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus 
v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra 
v0.0.0-20181021141114-fe5e611709b0/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v0.0.0-20181024212040-082b515c9490/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= 
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/supplyon/gremcos v0.1.0/go.mod h1:ZnXsXGVbGCYDFU5GLPX9HZLWfD+ZWkiPo30KUjNoOtw= +github.com/tebeka/strftime v0.1.3/go.mod h1:7wJm3dZlpr4l/oVK0t1HYIc4rMzQ2XJlOMIUJUJH6XQ= +github.com/testcontainers/testcontainers-go v0.9.0/go.mod h1:b22BFXhRbg4PJmeMVWh6ftqjyZHgiIl3w274e9r3C2E= +github.com/tidwall/gjson v1.2.1/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA= +github.com/tidwall/gjson v1.8.0/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= +github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/numcpus v0.2.2/go.mod 
h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/toolkits/concurrent v0.0.0-20150624120057-a4371d70e3e3/go.mod h1:QDlpd3qS71vYtakd2hmdpqhJ9nwv6mD6A30bQ1BPBFE= +github.com/trusch/grpc-proxy v0.0.0-20190529073533-02b64529f274/go.mod h1:dzrPb02OTNDVimdCCBR1WAPu9a69n3VnfDyCX/GT/gE= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.21.0/go.mod h1:jjraHZVbKOXftJfsOYoAjaeygpj5hr8ermTRJNroD7A= +github.com/valyala/fasthttp v1.31.1-0.20211216042702-258a4c17b4f4/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= 
+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vmware/vmware-go-kcl v1.5.0/go.mod h1:P92YfaWfQyudNf62BNx+E2rJn9pd165MhHsRt8ajkpM= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b/go.mod h1:HptNXiXVDcJjXe9SqMd0v2FsL9f8dz4GnXgltU6q/co= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
+github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg= +go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc= +go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA= +go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr 
v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +goji.io v2.0.2+incompatible/go.mod h1:sbqFwrtqZACxLBTQcdgVjFh54yGVCvwq8+w49MVMMIk= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= 
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image 
v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181023182221-1baf3a9d7d67/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180828065106-d99a578cf41b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210701133433-6b8dcf568a95/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= +google.golang.org/genproto v0.0.0-20210707164411-8c882eb9abba/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= 
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 
h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/couchbase/gocb.v1 v1.6.4/go.mod h1:Ri5Qok4ZKiwmPr75YxZ0uELQy45XJgUSzeUnK806gTY= +gopkg.in/couchbase/gocbcore.v7 v7.1.18/go.mod h1:48d2Be0MxRtsyuvn+mWzqmoGUG9uA00ghopzOs148/E= +gopkg.in/couchbaselabs/gocbconnstr.v1 v1.0.4/go.mod h1:ZjII0iKx4Veo6N6da+pEZu/ptNyKLg9QTVt7fFmR6sw= +gopkg.in/couchbaselabs/gojcbmock.v1 v1.0.4/go.mod h1:jl/gd/aQ2S8whKVSTnsPs6n7BPeaAuw9UglBD/OF7eo= +gopkg.in/couchbaselabs/jsonx.v1 v1.0.0/go.mod h1:oR201IRovxvLW/eISevH12/+MiKHtNQAKfcX8iWZvJY= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod 
h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= +gopkg.in/gorethink/gorethink.v4 v4.1.0/go.mod h1:M7JgwrUAmshJ3iUbEK0Pt049MPyPK+CYDGGaEjdZb/c= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/kataras/go-serializer.v0 v0.0.4/go.mod h1:v2jHg/3Wp7uncDNzenTsX75PRDxhzlxoo/qDvM4ZGxk= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= 
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= +k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= +k8s.io/apiextensions-apiserver v0.20.0/go.mod h1:ZH+C33L2Bh1LY1+HphoRmN1IQVLTShVcTojivK3N9xg= +k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= +k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8= +k8s.io/cli-runtime v0.20.0/go.mod h1:C5tewU1SC1t09D7pmkk83FT4lMAw+bvMDuRxA7f0t2s= +k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= +k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY= +k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= +k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod 
h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/metrics v0.20.0/go.mod h1:9yiRhfr8K8sjdj2EthQQE9WvpYDvsXIV3CjN4Ruq4Jw= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= 
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +skywalking.apache.org/repo/goapi v0.0.0-20220401015832-2c9eee9481eb h1:+PP2DpKFN/rEporLdPI4A7bPWQjwfARlUDKNhSab8iM= +skywalking.apache.org/repo/goapi v0.0.0-20220401015832-2c9eee9481eb/go.mod h1:uWwwvhcwe2MD/nJCg0c1EE/eL6KzaBosLHDfMFoEJ30= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0= diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-body/hello.go b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-body/hello.go new file mode 100644 index 0000000..df45e24 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-body/hello.go @@ -0,0 +1,37 @@ +/* + * Copyright 2022 The OpenFunction Authors. + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package hello + +import ( + "fmt" + "io" + "net/http" + + "github.com/OpenFunction/functions-framework-go/functions" +) + +func init() { + functions.HTTP("HelloWorld", HelloWorld) +} + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + fmt.Fprintf(w, "Hello, %s!\n", string(body)) +} diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-header/go.mod b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-header/go.mod new file mode 100644 index 0000000..76b2646 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-header/go.mod @@ -0,0 +1,3 @@ +module example.com/hello + +go 1.17 diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-header/hello.go b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-header/hello.go new file mode 100644 index 0000000..418f9fb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-header/hello.go @@ -0,0 +1,30 @@ +/* + * Copyright 2022 The OpenFunction Authors. + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package hello + +import ( + "fmt" + "net/http" +) + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + header := r.Header + fmt.Fprintf(w, "%s", header["Authorization"]) +} diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-uri/go.mod b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-uri/go.mod new file mode 100644 index 0000000..046483f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-uri/go.mod @@ -0,0 +1,32 @@ +module example.com/hello + +go 1.17 + +require github.com/OpenFunction/functions-framework-go v0.4.0 + +require ( + github.com/SkyAPM/go2sky v1.4.1 // indirect + github.com/cloudevents/sdk-go/v2 v2.4.1 // indirect + github.com/dapr/dapr v1.8.3 // indirect + github.com/dapr/go-sdk v1.5.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.7.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/net v0.0.0-20220621193019-9d032be2e588 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/genproto v0.0.0-20220622171453-ea41d75dfa0f // indirect + google.golang.org/grpc v1.47.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/klog/v2 v2.30.0 // indirect + skywalking.apache.org/repo/goapi v0.0.0-20220401015832-2c9eee9481eb // indirect +) diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-uri/go.sum 
b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-uri/go.sum new file mode 100644 index 0000000..7aaa3ce --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-uri/go.sum @@ -0,0 +1,2615 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= 
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.86.0/go.mod h1:YG2MRW8zzPSZaztnTZtxbMPK2VYaHg4NTDYZMG+5ZqQ= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod 
h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.12.2/go.mod h1:BmI/dqa6eXfm8WTp+JIN6d6vtVGq+vcsnglFKn/aVkY= +cloud.google.com/go/secretmanager v1.4.0/go.mod h1:h2VZz7Svt1W9/YVl7mfcX9LddvS6SOLOvMoOXBhYT1k= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= +contrib.go.opencensus.io/exporter/zipkin v0.1.1/go.mod 
h1:GMvdSl3eJ2gapOaLKzTKE3qDgUkJ86k9k3yY2eqwkzc= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +dubbo.apache.org/dubbo-go/v3 v3.0.3-0.20220610080020-48691a404537/go.mod h1:O7eTHAilCWlqBjEkG2MW9khZFImiARb/tSOE8PJas+g= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.1.6/go.mod h1:16e0ds7LGQQcT59QqkTg72Hh5ShM51Byv5PEmW6uoRU= +github.com/99designs/keyring v1.2.0/go.mod h1:ETJn2A9cfvJKq1Q4FeOc+eetK52Ik0kUGog7Uy+xvX8= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= +github.com/AdhityaRamadhanus/fasthttpcors v0.0.0-20170121111917-d4c07198763a/go.mod h1:C0A1KeiVHs+trY6gUTPhhGammbrZ30ZfXRW/nuT7HLw= +github.com/AthenZ/athenz v1.10.39/go.mod h1:3Tg8HLsiQZp81BJY58JBeU2BR6B/H4/0MQGfCwhHNEA= +github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= +github.com/Azure/azure-amqp-common-go/v3 v3.2.3/go.mod h1:7rPmbSfszeovxGfc5fSAXE4ehlXQZHpMja2OtxC2Tas= +github.com/Azure/azure-event-hubs-go/v3 v3.3.18/go.mod h1:R5H325+EzgxcBDkUerEwtor7ZQg77G7HiOTwpcuIVXY= +github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= 
+github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= +github.com/Azure/azure-sdk-for-go/sdk/data/aztables v1.0.1/go.mod h1:l3wvZkG9oW07GLBW5Cd0WwG5asOfJ8aqE8raUvNzLpk= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/azsecrets v0.7.1/go.mod h1:WcC2Tk6JyRlqjn2byvinNnZzgdXmZ1tOiIOWNh1u0uA= +github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.5.0/go.mod h1:9V2j0jn9jDEkCkv8w/bKTNppX/d0FVA1ud77xCIP4KA= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.0.1/go.mod h1:LH9XQnMr2ZYxQdVdCrzLO9mxeDyrDFa6wbSI3x5zCZk= +github.com/Azure/azure-service-bus-go v0.10.10/go.mod h1:o5z/3lDG1iT/T/G7vgIwIqVDTx9Qa2wndf5OdzSzpF8= +github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= +github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE= +github.com/Azure/azure-storage-queue-go v0.0.0-20191125232315-636801874cdd/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= +github.com/Azure/go-amqp v0.13.1/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= +github.com/Azure/go-amqp v0.17.0/go.mod h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= +github.com/Azure/go-amqp v0.17.4/go.mod 
h1:9YJ3RhxRT1gquYnzpZO1vcYMMpAdJT+QEg6fwmw9Zlg= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.7/go.mod h1:V6p3pKZx1KKkJubbxnDWrzNhEIfOy/pTGasLqzHIPHs= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= +github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= +github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= 
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.4/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod 
h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Flaque/filet v0.0.0-20201012163910-45f684403088/go.mod 
h1:TK+jB3mBs+8ZMWhU5BqZKnZWJ1MrLo8etNVg51ueTBo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod 
h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= +github.com/OpenFunction/functions-framework-go v0.4.0 h1:WHuKHRgwFNiTe+6/lJqDiQC0zOU7cS+HVf/XN/dA1j4= +github.com/OpenFunction/functions-framework-go v0.4.0/go.mod h1:+uYjTEYmn2uqIyViZtg9OF+bUNdjbkWNd7jrQWc7iEc= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/RoaringBitmap/roaring v1.1.0/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod 
h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/SkyAPM/go2sky v1.4.1 h1:FV0jUB8UeC5CW0Z12j8xgrK0LoVV85Z92ShQU0G3Xfo= +github.com/SkyAPM/go2sky v1.4.1/go.mod h1:cebzbFtq5oc9VrgJy0Sv7oePj/TjIlXPdj2ntHdCXd0= +github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/Workiva/go-datastructures v1.0.52/go.mod h1:Z+F2Rca0qCsVYDS8z7bAGm8f3UkzuWYS/oBZz5a7VVA= +github.com/a8m/documentdb v1.3.1-0.20220405205223-5b41ba0aaeb1/go.mod h1:4Z0mpi7fkyqjxUdGiNMO3vagyiUoiwLncaIX6AsW5z0= +github.com/aerospike/aerospike-client-go v4.5.0+incompatible/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/agiledragon/gomonkey v2.0.2+incompatible/go.mod h1:2NGfXu1a80LLr2cmWXGBDaHEjb1idR6+FVlX5T3D9hw= +github.com/agrea/ptr v0.0.0-20180711073057-77a518d99b7b/go.mod h1:Tie46d3UWzXpj+Fh9+DQTyaUxEpFBPOLXrnx7nxlKRo= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units 
v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= +github.com/alibaba/sentinel-golang v1.0.4/go.mod h1:Lag5rIYyJiPOylK8Kku2P+a23gdKMMqzQS7wTnjWEpk= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/darabonba-openapi v0.1.4/go.mod h1:j03z4XUkIC9aBj/w5Bt7H0cygmPNt5sug8NXle68+Og= +github.com/alibabacloud-go/darabonba-openapi v0.1.16/go.mod h1:ZjyqRbbZOaUBSh7keeH8VQN/BzCPvxCQwMuJGDdbmXQ= +github.com/alibabacloud-go/darabonba-string v1.0.0/go.mod h1:93cTfV3vuPhhEwGGpKKqhVW4jLe7tDpo3LUM0i0g6mA= +github.com/alibabacloud-go/debug v0.0.0-20190504072949-9472017b5c68/go.mod h1:6pb/Qy8c+lqua8cFpEy7g39NRRqOWc3rOwAy8m5Y2BY= +github.com/alibabacloud-go/endpoint-util v1.1.0/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/oos-20190601 v1.0.1/go.mod h1:t7g1ubvGwLe0cP+uLSrTza2S6xthOFZw43h9Zajt+Kw= +github.com/alibabacloud-go/openapi-util v0.0.7/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/openapi-util v0.0.10/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/tea v1.1.0/go.mod h1:IkGyUSX4Ba1V+k4pCtJUc6jDpZLFph9QMy2VUPTwukg= +github.com/alibabacloud-go/tea v1.1.7/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.8/go.mod h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.11/go.mod 
h1:/tmnEaQMyb4Ky1/5D+SE1BAsa5zj/KeGOFfwYm3N/p4= +github.com/alibabacloud-go/tea v1.1.15/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea v1.1.17/go.mod h1:nXxjm6CIFkBhwW4FQkNrolwbfon8Svy6cujmKFUq98A= +github.com/alibabacloud-go/tea-utils v1.3.1/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.3.9/go.mod h1:EI/o33aBfj3hETm4RLiAxF/ThQdSngxrpF8rKUDJjPE= +github.com/alibabacloud-go/tea-utils v1.4.3/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis/v2 v2.13.3/go.mod h1:uS970Sw5Gs9/iK3yBg0l9Uj9s25wXxSpQUE9EaJ/Blg= +github.com/aliyun/alibaba-cloud-sdk-go v1.61.18/go.mod h1:v8ESoHo4SyHmuB4b1tJqDHxfTGEciD+yhvOU/5s1Rfk= +github.com/aliyun/aliyun-oss-go-sdk v2.0.7+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-tablestore-go-sdk v1.6.0/go.mod h1:jixoiNNRR/4ziq0yub1fTlxmDcQwlpkaujpaWIATQWM= +github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw= +github.com/aliyunmq/mq-http-go-sdk v1.0.3/go.mod h1:JYfRMQoPexERvnNNBcal0ZQ2TVQ5ialDiW9ScjaadEM= +github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129/go.mod h1:rFgpPQZYZ8vdbc+48xibu8ALc3yeyd64IhHS+PU6Yyg= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/apache/dubbo-getty v1.4.9-0.20220610060150-8af010f3f3dc/go.mod h1:cPJlbcHUTNTpiboMQjMHhE9XBni11LiBiG8FdrDuVzk= +github.com/apache/dubbo-go-hessian2 v1.9.1/go.mod 
h1:xQUjE7F8PX49nm80kChFvepA/AvqAZ0oh/UaB6+6pBE= +github.com/apache/dubbo-go-hessian2 v1.9.3/go.mod h1:xQUjE7F8PX49nm80kChFvepA/AvqAZ0oh/UaB6+6pBE= +github.com/apache/dubbo-go-hessian2 v1.11.0/go.mod h1:7rEw9guWABQa6Aqb8HeZcsYPHsOS7XT1qtJvkmI6c5w= +github.com/apache/pulsar-client-go v0.8.1/go.mod h1:yJNcvn/IurarFDxwmoZvb2Ieylg630ifxeO/iXpk27I= +github.com/apache/pulsar-client-go/oauth2 v0.0.0-20220120090717-25e59572242e/go.mod h1:Xee4tgYLFpYcPMcTfBYWE1uKRzeciodGTSEDMzsR6i8= +github.com/apache/rocketmq-client-go v1.2.5/go.mod h1:Kap8oXIVLlHF50BGUbN9z97QUp1GaK1nOoCfsZnR2bw= +github.com/apache/rocketmq-client-go/v2 v2.1.0/go.mod h1:oEZKFDvS7sz/RWU0839+dQBupazyBV7WX5cP6nrio0Q= +github.com/apache/rocketmq-client-go/v2 v2.1.1-rc2/go.mod h1:DDYjQ9wxYmJLjgNK4+RqyFE8/13gLK/Bugz4U6zD5MI= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.14.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc/go.mod h1:w648aMHEgFYS6xb0KVMMtZ2uMeemhiKCuD2vj6gY52A= +github.com/ardielle/ardielle-go v1.5.2/go.mod h1:I4hy1n795cUhaVt/ojz83SNVCYIGsAFAONtv2Dr7HUI= +github.com/ardielle/ardielle-tools v1.5.4/go.mod h1:oZN+JRMnqGiIhrzkRN9l26Cej9dEx4jeNG6A+AdkShk= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.10/go.mod 
h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/EventBus v0.0.0-20200907212545-49d423059eef/go.mod h1:JS7hed4L1fj0hXcyEejnW57/7LCetXggd+vwrRnYeII= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.32.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/aws/aws-sdk-go v1.41.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= 
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/beefsack/go-rate v0.0.0-20220214233405-116f4ca011a0/go.mod h1:6YNgTHLutezwnBvyneBbwvB8C82y3dcoOj5EQJIdGXA= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= 
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/bytecodealliance/wasmtime-go 
v0.35.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI= +github.com/camunda/zeebe/clients/go/v8 v8.0.3/go.mod h1:iOEgFlCYAPdqae6iPp0ajeo2RSxJirU39i+UAN74NOY= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= +github.com/cinience/go_rocketmq v0.0.2/go.mod h1:2YNY7emT546dcFpMEWLesmAEi4ndW7+tX5VfNf1Zsgs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudevents/sdk-go/v2 v2.4.1 h1:rZJoz9QVLbWQmnvLPDFEmv17Czu+CfSPwMO6lhJ72xQ= +github.com/cloudevents/sdk-go/v2 v2.4.1/go.mod h1:MZiMwmAh5tGj+fPFvtHv9hKurKqXtdB9haJYMJ/7GJY= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= 
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.2/go.mod h1:qpbpJ1jmlqsR9f2IyaLPsdkCdnt0rbDVqIDlhuu5tRY= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= 
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.6.2/go.mod h1:sidY30/InSE1j2vdD1ihtKoJz+lWdaXMdiAeIupaf+s= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod 
h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod 
h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= 
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod 
h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod 
h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creasty/defaults v1.5.2/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/dancannon/gorethink v4.0.0+incompatible/go.mod h1:BLvkat9KmZc1efyYwhz3WnybhRZtgF1K929FD8z1avU= +github.com/danieljoos/wincred v1.0.2/go.mod h1:SnuYRW9lp1oJrZX/dXJqr0cPK5gYXqx3EJbmjhLdK9U= +github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/dapr/components-contrib v1.8.0-rc.6/go.mod h1:gxrCpaosbI0n3SFW7fKSvJU/ymjryHqrdRgqmsknuno= +github.com/dapr/components-contrib 
v1.8.1-rc.1/go.mod h1:gxrCpaosbI0n3SFW7fKSvJU/ymjryHqrdRgqmsknuno= +github.com/dapr/dapr v1.8.0/go.mod h1:yAsDiK5oecG0htw2S8JG9RFaeHJVdlTfZyOrL57AvRM= +github.com/dapr/dapr v1.8.3 h1:wAmP8lXeI1OeCnLGi3XT1PokbSaM0/N71ChZhjPdTCw= +github.com/dapr/dapr v1.8.3/go.mod h1:/0JyKebxzz0vPwYXc/2qHBXIicUi01HUWnpQ8AiJ0zM= +github.com/dapr/go-sdk v1.5.0 h1:OVkrupquJEOL1qRtwKcMVrFKYhw4UJQvgOJNduo2VxE= +github.com/dapr/go-sdk v1.5.0/go.mod h1:Cvz3taCVu22WCNEUbc9/szvG/yJxWPAV4dcaG+zDWA4= +github.com/dapr/kit v0.0.2-0.20210614175626-b9074b64d233/go.mod h1:y8r0VqUNKyd6xBXp7gQjwA59wlCLGfKzL5J8iJsN09w= +github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deepmap/oapi-codegen v1.3.6/go.mod h1:aBozjEveG+33xPiP55Iw/XbVkhtZHEGLq3nxlX0+hfU= +github.com/deepmap/oapi-codegen v1.8.1/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/denisenkom/go-mssqldb v0.0.0-20210411162248-d9abbec934ba/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dghubble/go-twitter v0.0.0-20190719072343-39e5462e111f/go.mod h1:xfg4uS5LEzOj8PgZV7SQYRHbG7jPUnelEiaAVJxmhJE= +github.com/dghubble/oauth1 v0.6.0/go.mod h1:8pFdfPkv/jr8mkChVbNVuJ0suiHe278BtWI4Tk1ujxk= +github.com/dghubble/sling v1.3.0/go.mod h1:XXShWaBWKzNLhu2OxikSNFrlsvowtz4kyRuXUG7oQKY= +github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgrijalva/jwt-go 
v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/didip/tollbooth v4.0.2+incompatible/go.mod h1:A9b0665CE6l1KmzpDws2++elm/CsuWBMa5Jv4WY0PEY= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= +github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= +github.com/distribution/distribution/v3 v3.0.0-20211118083504-a29a3c99a684/go.mod h1:UfCu3YXJJCI+IdnqGgYP82dk2+Joxmv+mUTVBES6wac= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker 
v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.14+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dubbogo/go-zookeeper v1.0.3/go.mod h1:fn6n2CAEer3novYgk9ULLwAjuV8/g4DdC2ENwRb6E+c= +github.com/dubbogo/go-zookeeper v1.0.4-0.20211212162352-f9d2183d89d5/go.mod h1:fn6n2CAEer3novYgk9ULLwAjuV8/g4DdC2ENwRb6E+c= +github.com/dubbogo/gost v1.9.0/go.mod h1:pPTjVyoJan3aPxBPNUX0ADkXjPibLo+/Ib0/fADXSG8= +github.com/dubbogo/gost v1.11.18/go.mod h1:vIcP9rqz2KsXHPjsAwIUtfJIJjppQLQDcYaZTy/61jI= +github.com/dubbogo/gost v1.11.23/go.mod h1:PhJ8+qZJx+Txjx1KthNPuVkCvUca0jRLgKWj/noGgeI= 
+github.com/dubbogo/gost v1.11.25/go.mod h1:iovrPhv0hyakhQGVr4jwiECBL9HXNuBY4VV3HWK5pM0= +github.com/dubbogo/grpc-go v1.42.9/go.mod h1:F1T9hnUvYGW4JLK1QNriavpOkhusU677ovPzLkk6zHM= +github.com/dubbogo/jsonparser v1.0.1/go.mod h1:tYAtpctvSP/tWw4MeelsowSPgXQRVHHWbqL6ynps8jU= +github.com/dubbogo/net v0.0.4/go.mod h1:1CGOnM7X3he+qgGNqjeADuE5vKZQx/eMSeUkpU3ujIc= +github.com/dubbogo/triple v1.0.9/go.mod h1:1t9me4j4CTvNDcsMZy6/OGarbRyAUSY0tFXGXHCp7Iw= +github.com/dubbogo/triple v1.1.8/go.mod h1:9pgEahtmsY/avYJp3dzUQE8CMMVe1NtGBmUhfICKLJk= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dvsekhvalnov/jose2go v0.0.0-20200901110807-248326c1351b/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod 
h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.0/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch 
v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/fasthttp-contrib/sessions v0.0.0-20160905201309-74f6ac73d5d5/go.mod h1:MQXNGeXkpojWTxbN7vXoE3f7EmlA11MlJbsrJpVBINA= +github.com/fasthttp/router v1.3.8/go.mod h1:DQBvuHvYbn3SUN6pGjwjPbpCNpWfCFc5Ipn/Fj6XxFc= +github.com/fastly/go-utils v0.0.0-20180712184237-d95a45783239/go.mod h1:Gdwt2ce0yfBxPvZrHkprdPPTTS3N5rwmLE8T22KBXlw= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/foxcpp/go-mockdns v0.0.0-20210729171921-fb145fc6f897/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod 
h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/getkin/kin-openapi v0.2.0/go.mod h1:V1z9xl9oF5Wt7v32ne4FmiF1alpS4dM6mNzoywPOXlk= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= 
+github.com/go-asn1-ber/asn1-ber v1.3.1/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-co-op/gocron v1.9.0/go.mod h1:DbJm9kdgr1sEvWpHCA7dFFs/PGHPMil9/97EXCRPr4k= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-ini/ini v1.66.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-ldap/ldap/v3 v3.1.10/go.mod h1:5Zun81jBTabRaI8lzN7E1JjyEl1g6zI6u9pd8luAK4Q= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= 
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference 
v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-ozzo/ozzo-validation/v4 v4.3.0/go.mod h1:2NKgrcHl3z6cJs+3Oo940FPRiTzuqKbvfrL2RxCj6Ew= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= 
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod 
h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= 
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogap/errors v0.0.0-20200228125012-531a6449b28c/go.mod h1:tbRYYYC7g/H7QlCeX0Z2zaThWKowF4QQCFIsGgAsqRo= +github.com/gogap/stack v0.0.0-20150131034635-fef68dddd4f8/go.mod h1:6q1WEv2BiAO4FSdwLQTJbWQYAn1/qDNJHUGJNXCj9kM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod 
h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= 
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/integrate v0.0.0-20181209220457-a422b5c0fdf2/go.mod h1:pDgmNM6seYpwvPos3q+zxlXMsbve6mOIPucUnUOrI7Y= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= +github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b/go.mod h1:Z4GIJBJO3Wa4gD4vbwQxXXZ+WHmW6E9ixmNrwvs0iZs= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= +github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= +github.com/google/flatbuffers v1.12.1/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex 
v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod 
h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grandcat/zeroconf v0.0.0-20190424104450-85eadb44205c/go.mod h1:YjKB0WsLXlMkO9p+wGTCoPIDGRJH0mz7E526PxkQVxI= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway 
v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod 
h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-plugin v1.4.3/go.mod h1:5fGEH17QVwTTcR0zV7yhDPLLmFX9YSZ38b18Udy6vYQ= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod 
h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/base62 v0.1.1/go.mod h1:EdWO6czbmthiwZ3/PUsDV+UD1D5IRU4ActiaWGwt0Yw= +github.com/hashicorp/go-secure-stdlib/mlock v0.1.1/go.mod h1:zq93CJChV6L9QTfGKtfBxKqD7BqqXx5O04A/ns2p5+I= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/password v0.1.1/go.mod h1:9hH302QllNwu1o2TGYtSk8I8kTAN0ca1EHpwhm5Mmzo= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.1/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= +github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/vault/sdk v0.3.0/go.mod h1:aZ3fNuL5VNydQk8GcLJ2TV8YCRVvyaakYkhZRoVuhj0= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hazelcast/hazelcast-go-client v0.0.0-20190530123621-6cf767c2f31a/go.mod 
h1:VhwtcZ7sg3xq7REqGzEy7ylSWGKz4jZd05eCJropNzI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huaweicloud/huaweicloud-sdk-go-obs v3.21.12+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s= +github.com/huaweicloud/huaweicloud-sdk-go-v3 v0.0.87/go.mod h1:IvF+Pe06JMUivVgN6B4wcsPEoFvVa40IYaOPZyUt5HE= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.0.0-20191112232945-16388991a334/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb-client-go v1.4.0/go.mod h1:S+oZsPivqbcP1S9ur+T+QqXvrYS3NCZeMQtBoH4D1dw= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod 
h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 
v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= +github.com/jackc/puddle 
v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jawher/mow.cli v1.0.4/go.mod h1:5hQj2V8g+qYmLUVWqu4Wuja1pI57M83EChYLVZ0sMKk= +github.com/jawher/mow.cli v1.2.0/go.mod h1:y+pcA3jBAdo/GIZx/0rFjw/K2bVEODP9rfZOfaiq8Ko= +github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869/go.mod h1:cJ6Cj7dQo+O6GJNiMx+Pa94qKj+TG8ONdKHgMNIyyag= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/joho/godotenv v1.3.0/go.mod 
h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= 
+github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kataras/go-errors v0.0.3/go.mod h1:K3ncz8UzwI3bpuksXt5tQLmrRlgxfv+52ARvAu1+I+o= +github.com/kataras/go-serializer v0.0.4/go.mod h1:/EyLBhXKQOJ12dZwpUZZje3lGy+3wnvG7QKaVJtm/no= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/keybase/go-keychain v0.0.0-20190712205309-48d3d31d256d/go.mod h1:JJNrCn9otv/2QP4D7SMJBgaleKpOf66PnW6F5WGNRIc= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6/go.mod 
h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/koding/multiconfig v0.0.0-20171124222453-69c27309b2d7/go.mod h1:Y2SaZf2Rzd0pXkLVhLlCiAXFCLSXAIbTKDivVgff/AM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/labd/commercetools-go-sdk v0.3.2/go.mod h1:I+KKNALlg6PcSertsVA7E442koO99GT7gldWqwZlUGo= +github.com/labstack/echo/v4 
v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/lestrrat/go-envload v0.0.0-20180220120943-6ed08b54a570/go.mod h1:BLt8L9ld7wVsvEWQbuLrUZnCMnUmLZ+CGDzKtclrTlE= +github.com/lestrrat/go-file-rotatelogs v0.0.0-20180223000712-d3151e2a480f/go.mod h1:UGmTpUd3rjbtfIpwAPrcfmGf/Z1HS95TATB+m57TPB8= +github.com/lestrrat/go-strftime v0.0.0-20180220042222-ba3bf9c1d042/go.mod h1:TPpsiPUEh0zFL1Snz4crhMlBe60PYxRHr5oFF3rRYg0= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/linkedin/goavro/v2 v2.9.8/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= 
+github.com/machinebox/graphql v0.2.2/go.mod h1:F+kbVMHuwrQ5tYgU9JXlnskM8nOaFxCAEolaQybkjWA= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matoous/go-nanoid v1.5.0/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U= +github.com/matoous/go-nanoid/v2 v2.0.0/go.mod h1:FtS4aGPVfEkxKxhdWPAspZpZSh1cOjtM7Ej/So3hR0g= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable 
v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod 
h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/microcosm-cc/bluemonday v1.0.7/go.mod h1:HOT/6NaBlR0f9XlxD3zolN6Z3N8Lp4pvhp+jLS5ihnI= +github.com/microsoft/ApplicationInsights-Go v0.4.4/go.mod h1:fKRUseBqkw6bDiXTs3ESTiU/4YTIHsQS4W3fP2ieF4U= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.0.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.0/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream 
v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod 
h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mrz1836/postmark v1.2.9/go.mod h1:xNRms8jgTfqBneqg0+PzvBrhuojefqXIWc6Np0nHiEM= +github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nacos-group/nacos-sdk-go v1.0.8/go.mod h1:hlAPn3UdzlxIlSILAyOXKxjFSvDJ9oLzTJ9hLAK1KzA= +github.com/nacos-group/nacos-sdk-go v1.1.1/go.mod h1:UHOtQNQY/qpk2dhg6gDq8u5+/CEIc3+lWmrmxEzX0/g= +github.com/nacos-group/nacos-sdk-go/v2 v2.0.1/go.mod h1:SlhyCAv961LcZ198XpKfPEQqlJWt2HkL1fDLas0uy/w= +github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= +github.com/nats-io/jwt v0.3.0/go.mod 
h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt v1.1.0/go.mod h1:n3cvmLfBfnpV4JJRN7lRYCyZnw48ksGsbThGXEk4w9M= +github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.1.9/go.mod h1:9qVyoewoYXzG1ME9ox0HwkkzyYvnlBDugfR4Gg/8uHU= +github.com/nats-io/nats-server/v2 v2.7.4/go.mod h1:1vZ2Nijh8tcyNe8BDVyTviCd9NYzRbubQYiEHsvOQWc= +github.com/nats-io/nats-streaming-server v0.21.2/go.mod h1:2W8QfNVOtcFpmf0bRiwuLtRb0/hkX4NuOxPOFNOThVQ= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE= +github.com/nats-io/nats.go v1.13.1-0.20220308171302-2f2f6968e98d/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nats-io/stan.go v0.8.3/go.mod h1:Ejm8bbHnMTSptU6uNMAVuxeapMJYBB/Ml3ej6z4GoSY= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod 
h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= 
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/open-policy-agent/opa v0.40.0/go.mod h1:UQqv8nJ1njs2+Od1lrPFzUAApdj22ABxTO35+Vpsjz4= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= 
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod 
h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/oracle/oci-go-sdk/v54 v54.0.0/go.mod h1:+t+yvcFGVp+3ZnztnyxqXfQDsMlq8U25faBLa+mqCMc= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml 
v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/peterh/liner v0.0.0-20170211195444-bf27d3ba8e1d/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.5.2+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp 
v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polarismesh/polaris-go v1.1.0/go.mod h1:tquawfjEKp1W3ffNJQSzhfditjjoZ7tvhOCElN7Efzs= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.5.1/go.mod 
h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= 
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= +github.com/prometheus/statsd_exporter v0.22.3/go.mod h1:N4Z1+iSqc9rnxlT1N8Qn3l65Vzb5t4Uq0jpg8nxyhio= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rabbitmq/amqp091-go v1.3.4/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 
h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= +github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod 
h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/savsgio/gotils v0.0.0-20210217112953-d4a072536008/go.mod h1:TWNAOTaVzGOXq8RbEvHnhzA/A2sLZzgn0m6URjnukY8= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/sendgrid/rest v2.6.3+incompatible/go.mod h1:kXX7q3jZtJXK5c5qK83bSGMdV6tsOE70KbHoqJls4lE= +github.com/sendgrid/sendgrid-go v3.5.0+incompatible/go.mod h1:QRQt+LX/NmgVEvmdRw0VT/QgUn499+iza2FnDca9fg8= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/gopsutil/v3 v3.21.6/go.mod h1:JfVbDpIBLVzT8oKbvMg9P3wEIMDDpVn+LwHTKj0ST88= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sijms/go-ora/v2 v2.2.22/go.mod h1:jzfAFD+4CXHE+LjGWFl6cPrtiIpQVxakI2gvrMF2w6Y= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v0.0.0-20190710185942-9d28bd7c0945/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sony/gobreaker v0.4.2-0.20210216022020-dd874f9dd33b/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod 
h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/stathat/consistent v1.0.0/go.mod h1:uajTPbgSygZBJ+V+0mY7meZ8i0XAcZs7AQ6V121XSxw= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.4 h1:wZRexSlwd7ZXfKINDLsO4r7WBt3gTKONc6K/VesHvHM= +github.com/stretchr/testify v1.7.4/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/supplyon/gremcos v0.1.0/go.mod h1:ZnXsXGVbGCYDFU5GLPX9HZLWfD+ZWkiPo30KUjNoOtw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tebeka/strftime v0.1.3/go.mod h1:7wJm3dZlpr4l/oVK0t1HYIc4rMzQ2XJlOMIUJUJH6XQ= +github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/testcontainers/testcontainers-go v0.12.0/go.mod h1:SIndOQXZng0IW8iWU1Js0ynrfZ8xcxrTtDfF6rD2pxs= +github.com/tetratelabs/wazero v0.0.0-20220425003459-ad61d9a6ff43/go.mod h1:Y4X/zO4sC2dJjZG9GDYNRbJGogfqFYJY/BbyKlOxXGI= +github.com/tevid/gohamcrest v1.1.1/go.mod h1:3UvtWlqm8j5JbwYZh80D/PVBt0mJ1eJiYgZMibh0H/k= +github.com/tidwall/gjson v1.2.1/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA= +github.com/tidwall/gjson v1.8.1/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= +github.com/tidwall/match v1.0.1/go.mod 
h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= +github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/toolkits/concurrent v0.0.0-20150624120057-a4371d70e3e3/go.mod h1:QDlpd3qS71vYtakd2hmdpqhJ9nwv6mD6A30bQ1BPBFE= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= +github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod 
h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn0= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.21.0/go.mod h1:jjraHZVbKOXftJfsOYoAjaeygpj5hr8ermTRJNroD7A= +github.com/valyala/fasthttp v1.31.1-0.20211216042702-258a4c17b4f4/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod 
h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vmware/vmware-go-kcl v1.5.0/go.mod h1:P92YfaWfQyudNf62BNx+E2rJn9pd165MhHsRt8ajkpM= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod 
h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yashtewari/glob-intersection v0.1.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod 
h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/zouyx/agollo/v3 v3.4.5/go.mod h1:LJr3kDmm23QSW+F1Ol4TMHDa7HvJvscMdVxJ2IpUTVc= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= +go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod 
h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0/go.mod h1:FAwse6Zlm5v4tEWZaTjmNhe17Int4Oxbu7+2r0DiD3w= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0-alpha.0/go.mod h1:tsKetYpt980ZTpzl/gb+UOJj9RkIyCb1u4wjzMg90BQ= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= +go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= +go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= +go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3/go.mod h1:UJmXdiVVBaZ63umRUTwJuCMAV//GCMvDiQwn703/GoY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.6.3/go.mod h1:ycItY/esVj8c0dKgYTOztTERXtPzcfDU/0o8EdwCjoA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric 
v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= +go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= +go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= +go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= +go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/ratelimit v0.2.0/go.mod h1:YYBV4e4naJvhpitQrWJu1vCpgB7CboMe0qhltKt6mUg= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap 
v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +goji.io v2.0.2+incompatible/go.mod h1:sbqFwrtqZACxLBTQcdgVjFh54yGVCvwq8+w49MVMMIk= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191219195013-becbf705a915/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod 
h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod 
h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= 
+golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211105192438-b53810dc28af/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net 
v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220621193019-9d032be2e588 h1:9ubFuySsnAJYGyJrZ3koiEv8FyqofCBdz3G9Mbf2YFc= +golang.org/x/net v0.0.0-20220621193019-9d032be2e588/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180828065106-d99a578cf41b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201223074533-0d417f636930/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211109184856-51b60fd695b3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201014170642-d1624618ad65/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod 
h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod 
h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210106152847-07624b53cd92/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210701133433-6b8dcf568a95/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= +google.golang.org/genproto v0.0.0-20210707164411-8c882eb9abba/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= 
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220405205423-9d709892a2bf/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220622171453-ea41d75dfa0f h1:kYlCnpX4eB0QEnXm12j4DAX4yrjjhJmsyuWtSSZ+Buo= +google.golang.org/genproto v0.0.0-20220622171453-ea41d75dfa0f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc 
v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/couchbase/gocb.v1 v1.6.4/go.mod h1:Ri5Qok4ZKiwmPr75YxZ0uELQy45XJgUSzeUnK806gTY= +gopkg.in/couchbase/gocbcore.v7 v7.1.18/go.mod h1:48d2Be0MxRtsyuvn+mWzqmoGUG9uA00ghopzOs148/E= +gopkg.in/couchbaselabs/gocbconnstr.v1 v1.0.4/go.mod h1:ZjII0iKx4Veo6N6da+pEZu/ptNyKLg9QTVt7fFmR6sw= +gopkg.in/couchbaselabs/gojcbmock.v1 v1.0.4/go.mod h1:jl/gd/aQ2S8whKVSTnsPs6n7BPeaAuw9UglBD/OF7eo= +gopkg.in/couchbaselabs/jsonx.v1 v1.0.1/go.mod h1:oR201IRovxvLW/eISevH12/+MiKHtNQAKfcX8iWZvJY= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= +gopkg.in/gorethink/gorethink.v4 v4.1.0/go.mod h1:M7JgwrUAmshJ3iUbEK0Pt049MPyPK+CYDGGaEjdZb/c= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 
v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/kataras/go-serializer.v0 v0.0.4/go.mod h1:v2jHg/3Wp7uncDNzenTsX75PRDxhzlxoo/qDvM4ZGxk= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= +k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= +k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= +k8s.io/apimachinery v0.20.0/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= +k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= +k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= +k8s.io/cli-runtime v0.23.0/go.mod h1:B5N3YH0KP1iKr6gEuJ/RRmGjO0mJQ/f/JrsmEiPQAlU= +k8s.io/client-go v0.20.0/go.mod h1:4KWh/g+Ocd8KkCwKF8vUNnmqgv+EVnQDK4MBF4oB5tY= +k8s.io/client-go v0.20.1/go.mod 
h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= +k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/code-generator v0.20.0/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= +k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= +k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod 
h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/metrics v0.20.0/go.mod h1:9yiRhfr8K8sjdj2EthQQE9WvpYDvsXIV3CjN4Ruq4Jw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +oras.land/oras-go v1.1.0/go.mod h1:1A7vR/0KknT2UkJVWh+xMi95I/AhK8ZrxrnUSmXN0bQ= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod 
h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= +sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= +sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +skywalking.apache.org/repo/goapi v0.0.0-20220401015832-2c9eee9481eb h1:+PP2DpKFN/rEporLdPI4A7bPWQjwfARlUDKNhSab8iM= +skywalking.apache.org/repo/goapi 
v0.0.0-20220401015832-2c9eee9481eb/go.mod h1:uWwwvhcwe2MD/nJCg0c1EE/eL6KzaBosLHDfMFoEJ30= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +stathat.com/c/consistent v1.0.0/go.mod h1:QkzMWzcbB+yQBL2AttO6sgsQS/JSTapcDISJalmCDS0= diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-uri/hello.go b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-uri/hello.go new file mode 100644 index 0000000..d726b8e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/openfunction/function-example/test-uri/hello.go @@ -0,0 +1,38 @@ +/* + * Copyright 2022 The OpenFunction Authors. + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package hello + +import ( + "fmt" + ofctx "github.com/OpenFunction/functions-framework-go/context" + "net/http" + + "github.com/OpenFunction/functions-framework-go/functions" +) + +func init() { + functions.HTTP("HelloWorld", HelloWorld, + functions.WithFunctionPath("/{greeting}")) +} + +func HelloWorld(w http.ResponseWriter, r *http.Request) { + vars := ofctx.VarsFromCtx(r.Context()) + fmt.Fprintf(w, "Hello, %s!\n", vars["greeting"]) +} diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/otelcol-contrib/config.yaml b/CloudronPackages/APISIX/apisix-source/ci/pod/otelcol-contrib/config.yaml new file mode 100644 index 0000000..6068f4c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/otelcol-contrib/config.yaml @@ -0,0 +1,32 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +receivers: + otlp: + protocols: + grpc: + endpoint: 0.0.0.0:4317 + http: + endpoint: 0.0.0.0:4318 +exporters: + file: + path: /etc/otelcol-contrib/data-otlp.json +service: + pipelines: + traces: + receivers: [otlp] + exporters: [file] diff --git a/CloudronPackages/APISIX/apisix-source/ci/pod/vector/vector.toml b/CloudronPackages/APISIX/apisix-source/ci/pod/vector/vector.toml new file mode 100644 index 0000000..26716bc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/pod/vector/vector.toml @@ -0,0 +1,111 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +[sources.log-from-tcp] +type = "socket" +address = "0.0.0.0:3000" +host_key = "host" +mode = "tcp" +port_key = "port" +shutdown_timeout_secs = 30 +socket_file_mode = 511 + +[sources.log-from-http] +type = "http_server" +address = "0.0.0.0:3001" + +[sources.log-from-udp] +type = "socket" +address = "0.0.0.0:8127" +host_key = "host" +mode = "udp" +port_key = "port" + +[sources.log-from-tls] +type = "socket" +address = "0.0.0.0:43000" +host_key = "host" +mode = "tcp" +port_key = "port" +tls.enabled = true +tls.verify = true +tls.ca_file = "/certs/vector_logs_ca.crt" +tls.crt_file = "/certs/vector_logs_server.crt" +tls.key_file = "/certs/vector_logs_server.key" + +[sources.log-from-syslog-tcp] +type = "syslog" +address = "0.0.0.0:5140" +mode = "tcp" + +[sources.log-from-syslog-udp] +type = "syslog" +address = "0.0.0.0:5150" +mode = "udp" + +[sources.log-from-splunk] +type = "splunk_hec" +address = "0.0.0.0:18088" +valid_tokens = [ + "BD274822-96AA-4DA6-90EC-18940FB2414C" +] + +[sinks.log-2-console] +inputs = [ "log-from-tcp", "log-from-tls", "log-from-syslog-tcp", "log-from-syslog-udp", "log-from-udp", "log-from-splunk", "log-from-http"] +type = "console" +encoding.codec = "json" + +[sinks.log-2-tcp-file] +inputs = [ "log-from-tcp" ] +type = "file" +encoding.codec = "text" +path = "/etc/vector/tcp.log" + +[sinks.log-2-http-file] +inputs = [ "log-from-http" ] +type = "file" +encoding.codec = "text" +path = "/etc/vector/http.log" + +[sinks.log-2-udp-file] +inputs = [ "log-from-udp" ] +type = "file" +encoding.codec = "json" +path = "/etc/vector/udp.log" + +[sinks.tls-log-2-file] +inputs = [ "log-from-tls" ] +type = "file" +encoding.codec = "json" +path = "/etc/vector/tls-datas.log" + +[sinks.log-2-syslog-tcp-file] +inputs = [ "log-from-syslog-tcp" ] +type = "file" +encoding.codec = "text" +path = "/etc/vector/syslog-tcp.log" + +[sinks.log-2-splunk-file] +inputs = [ "log-from-splunk" ] +type = "file" +encoding.codec = "json" +path = "/etc/vector/splunk.log" + 
+[sinks.log-2-syslog-udp-file] +inputs = [ "log-from-syslog-udp" ] +type = "file" +encoding.codec = "text" +path = "/etc/vector/syslog-udp.log" diff --git a/CloudronPackages/APISIX/apisix-source/ci/redhat-ci.sh b/CloudronPackages/APISIX/apisix-source/ci/redhat-ci.sh new file mode 100755 index 0000000..8cd63c8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/redhat-ci.sh @@ -0,0 +1,116 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./ci/common.sh +install_dependencies() { + export_version_info + export_or_prefix + + # install build & runtime deps + yum install -y --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms \ + wget tar gcc gcc-c++ automake autoconf libtool make unzip git sudo openldap-devel hostname patch \ + which ca-certificates pcre pcre-devel xz \ + openssl-devel + yum install -y libyaml-devel + yum install -y --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms cpanminus perl + + # install newer curl + yum makecache + yum install -y xz + install_curl + + # install apisix-runtime to make apisix's rpm test work + yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo + yum install -y openresty-pcre-devel openresty-zlib-devel + + install_apisix_runtime + curl -o /usr/local/openresty/openssl3/ssl/openssl.cnf \ + https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/conf/openssl3/openssl.cnf + + # patch lua-resty-events + sed -i 's/log(ERR, "event worker failed: ", perr)/log(ngx.WARN, "event worker failed: ", perr)/' /usr/local/openresty/lualib/resty/events/worker.lua + + # install luarocks + ./utils/linux-install-luarocks.sh + + # install etcdctl + ./ci/linux-install-etcd-client.sh + + # install vault cli capabilities + install_vault_cli + + # install brotli + yum install -y cmake3 + install_brotli + + # install test::nginx + cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1) + + # add go1.15 binary to the path + mkdir build-cache + pushd build-cache/ + # Go is required inside the container. 
+ wget -q https://golang.org/dl/go1.17.linux-amd64.tar.gz && tar -xf go1.17.linux-amd64.tar.gz + export PATH=$PATH:$(pwd)/go/bin + popd + # install and start grpc_server_example + pushd t/grpc_server_example + + CGO_ENABLED=0 go build + popd + + yum install -y iproute procps + start_grpc_server_example + + start_sse_server_example + + # installing grpcurl + install_grpcurl + + # install nodejs + install_nodejs + + # grpc-web server && client + pushd t/plugin/grpc-web + ./setup.sh + # back to home directory + popd + + # install dependencies + git clone https://github.com/openresty/test-nginx.git test-nginx + create_lua_deps +} + +run_case() { + export_or_prefix + make init + set_coredns + # run test cases + FLUSH_ETCD=1 TEST_EVENTS_MODULE=$TEST_EVENTS_MODULE prove --timer -Itest-nginx/lib -I./ -r ${TEST_FILE_SUB_DIR} | tee /tmp/test.result + rerun_flaky_tests /tmp/test.result +} + +case_opt=$1 +case $case_opt in + (install_dependencies) + install_dependencies + ;; + (run_case) + run_case + ;; +esac diff --git a/CloudronPackages/APISIX/apisix-source/ci/tars-ci.sh b/CloudronPackages/APISIX/apisix-source/ci/tars-ci.sh new file mode 100755 index 0000000..c160db7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/ci/tars-ci.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./ci/common.sh + +run_case() { + export_or_prefix + export PERL5LIB=.:$PERL5LIB + prove -Itest-nginx/lib -I./ -r t/tars | tee test-result + rerun_flaky_tests test-result +} + +case_opt=$1 +case $case_opt in + (run_case) + run_case + ;; +esac diff --git a/CloudronPackages/APISIX/apisix-source/conf/cert/ssl_PLACE_HOLDER.crt b/CloudronPackages/APISIX/apisix-source/conf/cert/ssl_PLACE_HOLDER.crt new file mode 100644 index 0000000..503f277 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/conf/cert/ssl_PLACE_HOLDER.crt @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G +A1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa +GA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n +RG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM +CHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe +cvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb +VDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR +2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr +abf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2 +WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/ +Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1 +/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh +/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj +cTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ +tSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl +c3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC +tC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY +1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl 
+PYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob +rJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy +hme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1 +7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y +IJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve +U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/conf/cert/ssl_PLACE_HOLDER.key b/CloudronPackages/APISIX/apisix-source/conf/cert/ssl_PLACE_HOLDER.key new file mode 100644 index 0000000..7105067 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/conf/cert/ssl_PLACE_HOLDER.key @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG5AIBAAKCAYEAyCM0rqJecvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5 +jhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo +eLj0efMiOepOSZflj9Ob4yKR2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5s +mPtW1Oc/BV5terhscJdOgmRrabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt +6iMWEGeQU6mwPENgvj1olji2WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiy +Vt1TmtMWn1ztk6FfLRqwJWR/Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1o +npRVeXhrBajbCRDRBMwaNw/1/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2 +fzaqpIfyUbPST4GdqNG9NyIh/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI +1cGrGwyXbrieNp63AgMBAAECggGBAJM8g0duoHmIYoAJzbmKe4ew0C5fZtFUQNmu +O2xJITUiLT3ga4LCkRYsdBnY+nkK8PCnViAb10KtIT+bKipoLsNWI9Xcq4Cg4G3t +11XQMgPPgxYXA6m8t+73ldhxrcKqgvI6xVZmWlKDPn+CY/Wqj5PA476B5wEmYbNC +GIcd1FLl3E9Qm4g4b/sVXOHARF6iSvTR+6ol4nfWKlaXSlx2gNkHuG8RVpyDsp9c +z9zUqAdZ3QyFQhKcWWEcL6u9DLBpB/gUjyB3qWhDMe7jcCBZR1ALyRyEjmDwZzv2 +jlv8qlLFfn9R29UI0pbuL1eRAz97scFOFme1s9oSU9a12YHfEd2wJOM9bqiKju8y +DZzePhEYuTZ8qxwiPJGy7XvRYTGHAs8+iDlG4vVpA0qD++1FTpv06cg/fOdnwshE +OJlEC0ozMvnM2rZ2oYejdG3aAnUHmSNa5tkJwXnmj/EMw1TEXf+H6+xknAkw05nh +zsxXrbuFUe7VRfgB5ElMA/V4NsScgQKBwQDmMRtnS32UZjw4A8DsHOKFzugfWzJ8 +Gc+3sTgs+4dNIAvo0sjibQ3xl01h0BB2Pr1KtkgBYB8LJW/FuYdCRS/KlXH7PHgX 
+84gYWImhNhcNOL3coO8NXvd6+m+a/Z7xghbQtaraui6cDWPiCNd/sdLMZQ/7LopM +RbM32nrgBKMOJpMok1Z6zsPzT83SjkcSxjVzgULNYEp03uf1PWmHuvjO1yELwX9/ +goACViF+jst12RUEiEQIYwr4y637GQBy+9cCgcEA3pN9W5OjSPDVsTcVERig8++O +BFURiUa7nXRHzKp2wT6jlMVcu8Pb2fjclxRyaMGYKZBRuXDlc/RNO3uTytGYNdC2 +IptU5N4M7iZHXj190xtDxRnYQWWo/PR6EcJj3f/tc3Itm1rX0JfuI3JzJQgDb9Z2 +s/9/ub8RRvmQV9LM/utgyOwNdf5dyVoPcTY2739X4ZzXNH+CybfNa+LWpiJIVEs2 +txXbgZrhmlaWzwA525nZ0UlKdfktdcXeqke9eBghAoHARVTHFy6CjV7ZhlmDEtqE +U58FBOS36O7xRDdpXwsHLnCXhbFu9du41mom0W4UdzjgVI9gUqG71+SXrKr7lTc3 +dMHcSbplxXkBJawND/Q1rzLG5JvIRHO1AGJLmRgIdl8jNgtxgV2QSkoyKlNVbM2H +Wy6ZSKM03lIj74+rcKuU3N87dX4jDuwV0sPXjzJxL7NpR/fHwgndgyPcI14y2cGz +zMC44EyQdTw+B/YfMnoZx83xaaMNMqV6GYNnTHi0TO2TAoHBAKmdrh9WkE2qsr59 +IoHHygh7Wzez+Ewr6hfgoEK4+QzlBlX+XV/9rxIaE0jS3Sk1txadk5oFDebimuSk +lQkv1pXUOqh+xSAwk5v88dBAfh2dnnSa8HFN3oz+ZfQYtnBcc4DR1y2X+fVNgr3i +nxruU2gsAIPFRnmvwKPc1YIH9A6kIzqaoNt1f9VM243D6fNzkO4uztWEApBkkJgR +4s/yOjp6ovS9JG1NMXWjXQPcwTq3sQVLnAHxZRJmOvx69UmK4QKBwFYXXjeXiU3d +bcrPfe6qNGjfzK+BkhWznuFUMbuxyZWDYQD5yb6ukUosrj7pmZv3BxKcKCvmONU+ +CHgIXB+hG+R9S2mCcH1qBQoP/RSm+TUzS/Bl2UeuhnFZh2jSZQy3OwryUi6nhF0u +LDzMI/6aO1ggsI23Ri0Y9ZtqVKczTkxzdQKR9xvoNBUufjimRlS80sJCEB3Qm20S +wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/conf/config.yaml b/CloudronPackages/APISIX/apisix-source/conf/config.yaml new file mode 100644 index 0000000..6a3c430 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/conf/config.yaml @@ -0,0 +1,63 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# If you want to set the specified configuration value, you can set the new value +# in this file. For example if you want to specify the etcd address: +# +# deployment: +# role: traditional +# role_traditional: +# config_provider: etcd +# etcd: +# host: +# - http://127.0.0.1:2379 +# +# To configure via environment variables, you can use `${{VAR}}` syntax. For instance: +# +# deployment: +# role: traditional +# role_traditional: +# config_provider: etcd +# etcd: +# host: +# - http://${{ETCD_HOST}}:2379 +# +# And then run `export ETCD_HOST=$your_host` before `make init`. +# +# If the configured environment variable can't be found, an error will be thrown. +# +# Also, if you want to use a default value when the environment variable is not set, +# use `${{VAR:=default_value}}` instead. For instance: +# +# deployment: +# role: traditional +# role_traditional: +# config_provider: etcd +# etcd: +# host: +# - http://${{ETCD_HOST:=localhost}}:2379 +# +# This will look up the environment variable `ETCD_HOST` first, and if it does not exist, `localhost` will be used as the default value. +# +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: + - name: admin + key: '' # Using a fixed API token has security risks; please update it before deploying to a production environment. If left empty, a token will be auto-generated by APISIX and written back here. It is recommended to use an external mechanism to generate and store the token.
+ role: admin diff --git a/CloudronPackages/APISIX/apisix-source/conf/config.yaml.example b/CloudronPackages/APISIX/apisix-source/conf/config.yaml.example new file mode 100644 index 0000000..6f2f831 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/conf/config.yaml.example @@ -0,0 +1,712 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# CAUTION: DO NOT MODIFY DEFAULT CONFIGURATIONS IN THIS FILE. +# Keep the custom configurations in conf/config.yaml. +# + +apisix: + # node_listen: 9080 # APISIX listening port. + node_listen: # APISIX listening ports. + - 9080 + # - port: 9081 + # - ip: 127.0.0.2 # If not set, default to `0.0.0.0` + # port: 9082 + enable_admin: true # Admin API + enable_dev_mode: false # If true, set nginx `worker_processes` to 1. + enable_reuseport: true # If true, enable nginx SO_REUSEPORT option. + show_upstream_status_in_response_header: false # If true, include the upstream HTTP status code in + # the response header `X-APISIX-Upstream-Status`. + # If false, show `X-APISIX-Upstream-Status` only if + # the upstream response code is 5xx. 
+ enable_ipv6: true + enable_http2: true + + # proxy_protocol: # PROXY Protocol configuration + # listen_http_port: 9181 # APISIX listening port for HTTP traffic with PROXY protocol. + # listen_https_port: 9182 # APISIX listening port for HTTPS traffic with PROXY protocol. + # enable_tcp_pp: true # Enable the PROXY protocol when stream_proxy.tcp is set. + # enable_tcp_pp_to_upstream: true # Enable the PROXY protocol. + + enable_server_tokens: true # If true, show APISIX version in the `Server` response header. + extra_lua_path: "" # Extend lua_package_path to load third-party code. + extra_lua_cpath: "" # Extend lua_package_cpath to load third-party code. + # lua_module_hook: "my_project.my_hook" # Hook module used to inject third-party code into APISIX. + + proxy_cache: # Proxy Caching configuration + cache_ttl: 10s # The default caching time on disk if the upstream does not specify a caching time. + zones: + - name: disk_cache_one # Name of the cache. + memory_size: 50m # Size of the memory to store the cache index. + disk_size: 1G # Size of the disk to store the cache data. + disk_path: /tmp/disk_cache_one # Path to the cache file for disk cache. + cache_levels: "1:2" # Cache hierarchy levels of disk cache. + # - name: disk_cache_two + # memory_size: 50m + # disk_size: 1G + # disk_path: "/tmp/disk_cache_two" + # cache_levels: "1:2" + - name: memory_cache + memory_size: 50m + + delete_uri_tail_slash: false # Delete the '/' at the end of the URI + normalize_uri_like_servlet: false # If true, use the same path normalization rules as the Java + # servlet specification. See https://github.com/jakartaee/servlet/blob/master/spec/src/main/asciidoc/servlet-spec-body.adoc#352-uri-path-canonicalization, which is used in Tomcat. + + router: + http: radixtree_host_uri # radixtree_host_uri: match route by host and URI + # radixtree_uri: match route by URI + # radixtree_uri_with_parameter: similar to radixtree_uri but match URI with parameters. 
See https://github.com/api7/lua-resty-radixtree/#parameters-in-path for more details. + ssl: radixtree_sni # radixtree_sni: match route by SNI + + # http is the default proxy mode. proxy_mode can be one of `http`, `stream`, or `http&stream` + proxy_mode: "http" + # stream_proxy: # TCP/UDP L4 proxy + # tcp: + # - addr: 9100 # Set the TCP proxy listening ports. + # tls: true + # - addr: "127.0.0.1:9101" + # udp: # Set the UDP proxy listening ports. + # - 9200 + # - "127.0.0.1:9201" + + # dns_resolver: # If not set, read from `/etc/resolv.conf` + # - 1.1.1.1 + # - 8.8.8.8 + # dns_resolver_valid: 30 # Override the default TTL of the DNS records. + resolver_timeout: 5 # Set the time in seconds that the server will wait for a response from the + # DNS resolver before timing out. + enable_resolv_search_opt: true # If true, use search option in the resolv.conf file in DNS lookups. + + ssl: + enable: true + listen: # APISIX listening port for HTTPS traffic. + - port: 9443 + enable_http3: false # Enable HTTP/3 (with QUIC). If not set default to `false`. + # - ip: 127.0.0.3 # If not set, default to `0.0.0.0`. + # port: 9445 + # enable_http3: true + #ssl_trusted_certificate: system # Specifies a file path with trusted CA certificates in the PEM format. The default value is "system". + ssl_protocols: TLSv1.2 TLSv1.3 # TLS versions supported. + ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 + ssl_session_tickets: false # If true, session tickets are used for SSL/TLS connections. + # Disabled by default because it renders Perfect Forward Secrecy (FPS) + # useless. See https://github.com/mozilla/server-side-tls/issues/135. + + # fallback_sni: "my.default.domain" # Fallback SNI to be used if the client does not send SNI during + # # the handshake. 
+ + enable_control: true # Control API + # control: + # ip: 127.0.0.1 + # port: 9090 + + disable_sync_configuration_during_start: false # Safe exit. TO BE REMOVED. + + data_encryption: # Data encryption settings. + enable_encrypt_fields: true # Whether enable encrypt fields specified in `encrypt_fields` in plugin schema. + keyring: # This field is used to encrypt the private key of SSL and the `encrypt_fields` + # in plugin schema. + - qeddd145sfvddff3 # Set the encryption key for AES-128-CBC. It should be a hexadecimal string + # of length 16. + - edd1c9f0985e76a2 # If not set, APISIX saves the original data into etcd. + # CAUTION: If you would like to update the key, add the new key as the + # first item in the array and keep the older keys below the newly added + # key, so that data can be decrypted with the older keys and encrypted + # with the new key. Removing the old keys directly can render the data + # unrecoverable. + + events: # Event distribution module configuration + module: lua-resty-events # Sets the name of the events module used. + # Supported module: lua-resty-worker-events and lua-resty-events +# status: # When enabled, APISIX will provide `/status` and `/status/ready` endpoints + # ip: 127.0.0.1 # /status endpoint will return 200 status code if APISIX has successfully started and running correctly + # port: 7085 # /status/ready endpoint will return 503 status code if any of the workers do not receive config from etcd + # or (standalone mode) the config isn't loaded yet either via file or Admin API. +nginx_config: # Config for render the template to generate nginx.conf + # user: root # Set the execution user of the worker process. This is only + # effective if the master process runs with super-user privileges. + error_log: logs/error.log # Location of the error log. + error_log_level: warn # Logging level: info, debug, notice, warn, error, crit, alert, or emerg. 
+ worker_processes: auto # Automatically determine the optimal number of worker processes based + # on the available system resources. + # If you want use multiple cores in container, you can inject the number of + # CPU cores as environment variable "APISIX_WORKER_PROCESSES". + enable_cpu_affinity: false # Disable CPU affinity by default as worker_cpu_affinity affects the + # behavior of APISIX in containers. For example, multiple instances could + # be bound to one CPU core, which is not desirable. + # If APISIX is deployed on a physical machine, CPU affinity can be enabled. + worker_rlimit_nofile: 20480 # The number of files a worker process can open. + # The value should be larger than worker_connections. + worker_shutdown_timeout: 240s # Timeout for a graceful shutdown of worker processes. + + max_pending_timers: 16384 # The maximum number of pending timers that can be active at any given time. + # Error "too many pending timers" indicates the threshold is reached. + max_running_timers: 4096 # The maximum number of running timers that can be active at any given time. + # Error "lua_max_running_timers are not enough" error indicates the + # threshold is reached. + + event: + worker_connections: 10620 + + # envs: # Get environment variables. + # - TEST_ENV + + meta: + lua_shared_dict: # Nginx Lua shared memory zone. Size units are m or k. + prometheus-metrics: 15m + standalone-config: 10m + + stream: + enable_access_log: false # Enable stream proxy access logging. + access_log: logs/access_stream.log # Location of the stream access log. + access_log_format: | + "$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time" # Customize log format: http://nginx.org/en/docs/varindex.html + access_log_format_escape: default # Escape default or json characters in variables. + lua_shared_dict: # Nginx Lua shared memory zone. Size units are m or k. 
+ etcd-cluster-health-check-stream: 10m + lrucache-lock-stream: 10m + plugin-limit-conn-stream: 10m + worker-events-stream: 10m + tars-stream: 1m + upstream-healthcheck-stream: 10m + + # Add other custom Nginx configurations. + # Users are responsible for validating the custom configurations + # to ensure they are not in conflict with APISIX configurations. + main_configuration_snippet: | + # Add custom Nginx main configuration to nginx.conf. + # The configuration should be well indented! + http_configuration_snippet: | + # Add custom Nginx http configuration to nginx.conf. + # The configuration should be well indented! + http_server_configuration_snippet: | + # Add custom Nginx http server configuration to nginx.conf. + # The configuration should be well indented! + http_server_location_configuration_snippet: | + # Add custom Nginx http server location configuration to nginx.conf. + # The configuration should be well indented! + http_admin_configuration_snippet: | + # Add custom Nginx admin server configuration to nginx.conf. + # The configuration should be well indented! + http_end_configuration_snippet: | + # Add custom Nginx http end configuration to nginx.conf. + # The configuration should be well indented! + stream_configuration_snippet: | + # Add custom Nginx stream configuration to nginx.conf. + # The configuration should be well indented! + + http: + enable_access_log: true # Enable HTTP proxy access logging. + access_log: logs/access.log # Location of the access log. + access_log_buffer: 16384 # buffer size of access log. + access_log_format: | + "$remote_addr - $remote_user [$time_local] $http_host \"$request\" $status $body_bytes_sent $request_time \"$http_referer\" \"$http_user_agent\" $upstream_addr $upstream_status $upstream_response_time \"$upstream_scheme://$upstream_host$upstream_uri\"" + # Customize log format: http://nginx.org/en/docs/varindex.html + access_log_format_escape: default # Escape default or json characters in variables. 
+ keepalive_timeout: 60s # Set the maximum time for which TCP connection keeps alive. + client_header_timeout: 60s # Set the maximum time waiting for client to send the entire HTTP + # request header before closing the connection. + client_body_timeout: 60s # Set the maximum time waiting for client to send the request body. + client_max_body_size: 0 # Set the maximum allowed size of the client request body. + # Default to 0, unlimited. + # Unlike Nginx, APISIX does not limit the body size by default. + # If exceeded, the 413 (Request Entity Too Large) error is returned. + send_timeout: 10s # Set the maximum time for transmitting a response to the client before closing. + underscores_in_headers: "on" # Allow HTTP request headers to contain underscores in their names. + real_ip_header: X-Real-IP # https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header + real_ip_recursive: "off" # http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive + real_ip_from: # http://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from + - 127.0.0.1 + - "unix:" + + # custom_lua_shared_dict: # Custom Nginx Lua shared memory zone for nginx.conf. Size units are m or k. + # ipc_shared_dict: 100m # Custom shared cache, format: `cache-key: cache-size` + + proxy_ssl_server_name: true # Send the server name in the SNI extension when establishing an SSL/TLS + # connection with the upstream server, allowing the upstream server to + # select the appropriate SSL/TLS certificate and configuration based on + # the requested server name. + + upstream: + keepalive: 320 # Set the maximum time of keep-alive connections to the upstream servers. + # When the value is exceeded, the least recently used connection is closed. + keepalive_requests: 1000 # Set the maximum number of requests that can be served through one + # keep-alive connection. + # After the maximum number of requests is made, the connection is closed. 
+ keepalive_timeout: 60s # Set the maximum time for which TCP connection keeps alive. + charset: utf-8 # Add the charset to the "Content-Type" response header field. + # See http://nginx.org/en/docs/http/ngx_http_charset_module.html#charset + variables_hash_max_size: 2048 # Set the maximum size of the variables hash table. + + lua_shared_dict: # Nginx Lua shared memory zone. Size units are m or k. + internal-status: 10m + plugin-limit-req: 10m + plugin-limit-count: 10m + prometheus-metrics: 10m # In production, less than 50m is recommended + plugin-limit-conn: 10m + upstream-healthcheck: 10m + worker-events: 10m + lrucache-lock: 10m + balancer-ewma: 10m + balancer-ewma-locks: 10m + balancer-ewma-last-touched-at: 10m + plugin-limit-req-redis-cluster-slot-lock: 1m + plugin-limit-count-redis-cluster-slot-lock: 1m + plugin-limit-conn-redis-cluster-slot-lock: 1m + tracing_buffer: 10m + plugin-api-breaker: 10m + etcd-cluster-health-check: 10m + discovery: 1m + jwks: 1m + introspection: 10m + access-tokens: 1m + ext-plugin: 1m + tars: 1m + cas-auth: 10m + ocsp-stapling: 10m + mcp-session: 10m + +# discovery: # Service Discovery +# dns: +# servers: +# - "127.0.0.1:8600" # Replace with the address of your DNS server. +# resolv_conf: /etc/resolv.conf # Replace with the path to the local DNS resolv config. Configure either "servers" or "resolv_conf". +# order: # Resolve DNS records this order. +# - last # Try the latest successful type for a hostname. 
+# - SRV +# - A +# - AAAA +# - CNAME +# eureka: # Eureka +# host: # Eureka address(es) +# - "http://127.0.0.1:8761" +# prefix: /eureka/ +# fetch_interval: 30 # Default 30s +# weight: 100 # Default weight for node +# timeout: +# connect: 2000 # Default 2000ms +# send: 2000 # Default 2000ms +# read: 5000 # Default 5000ms +# nacos: # Nacos +# host: # Nacos address(es) +# - "http://${username}:${password}@${host1}:${port1}" +# prefix: "/nacos/v1/" +# fetch_interval: 30 # Default 30s +# `weight` is the `default_weight` that will be attached to each discovered node that +# doesn't have a weight explicitly provided in nacos results +# weight: 100 # Default 100. +# timeout: +# connect: 2000 # Default 2000ms +# send: 2000 # Default 2000ms +# read: 5000 # Default 5000ms +# access_key: "" # Nacos AccessKey ID in Alibaba Cloud, notice that it's for Nacos instances on Microservices Engine (MSE) +# secret_key: "" # Nacos AccessKey Secret in Alibaba Cloud, notice that it's for Nacos instances on Microservices Engine (MSE) +# consul_kv: # Consul KV +# servers: # Consul KV address(es) +# - "http://127.0.0.1:8500" +# - "http://127.0.0.1:8600" +# prefix: "upstreams" +# skip_keys: # Skip special keys +# - "upstreams/unused_api/" +# timeout: +# connect: 2000 # Default 2000ms +# read: 2000 # Default 2000ms +# wait: 60 # Default 60s +# weight: 1 # Default 1 +# fetch_interval: 3 # Default 3s. Effective only when keepalive is false. +# keepalive: true # Default to true. Use long pull to query Consul. +# default_server: # Define default server to route traffic to. +# host: "127.0.0.1" +# port: 20999 +# metadata: +# fail_timeout: 1 # Default 1ms +# weight: 1 # Default 1 +# max_fails: 1 # Default 1 +# dump: # Dump the Consul key-value (KV) store to a file. +# path: "logs/consul_kv.dump" # Location of the dump file. +# expire: 2592000 # Specify the expiration time of the dump file in units of seconds. 
+# consul: # Consul +# servers: # Consul address(es) +# - "http://127.0.0.1:8500" +# - "http://127.0.0.1:8600" +# skip_services: # Skip services during service discovery. +# - "service_a" +# timeout: +# connect: 2000 # Default 2000ms +# read: 2000 # Default 2000ms +# wait: 60 # Default 60s +# weight: 1 # Default 1 +# fetch_interval: 3 # Default 3s. Effective only when keepalive is false. +# keepalive: true # Default to true. Use long pull to query Consul. +# default_service: # Define the default service to route traffic to. +# host: "127.0.0.1" +# port: 20999 +# metadata: +# fail_timeout: 1 # Default 1ms +# weight: 1 # Default 1 +# max_fails: 1 # Default 1 +# dump: # Dump the Consul key-value (KV) store to a file. +# path: "logs/consul_kv.dump" # Location of the dump file. +# expire: 2592000 # Specify the expiration time of the dump file in units of seconds. +# load_on_init: true # Default true, load the consul dump file on init +# kubernetes: # Kubernetes service discovery +# ### kubernetes service discovery both support single-cluster and multi-cluster mode +# ### applicable to the case where the service is distributed in a single or multiple kubernetes clusters. 
+# ### single-cluster mode ### +# service: +# schema: https # apiserver schema, options [http, https], default https +# host: ${KUBERNETES_SERVICE_HOST} # apiserver host, options [ipv4, ipv6, domain, environment variable], default ${KUBERNETES_SERVICE_HOST} +# port: ${KUBERNETES_SERVICE_PORT} # apiserver port, options [port number, environment variable], default ${KUBERNETES_SERVICE_PORT} +# client: +# # serviceaccount token or path of serviceaccount token_file +# token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} +# # token: |- +# # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif +# # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI +# # kubernetes discovery plugin support use namespace_selector +# # you can use one of [equal, not_equal, match, not_match] filter namespace +# namespace_selector: +# # only save endpoints with namespace equal default +# equal: default +# # only save endpoints with namespace not equal default +# #not_equal: default +# # only save endpoints with namespace match one of [default, ^my-[a-z]+$] +# #match: +# #- default +# #- ^my-[a-z]+$ +# # only save endpoints with namespace not match one of [default, ^my-[a-z]+$ ] +# #not_match: +# #- default +# #- ^my-[a-z]+$ +# # kubernetes discovery plugin support use label_selector +# # for the expression of label_selector, please refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels +# label_selector: |- +# first="a",second="b" +# # reserved lua shared memory size,1m memory can store about 1000 pieces of endpoint +# shared_size: 1m #default 1m +# ### single-cluster mode ### +# ### multi-cluster mode ### +# - id: release # a custom name refer to the cluster, pattern ^[a-z0-9]{1,8} +# service: +# schema: https # apiserver schema, options [http, https], default https +# host: ${KUBERNETES_SERVICE_HOST} # apiserver host, options [ipv4, ipv6, domain, environment variable] +# port: 
${KUBERNETES_SERVICE_PORT} # apiserver port, options [port number, environment variable] +# client: +# # serviceaccount token or path of serviceaccount token_file +# token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} +# # token: |- +# # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif +# # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI +# # kubernetes discovery plugin support use namespace_selector +# # you can use one of [equal, not_equal, match, not_match] filter namespace +# namespace_selector: +# # only save endpoints with namespace equal default +# equal: default +# # only save endpoints with namespace not equal default +# #not_equal: default +# # only save endpoints with namespace match one of [default, ^my-[a-z]+$] +# #match: +# #- default +# #- ^my-[a-z]+$ +# # only save endpoints with namespace not match one of [default, ^my-[a-z]+$ ] +# #not_match: +# #- default +# #- ^my-[a-z]+$ +# # kubernetes discovery plugin support use label_selector +# # for the expression of label_selector, please refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels +# label_selector: |- +# first="a",second="b" +# # reserved lua shared memory size,1m memory can store about 1000 pieces of endpoint +# shared_size: 1m #default 1m +# ### multi-cluster mode ### + +graphql: + max_size: 1048576 # Set the maximum size limitation of graphql in bytes. Default to 1MiB. 
+ +# ext-plugin: +# cmd: ["ls", "-l"] + +plugins: # plugin list (sorted by priority) + - real-ip # priority: 23000 + - ai # priority: 22900 + - client-control # priority: 22000 + - proxy-control # priority: 21990 + - request-id # priority: 12015 + - zipkin # priority: 12011 + #- skywalking # priority: 12010 + #- opentelemetry # priority: 12009 + - ext-plugin-pre-req # priority: 12000 + - fault-injection # priority: 11000 + - mocking # priority: 10900 + - serverless-pre-function # priority: 10000 + #- batch-requests # priority: 4010 + - cors # priority: 4000 + - ip-restriction # priority: 3000 + - ua-restriction # priority: 2999 + - referer-restriction # priority: 2990 + - csrf # priority: 2980 + - uri-blocker # priority: 2900 + - request-validation # priority: 2800 + - chaitin-waf # priority: 2700 + - multi-auth # priority: 2600 + - openid-connect # priority: 2599 + - cas-auth # priority: 2597 + - authz-casbin # priority: 2560 + - authz-casdoor # priority: 2559 + - wolf-rbac # priority: 2555 + - ldap-auth # priority: 2540 + - hmac-auth # priority: 2530 + - basic-auth # priority: 2520 + - jwt-auth # priority: 2510 + - jwe-decrypt # priority: 2509 + - key-auth # priority: 2500 + - consumer-restriction # priority: 2400 + - attach-consumer-label # priority: 2399 + - forward-auth # priority: 2002 + - opa # priority: 2001 + - authz-keycloak # priority: 2000 + #- error-log-logger # priority: 1091 + - proxy-cache # priority: 1085 + - body-transformer # priority: 1080 + - ai-prompt-template # priority: 1071 + - ai-prompt-decorator # priority: 1070 + - ai-prompt-guard # priority: 1072 + - ai-rag # priority: 1060 + - ai-rate-limiting # priority: 1030 + - ai-aws-content-moderation # priority: 1040 TODO: compare priority with other ai plugins + - proxy-mirror # priority: 1010 + - proxy-rewrite # priority: 1008 + - workflow # priority: 1006 + - api-breaker # priority: 1005 + - limit-conn # priority: 1003 + - limit-count # priority: 1002 + - limit-req # priority: 1001 + #- 
node-status # priority: 1000 + - ai-proxy # priority: 999 + - ai-proxy-multi # priority: 998 + #- brotli # priority: 996 + - gzip # priority: 995 + #- server-info # priority: 990 + - traffic-split # priority: 966 + - redirect # priority: 900 + - response-rewrite # priority: 899 + - mcp-bridge # priority: 510 + - degraphql # priority: 509 + - kafka-proxy # priority: 508 + #- dubbo-proxy # priority: 507 + - grpc-transcode # priority: 506 + - grpc-web # priority: 505 + - http-dubbo # priority: 504 + - public-api # priority: 501 + - prometheus # priority: 500 + - datadog # priority: 495 + - lago # priority: 415 + - loki-logger # priority: 414 + - elasticsearch-logger # priority: 413 + - echo # priority: 412 + - loggly # priority: 411 + - http-logger # priority: 410 + - splunk-hec-logging # priority: 409 + - skywalking-logger # priority: 408 + - google-cloud-logging # priority: 407 + - sls-logger # priority: 406 + - tcp-logger # priority: 405 + - kafka-logger # priority: 403 + - rocketmq-logger # priority: 402 + - syslog # priority: 401 + - udp-logger # priority: 400 + - file-logger # priority: 399 + - clickhouse-logger # priority: 398 + - tencent-cloud-cls # priority: 397 + - inspect # priority: 200 + #- log-rotate # priority: 100 + # <- recommend to use priority (0, 100) for your custom plugins + - example-plugin # priority: 0 + #- gm # priority: -43 + #- ocsp-stapling # priority: -44 + - aws-lambda # priority: -1899 + - azure-functions # priority: -1900 + - openwhisk # priority: -1901 + - openfunction # priority: -1902 + - serverless-post-function # priority: -2000 + - ext-plugin-post-req # priority: -3000 + - ext-plugin-post-resp # priority: -4000 + +stream_plugins: # stream plugin list (sorted by priority) + - ip-restriction # priority: 3000 + - limit-conn # priority: 1003 + - mqtt-proxy # priority: 1000 + #- prometheus # priority: 500 + - syslog # priority: 401 + # <- recommend to use priority (0, 100) for your custom plugins + + +# wasm: +# plugins: +# - name: 
wasm_log +# priority: 7999 +# file: t/wasm/log/main.go.wasm + +# xrpc: +# protocols: +# - name: pingpong +plugin_attr: # Plugin attributes + log-rotate: # Plugin: log-rotate + timeout: 10000 # maximum wait time for a log rotation(unit: millisecond) + interval: 3600 # Set the log rotate interval in seconds. + max_kept: 168 # Set the maximum number of log files to keep. If exceeded, historic logs are deleted. + max_size: -1 # Set the maximum size of log files in bytes before a rotation. + # Skip size check if max_size is less than 0. + enable_compression: false # Enable log file compression (gzip). + skywalking: # Plugin: skywalking + service_name: APISIX # Set the service name for SkyWalking reporter. + service_instance_name: APISIX Instance Name # Set the service instance name for SkyWalking reporter. + endpoint_addr: http://127.0.0.1:12800 # Set the SkyWalking HTTP endpoint. + report_interval: 3 # Set the reporting interval in second. + opentelemetry: # Plugin: opentelemetry + trace_id_source: x-request-id # Specify the source of the trace ID for OpenTelemetry traces. + resource: + service.name: APISIX # Set the service name for OpenTelemetry traces. + collector: + address: 127.0.0.1:4318 # Set the address of the OpenTelemetry collector to send traces to. + request_timeout: 3 # Set the timeout for requests to the OpenTelemetry collector in seconds. + request_headers: # Set the headers to include in requests to the OpenTelemetry collector. + Authorization: token # Set the authorization header to include an access token. + batch_span_processor: + drop_on_queue_full: false # Drop spans when the export queue is full. + max_queue_size: 1024 # Set the maximum size of the span export queue. + batch_timeout: 2 # Set the timeout for span batches to wait in the export queue before + # being sent. + inactive_timeout: 1 # Set the timeout for spans to wait in the export queue before being sent, + # if the queue is not full. 
+ max_export_batch_size: 16 # Set the maximum number of spans to include in each batch sent to the + # OpenTelemetry collector. + set_ngx_var: false # Export opentelemetry variables to NGINX variables. + prometheus: # Plugin: prometheus + export_uri: /apisix/prometheus/metrics # Set the URI for the Prometheus metrics endpoint. + metric_prefix: apisix_ # Set the prefix for Prometheus metrics generated by APISIX. + enable_export_server: true # Enable the Prometheus export server. + export_addr: # Set the address for the Prometheus export server. + ip: 127.0.0.1 # Set the IP. + port: 9091 # Set the port. + # metrics: # Create extra labels from nginx variables: https://nginx.org/en/docs/varindex.html + # http_status: + # expire: 0 # The expiration time after which metrics are removed. unit: second. + # # 0 means the metrics will not expire + # extra_labels: + # - upstream_addr: $upstream_addr + # - status: $upstream_status # The label name does not need to be the same as the variable name. + # http_latency: + # expire: 0 # The expiration time after which metrics are removed. unit: second. + # # 0 means the metrics will not expire + # extra_labels: + # - upstream_addr: $upstream_addr + # bandwidth: + # expire: 0 # The expiration time after which metrics are removed. unit: second. + # # 0 means the metrics will not expire + # extra_labels: + # - upstream_addr: $upstream_addr + # upstream_status: + # expire: 0 # The expiration time after which metrics are removed. unit: second. + # default_buckets: + # - 10 + # - 50 + # - 100 + # - 200 + # - 500 + server-info: # Plugin: server-info + report_ttl: 60 # Set the TTL in seconds for server info in etcd. + # Maximum: 86400. Minimum: 3. + dubbo-proxy: # Plugin: dubbo-proxy + upstream_multiplex_count: 32 # Set the maximum number of connections that can be multiplexed over + # a single network connection between the Dubbo Proxy and the upstream + # Dubbo services. 
+ proxy-mirror: # Plugin: proxy-mirror + timeout: # Set the timeout for mirrored requests. + connect: 60s + read: 60s + send: 60s + # redirect: # Plugin: redirect + # https_port: 8443 # Set the default port used to redirect HTTP to HTTPS. + inspect: # Plugin: inspect + delay: 3 # Set the delay in seconds for the frequency of checking the hooks file. + hooks_file: "/usr/local/apisix/plugin_inspect_hooks.lua" # Set the path to the Lua file that defines + # hooks. Only administrators should have + # write access to this file for security. + zipkin: # Plugin: zipkin + set_ngx_var: false # export zipkin variables to nginx variables + +deployment: # Deployment configurations + role: traditional # Set deployment mode: traditional, control_plane, or data_plane. + role_traditional: + config_provider: etcd # Set the configuration center. + + #role_data_plane: # Set data plane details if role is data_plane. + # config_provider: etcd # Set the configuration center: etcd, xds, or yaml. + + #role_control_plane: # Set control plane details if role is control_plane. + # config_provider: etcd # Set the configuration center. + + admin: # Admin API + admin_key_required: true # Enable Admin API authentication by default for security. + admin_key: + - + name: admin # admin: write access to configurations. + key: '' # Set API key for the admin of Admin API. + role: admin + # - + # name: viewer # viewer: read-only to configurations. + # key: 4054f7cf07e344346cd3f287985e76a2 # Set API key for the viewer of Admin API. + # role: viewer + + enable_admin_cors: true # Enable Admin API CORS response header `Access-Control-Allow-Origin`. + enable_admin_ui: true # Enable embedded APISIX Dashboard UI. + allow_admin: # Limit Admin API access by IP addresses. + - 127.0.0.0/24 # If not set, any IP address is allowed. + # - "::/64" + admin_listen: # Set the Admin API listening addresses. + ip: 0.0.0.0 # Set listening IP. + port: 9180 # Set listening port. Beware of port conflict with node_listen. 
+ + # https_admin: true # Enable SSL for Admin API on IP and port specified in admin_listen. + # Use admin_api_mtls.admin_ssl_cert and admin_api_mtls.admin_ssl_cert_key. + # admin_api_mtls: # Set this if `https_admin` is true. + # admin_ssl_cert: "" # Set path to SSL/TLS certificate. + # admin_ssl_cert_key: "" # Set path to SSL/TLS key. + # admin_ssl_ca_cert: "" # Set path to CA certificate used to sign client certificates. + + admin_api_version: v3 # Set the version of Admin API (latest: v3). + + etcd: + host: # Set etcd address(es) in the same etcd cluster. + - "http://127.0.0.1:2379" # If TLS is enabled for etcd, use https://127.0.0.1:2379. + prefix: /apisix # Set etcd prefix. + timeout: 30 # The timeout when connect/read/write to etcd, Set timeout in seconds. + watch_timeout: 50 # The timeout when watch etcd + # resync_delay: 5 # Set resync time in seconds after a sync failure. + # The actual resync time would be resync_delay plus 50% random jitter. + # health_check_timeout: 10 # Set timeout in seconds for etcd health check. + # Default to 10 if not set or a negative value is provided. + startup_retry: 2 # Set the number of retries to etcd on startup. Default to 2. + # user: root # Set the root username for etcd. + # password: 5tHkHhYkjr6cQ # Set the root password for etcd. + tls: + # cert: /path/to/cert # Set the path to certificate used by the etcd client + # key: /path/to/key # Set the path to path of key used by the etcd client + verify: true # Verify the etcd certificate when establishing a TLS connection with etcd. + # sni: # The SNI for etcd TLS requests. + # If not set, the host from the URL is used. 
diff --git a/CloudronPackages/APISIX/apisix-source/conf/debug.yaml b/CloudronPackages/APISIX/apisix-source/conf/debug.yaml new file mode 100644 index 0000000..bf82562 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/conf/debug.yaml @@ -0,0 +1,36 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +basic: + enable: false # Enable the basic debug mode. +http_filter: + enable: false # Enable HTTP filter to dynamically apply advanced debug settings. + enable_header_name: X-APISIX-Dynamic-Debug # If the header is present in a request, apply the advanced debug settings. +hook_conf: + enable: false # Enable hook debug trace to log the target module function's input arguments or returned values. + name: hook_phase # Name of module and function list. + log_level: warn # Severity level for input arguments and returned values in the error log. + is_print_input_args: true # Print the input arguments. + is_print_return_value: true # Print the return value. + +hook_phase: # Name of module and function list. + apisix: # Required module name. + - http_access_phase # Required function names. 
+ - http_header_filter_phase + - http_body_filter_phase + - http_log_phase + +#END diff --git a/CloudronPackages/APISIX/apisix-source/conf/mime.types b/CloudronPackages/APISIX/apisix-source/conf/mime.types new file mode 100644 index 0000000..b53f7f7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/conf/mime.types @@ -0,0 +1,98 @@ + +types { + text/html html htm shtml; + text/css css; + text/xml xml; + image/gif gif; + image/jpeg jpeg jpg; + application/javascript js; + application/atom+xml atom; + application/rss+xml rss; + + text/mathml mml; + text/plain txt; + text/vnd.sun.j2me.app-descriptor jad; + text/vnd.wap.wml wml; + text/x-component htc; + + image/png png; + image/svg+xml svg svgz; + image/tiff tif tiff; + image/vnd.wap.wbmp wbmp; + image/webp webp; + image/x-icon ico; + image/x-jng jng; + image/x-ms-bmp bmp; + + font/woff woff; + font/woff2 woff2; + + application/java-archive jar war ear; + application/json json; + application/mac-binhex40 hqx; + application/msword doc; + application/pdf pdf; + application/postscript ps eps ai; + application/rtf rtf; + application/vnd.apple.mpegurl m3u8; + application/vnd.google-earth.kml+xml kml; + application/vnd.google-earth.kmz kmz; + application/vnd.ms-excel xls; + application/vnd.ms-fontobject eot; + application/vnd.ms-powerpoint ppt; + application/vnd.oasis.opendocument.graphics odg; + application/vnd.oasis.opendocument.presentation odp; + application/vnd.oasis.opendocument.spreadsheet ods; + application/vnd.oasis.opendocument.text odt; + application/vnd.openxmlformats-officedocument.presentationml.presentation + pptx; + application/vnd.openxmlformats-officedocument.spreadsheetml.sheet + xlsx; + application/vnd.openxmlformats-officedocument.wordprocessingml.document + docx; + application/vnd.wap.wmlc wmlc; + application/wasm wasm; + application/x-7z-compressed 7z; + application/x-cocoa cco; + application/x-java-archive-diff jardiff; + application/x-java-jnlp-file jnlp; + application/x-makeself run; + 
application/x-perl pl pm; + application/x-pilot prc pdb; + application/x-rar-compressed rar; + application/x-redhat-package-manager rpm; + application/x-sea sea; + application/x-shockwave-flash swf; + application/x-stuffit sit; + application/x-tcl tcl tk; + application/x-x509-ca-cert der pem crt; + application/x-xpinstall xpi; + application/xhtml+xml xhtml; + application/xspf+xml xspf; + application/zip zip; + + application/octet-stream bin exe dll; + application/octet-stream deb; + application/octet-stream dmg; + application/octet-stream iso img; + application/octet-stream msi msp msm; + + audio/midi mid midi kar; + audio/mpeg mp3; + audio/ogg ogg; + audio/x-m4a m4a; + audio/x-realaudio ra; + + video/3gpp 3gpp 3gp; + video/mp2t ts; + video/mp4 mp4; + video/mpeg mpeg mpg; + video/quicktime mov; + video/webm webm; + video/x-flv flv; + video/x-m4v m4v; + video/x-mng mng; + video/x-ms-asf asx asf; + video/x-ms-wmv wmv; + video/x-msvideo avi; +} diff --git a/CloudronPackages/APISIX/apisix-source/docker/compose/apisix_conf/master/config.yaml b/CloudronPackages/APISIX/apisix-source/docker/compose/apisix_conf/master/config.yaml new file mode 100644 index 0000000..1220d8f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docker/compose/apisix_conf/master/config.yaml @@ -0,0 +1,36 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +apisix: + node_listen: 9080 # APISIX listening port + enable_ipv6: false + +deployment: + admin: + allow_admin: # https://nginx.org/en/docs/http/ngx_http_access_module.html#allow + - 0.0.0.0/0 # We need to restrict ip access rules for security. 0.0.0.0/0 is for test. + + admin_key: + - name: "admin" + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin # admin: manage all configuration data + + etcd: + host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster. + - "http://etcd:2379" # multiple etcd address + prefix: "/apisix" # apisix configurations prefix + timeout: 30 # 30 seconds diff --git a/CloudronPackages/APISIX/apisix-source/docker/compose/docker-compose-master.yaml b/CloudronPackages/APISIX/apisix-source/docker/compose/docker-compose-master.yaml new file mode 100644 index 0000000..08e2b9b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docker/compose/docker-compose-master.yaml @@ -0,0 +1,52 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +version: "3" + +services: + apisix: + image: "apache/apisix:${APISIX_DOCKER_TAG}" + restart: always + volumes: + - ./apisix_conf/master/config.yaml:/usr/local/apisix/conf/config.yaml:ro + depends_on: + - etcd + ports: + - "9180:9180/tcp" + - "9080:9080/tcp" + - "9091:9091/tcp" + - "9443:9443/tcp" + networks: + - apisix + + etcd: + image: bitnami/etcd:3.6 + restart: always + environment: + ETCD_DATA_DIR: /etcd_data + ETCD_ENABLE_V2: "true" + ALLOW_NONE_AUTHENTICATION: "yes" + ETCD_ADVERTISE_CLIENT_URLS: "http://etcd:2379" + ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379" + ports: + - "2379:2379/tcp" + networks: + - apisix + +networks: + apisix: + driver: bridge diff --git a/CloudronPackages/APISIX/apisix-source/docker/debian-dev/Dockerfile b/CloudronPackages/APISIX/apisix-source/docker/debian-dev/Dockerfile new file mode 100644 index 0000000..20447c4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docker/debian-dev/Dockerfile @@ -0,0 +1,80 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +FROM debian:bullseye-slim AS build + +ARG ENABLE_PROXY=false +ARG CODE_PATH + +ENV DEBIAN_FRONTEND=noninteractive +ENV ENV_INST_LUADIR=/usr/local/apisix + +COPY ${CODE_PATH} /apisix + +WORKDIR /apisix + +RUN set -x \ + && apt-get -y update --fix-missing \ + && apt-get install -y \ + make \ + git \ + sudo \ + libyaml-dev \ + && ls -al \ + && make deps \ + && mkdir -p ${ENV_INST_LUADIR} \ + && cp -r deps ${ENV_INST_LUADIR} \ + && make install + +FROM debian:bullseye-slim + +ARG ENTRYPOINT_PATH=./docker-entrypoint.sh +ARG INSTALL_BROTLI=./install-brotli.sh +ARG CHECK_STANDALONE_CONFIG=./check_standalone_config.sh + +# Install the runtime libyaml package +RUN apt-get -y update --fix-missing \ + && apt-get install -y libldap2-dev libyaml-0-2 \ + && apt-get remove --purge --auto-remove -y \ + && mkdir -p /usr/local/apisix/ui + +COPY --from=build /usr/local/apisix /usr/local/apisix +COPY --from=build /usr/local/openresty /usr/local/openresty +COPY --from=build /usr/bin/apisix /usr/bin/apisix +COPY --chown=nobody:root ui/ /usr/local/apisix/ui/ + +COPY ${INSTALL_BROTLI} /install-brotli.sh +RUN chmod +x /install-brotli.sh \ + && cd / && ./install-brotli.sh && rm -rf /install-brotli.sh + +ENV PATH=$PATH:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin + +WORKDIR /usr/local/apisix + +RUN ln -sf /dev/stdout /usr/local/apisix/logs/access.log \ + && ln -sf /dev/stderr /usr/local/apisix/logs/error.log + +EXPOSE 9080 9443 + +COPY ${ENTRYPOINT_PATH} /docker-entrypoint.sh +COPY ${CHECK_STANDALONE_CONFIG} /check_standalone_config.sh +RUN chmod +x /docker-entrypoint.sh /check_standalone_config.sh + +ENTRYPOINT ["/docker-entrypoint.sh"] + +CMD ["docker-start"] + +STOPSIGNAL SIGQUIT diff --git a/CloudronPackages/APISIX/apisix-source/docker/debian-dev/docker-entrypoint.sh b/CloudronPackages/APISIX/apisix-source/docker/debian-dev/docker-entrypoint.sh new file mode 100644 index 0000000..b130a97 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/docker/debian-dev/docker-entrypoint.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -eo pipefail + +PREFIX=${APISIX_PREFIX:=/usr/local/apisix} + +if [[ "$1" == "docker-start" ]]; then + if [ "$APISIX_STAND_ALONE" = "true" ]; then + # If the file is not present then initialise the content otherwise update relevant keys for standalone mode + if [ ! -f "${PREFIX}/conf/config.yaml" ]; then + cat > ${PREFIX}/conf/config.yaml << _EOC_ +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + else + # Check if the deployment role is set to data_plane and config provider is set to yaml for standalone mode + source /check_standalone_config.sh + fi + + if [ ! -f "${PREFIX}/conf/apisix.yaml" ]; then + cat > ${PREFIX}/conf/apisix.yaml << _EOC_ +routes: + - +#END +_EOC_ + fi + /usr/bin/apisix init + else + /usr/bin/apisix init + /usr/bin/apisix init_etcd + fi + + # For versions below 3.5.0 whose conf_server has not been removed. 
+ if [ -e "/usr/local/apisix/conf/config_listen.sock" ]; then + rm -f "/usr/local/apisix/conf/config_listen.sock" + fi + + if [ -e "/usr/local/apisix/logs/worker_events.sock" ]; then + rm -f "/usr/local/apisix/logs/worker_events.sock" + fi + + exec /usr/local/openresty/bin/openresty -p /usr/local/apisix -g 'daemon off;' +fi + +exec "$@" diff --git a/CloudronPackages/APISIX/apisix-source/docker/debian-dev/install-brotli.sh b/CloudronPackages/APISIX/apisix-source/docker/debian-dev/install-brotli.sh new file mode 100644 index 0000000..679254e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docker/debian-dev/install-brotli.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +install_brotli() { + apt-get -qy update + apt-get install -y sudo cmake wget unzip + local BORTLI_VERSION="1.1.0" + wget -q https://github.com/google/brotli/archive/refs/tags/v${BORTLI_VERSION}.zip || exit 1 + unzip v${BORTLI_VERSION}.zip && cd ./brotli-${BORTLI_VERSION} && mkdir build && cd build || exit 1 + local CMAKE=$(command -v cmake3 >/dev/null 2>&1 && echo cmake3 || echo cmake) || exit 1 + ${CMAKE} -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local/brotli .. || exit 1 + sudo ${CMAKE} --build . 
--config Release --target install || exit 1 + if [ -d "/usr/local/brotli/lib64" ]; then + echo /usr/local/brotli/lib64 | sudo tee /etc/ld.so.conf.d/brotli.conf + else + echo /usr/local/brotli/lib | sudo tee /etc/ld.so.conf.d/brotli.conf + fi + sudo ldconfig || exit 1 + ln -sf /usr/local/brotli/bin/brotli /usr/bin/brotli + cd ../.. + rm -rf brotli-${BORTLI_VERSION} + rm -rf /v${BORTLI_VERSION}.zip + export SUDO_FORCE_REMOVE=yes + apt purge -qy cmake sudo wget unzip + apt-get remove --purge --auto-remove -y +} +install_brotli diff --git a/CloudronPackages/APISIX/apisix-source/docker/utils/check_standalone_config.sh b/CloudronPackages/APISIX/apisix-source/docker/utils/check_standalone_config.sh new file mode 100644 index 0000000..22792c5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docker/utils/check_standalone_config.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +if ! grep -q 'role: data_plane' "${PREFIX}/conf/config.yaml"; then + echo "Error: ${PREFIX}/conf/config.yaml does not contain 'role: data_plane'. Deployment role must be set to 'data_plane' for standalone mode." 
+ echo "Please refer to the APISIX documentation for deployment modes: https://apisix.apache.org/docs/apisix/deployment-modes/" + exit 1 +fi + +if ! grep -q 'role_data_plane:' "${PREFIX}/conf/config.yaml"; then + echo "Error: ${PREFIX}/conf/config.yaml does not contain 'role_data_plane:'." + echo "Please refer to the APISIX documentation for deployment modes: https://apisix.apache.org/docs/apisix/deployment-modes/" + exit 1 +fi + +if ! grep -q 'config_provider: yaml' "${PREFIX}/conf/config.yaml"; then + echo "Error: ${PREFIX}/conf/config.yaml does not contain 'config_provider: yaml'. Config provider must be set to 'yaml' for standalone mode." + echo "Please refer to the APISIX documentation for deployment modes: https://apisix.apache.org/docs/apisix/deployment-modes/" + exit 1 +fi diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/MA.jpeg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/MA.jpeg new file mode 100644 index 0000000..e6ed73b Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/MA.jpeg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/OA.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/OA.jpg new file mode 100644 index 0000000..9aa10f6 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/OA.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/apache.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/apache.png new file mode 100644 index 0000000..d0075db Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/apache.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/apisix-multi-lang-support.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/apisix-multi-lang-support.png new file mode 100644 index 0000000..c3110e4 Binary files /dev/null and 
b/CloudronPackages/APISIX/apisix-source/docs/assets/images/apisix-multi-lang-support.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/apisix-seed.svg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/apisix-seed.svg new file mode 100644 index 0000000..efbbd85 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/assets/images/apisix-seed.svg @@ -0,0 +1,3 @@ + + +
APISIX-SEED
APISIX-SEED
1、Register route
1、Register route
7、Fetch changes  & refresh memory
7、Fetch changes  & refresh memory
APISIX
APISIX
ETCD
ETCD
Zookeeper
Zookeeper
3、Register service listen event
3、Register service listen event
4、Register or update service
4、Register or update service
CLI
CLI
6、Update etcd
6、Update etcd
5、Fetch service changes
5、Fetch service changes
2、Fetch discovery type and service name
2、Fetch discovery type and service name
Text is not SVG - cannot display
diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/apisix.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/apisix.png new file mode 100644 index 0000000..d7672dd Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/apisix.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-caddy-php-welcome-page.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-caddy-php-welcome-page.png new file mode 100644 index 0000000..70f28fa Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-caddy-php-welcome-page.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-define-route.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-define-route.png new file mode 100644 index 0000000..7136a02 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-define-route.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-define-service.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-define-service.png new file mode 100644 index 0000000..56c47e1 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-define-service.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-fargate-cdk.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-fargate-cdk.png new file mode 100644 index 0000000..b2cdad5 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-fargate-cdk.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-nlb-ip-addr.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-nlb-ip-addr.png new file mode 100644 index 0000000..feec598 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/aws-nlb-ip-addr.png differ diff --git 
a/CloudronPackages/APISIX/apisix-source/docs/assets/images/benchmark-1.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/benchmark-1.jpg new file mode 100644 index 0000000..3641101 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/benchmark-1.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/benchmark-2.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/benchmark-2.jpg new file mode 100644 index 0000000..c99b05d Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/benchmark-2.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/build-devcontainers-vscode-command.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/build-devcontainers-vscode-command.png new file mode 100644 index 0000000..b7908d3 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/build-devcontainers-vscode-command.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/build-devcontainers-vscode-progressbar.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/build-devcontainers-vscode-progressbar.png new file mode 100644 index 0000000..05a51df Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/build-devcontainers-vscode-progressbar.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/consumer-internal.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/consumer-internal.png new file mode 100644 index 0000000..1c1cb56 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/consumer-internal.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/consumer-who.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/consumer-who.png new file mode 100644 index 0000000..c749488 Binary files /dev/null and 
b/CloudronPackages/APISIX/apisix-source/docs/assets/images/consumer-who.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/contributor-over-time.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/contributor-over-time.png new file mode 100644 index 0000000..2c17864 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/contributor-over-time.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/control-plane-service-discovery.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/control-plane-service-discovery.png new file mode 100644 index 0000000..034f81c Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/control-plane-service-discovery.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/create-a-route.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/create-a-route.png new file mode 100644 index 0000000..d89c2af Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/create-a-route.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/dashboard.jpeg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/dashboard.jpeg new file mode 100644 index 0000000..08697b2 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/dashboard.jpeg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/deployment-cp_and_dp.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/deployment-cp_and_dp.png new file mode 100644 index 0000000..6445cb3 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/deployment-cp_and_dp.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/deployment-traditional.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/deployment-traditional.png new file mode 100644 index 
0000000..f2dc7d6 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/deployment-traditional.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/discovery-cn.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/discovery-cn.png new file mode 100644 index 0000000..7b448c2 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/discovery-cn.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/discovery.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/discovery.png new file mode 100644 index 0000000..6b592e3 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/discovery.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/external-plugin.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/external-plugin.png new file mode 100644 index 0000000..38c3cde Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/external-plugin.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/flamegraph-1.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flamegraph-1.jpg new file mode 100644 index 0000000..79df5b2 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flamegraph-1.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/flamegraph-2.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flamegraph-2.jpg new file mode 100644 index 0000000..8defc22 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flamegraph-2.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/flow-load-plugin.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flow-load-plugin.png new file mode 100644 index 0000000..26637d3 Binary files /dev/null and 
b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flow-load-plugin.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/flow-plugin-internal.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flow-plugin-internal.png new file mode 100644 index 0000000..107fe48 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flow-plugin-internal.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/flow-software-architecture.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flow-software-architecture.png new file mode 100644 index 0000000..633d04c Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/flow-software-architecture.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/health_check_node_state_diagram.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/health_check_node_state_diagram.png new file mode 100644 index 0000000..777c503 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/health_check_node_state_diagram.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/health_check_status_page.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/health_check_status_page.png new file mode 100644 index 0000000..ed4aebe Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/health_check_status_page.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/latency-1.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/latency-1.jpg new file mode 100644 index 0000000..b5410c6 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/latency-1.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/latency-2.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/latency-2.jpg new file mode 100644 
index 0000000..51017d3 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/latency-2.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/list-of-routes.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/list-of-routes.png new file mode 100644 index 0000000..891dff1 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/list-of-routes.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/authz-keycloak.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/authz-keycloak.png new file mode 100644 index 0000000..6b6ae84 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/authz-keycloak.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/basic-auth-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/basic-auth-1.png new file mode 100644 index 0000000..17fb2d2 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/basic-auth-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/basic-auth-2.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/basic-auth-2.png new file mode 100644 index 0000000..70fa618 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/basic-auth-2.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-1.png new file mode 100644 index 0000000..7c68f70 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-2.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-2.png new file mode 100644 index 
0000000..8446e7a Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-2.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-3.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-3.png new file mode 100644 index 0000000..e8673c2 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-3.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-4.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-4.png new file mode 100644 index 0000000..b07c62d Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/grafana-4.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/hmac-auth-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/hmac-auth-1.png new file mode 100644 index 0000000..5780307 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/hmac-auth-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/hmac-auth-2.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/hmac-auth-2.png new file mode 100644 index 0000000..4f4be8b Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/hmac-auth-2.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/inspect.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/inspect.png new file mode 100644 index 0000000..efe82ee Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/inspect.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jaeger-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jaeger-1.png new file mode 100644 index 0000000..b25b441 
Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jaeger-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jaeger-2.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jaeger-2.png new file mode 100644 index 0000000..610f75d Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jaeger-2.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jwt-auth-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jwt-auth-1.png new file mode 100644 index 0000000..16c0c6e Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jwt-auth-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jwt-auth-2.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jwt-auth-2.png new file mode 100644 index 0000000..d44d450 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jwt-auth-2.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jwt-auth-3.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jwt-auth-3.png new file mode 100644 index 0000000..50bc494 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/jwt-auth-3.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/key-auth-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/key-auth-1.png new file mode 100644 index 0000000..2913bff Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/key-auth-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/key-auth-2.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/key-auth-2.png new file mode 100644 index 0000000..f87d205 Binary files 
/dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/key-auth-2.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/limit-conn-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/limit-conn-1.png new file mode 100644 index 0000000..037e386 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/limit-conn-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/limit-count-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/limit-count-1.png new file mode 100644 index 0000000..8fc1ef6 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/limit-count-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/limit-req-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/limit-req-1.png new file mode 100644 index 0000000..8f8e1f9 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/limit-req-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/loggly-dashboard.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/loggly-dashboard.png new file mode 100644 index 0000000..5e7fab2 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/loggly-dashboard.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/oauth-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/oauth-1.png new file mode 100644 index 0000000..f49d71f Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/oauth-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus-1.png new file mode 100644 index 
0000000..bb9225b Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus-2.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus-2.png new file mode 100644 index 0000000..3e6332f Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus-2.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus01.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus01.png new file mode 100644 index 0000000..f5c1ee1 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus01.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus02.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus02.png new file mode 100644 index 0000000..94934a9 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/prometheus02.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-1.png new file mode 100644 index 0000000..d80879c Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-3.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-3.png new file mode 100644 index 0000000..691b306 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-3.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-4.png 
b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-4.png new file mode 100644 index 0000000..4a8fb15 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-4.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-5.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-5.png new file mode 100644 index 0000000..f24235e Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/skywalking-5.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/sls-logger-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/sls-logger-1.png new file mode 100644 index 0000000..03b645d Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/sls-logger-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/splunk-hec-admin-cn.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/splunk-hec-admin-cn.png new file mode 100644 index 0000000..e8997d1 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/splunk-hec-admin-cn.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/splunk-hec-admin-en.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/splunk-hec-admin-en.png new file mode 100644 index 0000000..b70678e Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/splunk-hec-admin-en.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/wolf-rbac-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/wolf-rbac-1.png new file mode 100644 index 0000000..137ce4a Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/wolf-rbac-1.png differ diff --git 
a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/wolf-rbac-2.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/wolf-rbac-2.png new file mode 100644 index 0000000..5f6fdce Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/wolf-rbac-2.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/zipkin-1.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/zipkin-1.jpg new file mode 100644 index 0000000..99184d9 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/zipkin-1.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/zipkin-1.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/zipkin-1.png new file mode 100644 index 0000000..7b3451a Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/zipkin-1.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/zipkin-2.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/zipkin-2.jpg new file mode 100644 index 0000000..a7d866f Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/plugin/zipkin-2.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/pubsub-architecture.svg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/pubsub-architecture.svg new file mode 100644 index 0000000..9bce53b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/assets/images/pubsub-architecture.svg @@ -0,0 +1,4 @@ + + + +
Client
Client
APISIX
APISIX
MQ Broker
MQ Broker
Fetch
Fetch
Push
Push
Subscribe
Subscribe
Text is not SVG - cannot display
\ No newline at end of file diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/requesturl.jpg b/CloudronPackages/APISIX/apisix-source/docs/assets/images/requesturl.jpg new file mode 100644 index 0000000..9697b96 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/requesturl.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/routes-example.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/routes-example.png new file mode 100644 index 0000000..4f116f3 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/routes-example.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/secret.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/secret.png new file mode 100644 index 0000000..94d2388 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/secret.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/service-example.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/service-example.png new file mode 100644 index 0000000..9840bf8 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/service-example.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/skip-mtls.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/skip-mtls.png new file mode 100644 index 0000000..a739e66 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/skip-mtls.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/images/update-docker-desktop-file-sharing.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/update-docker-desktop-file-sharing.png new file mode 100644 index 0000000..39999ac Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/update-docker-desktop-file-sharing.png differ diff --git 
a/CloudronPackages/APISIX/apisix-source/docs/assets/images/upstream-example.png b/CloudronPackages/APISIX/apisix-source/docs/assets/images/upstream-example.png new file mode 100644 index 0000000..1ad8c9f Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/images/upstream-example.png differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/other/apisix-plugin-design.graffle b/CloudronPackages/APISIX/apisix-source/docs/assets/other/apisix-plugin-design.graffle new file mode 100644 index 0000000..e844fb4 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/docs/assets/other/apisix-plugin-design.graffle differ diff --git a/CloudronPackages/APISIX/apisix-source/docs/assets/other/json/apisix-grafana-dashboard.json b/CloudronPackages/APISIX/apisix-source/docs/assets/other/json/apisix-grafana-dashboard.json new file mode 100644 index 0000000..1ccf79b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/assets/other/json/apisix-grafana-dashboard.json @@ -0,0 +1,1984 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "7.3.7" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "${DS_PROMETHEUS}", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "limit": 100, + "name": "Annotations & Alerts", + "showIn": 0, + "type": "dashboard" + } + ] + }, + "description": "MicroService API Gateway Apache 
APISIX", + "editable": true, + "gnetId": 11719, + "graphTooltip": 0, + "id": null, + "iteration": 1617695812393, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 10, + "panels": [], + "title": "Nginx", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 8, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(apisix_http_requests_total{instance=~\"$instance\"})", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Total Requests", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + 
"#d44a3a" + ], + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 1 + }, + "id": 16, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(apisix_nginx_http_current_connections{state=\"accepted\", instance=~\"$instance\"})", + "intervalFactor": 2, + "legendFormat": "Accepted", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Accepted Connections", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 1 + }, + "id": 11, + "interval": null, + "links": [], + "mappingType": 1, + 
"mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true, + "ymax": null, + "ymin": null + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(apisix_nginx_http_current_connections{state=\"handled\", instance=~\"$instance\"})", + "intervalFactor": 2, + "legendFormat": "Total", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Handled Connections", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 6 + }, + "hiddenSeries": false, + "id": 17, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(apisix_nginx_http_current_connections{state=~\"active|reading|writing|waiting\", instance=~\"$instance\"}) by (state)", + "instant": 
false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{state}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Nginx connection state", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": null, + "format": "Misc", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 13, + "panels": [], + "title": "Bandwidth", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 13 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(apisix_bandwidth{instance=~\"$instance\"}[$__rate_interval])) by (type)", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "thresholds": [], 
+ "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total Bandwidth", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 19 + }, + "hiddenSeries": false, + "id": 21, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (service)", + "legendFormat": "service:{{service}}", + "refId": "A" + }, + { + "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (route)", + "legendFormat": "route:{{route}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + 
"timeShift": null, + "title": "Ingress per service/route", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 25 + }, + "hiddenSeries": false, + "id": 19, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (service)", + "interval": "", + "legendFormat": "service:{{service}}", + "refId": "A" + }, + { + "expr": "sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (route)", + "legendFormat": "route:{{route}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + 
"title": "Egress per service/route", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 15, + "panels": [], + "title": "HTTP", + "type": "row" + }, + { + "aliasColors": { + "HTTP Status:200": "green", + "HTTP Status:500": "red" + }, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 3, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 2, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "state", + "lines": true + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (code)", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "HTTP Status:{{code}}", + "refId": "A" + } + ], + 
"thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests per second (RPS) by status code", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 3, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 2, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "state", + "lines": true + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(apisix_http_status{instance=~\"$instance\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "refId": "A", + "legendFormat": "Requests/second" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total requests per second (RPS)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": 
"graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 3, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 38 + }, + "hiddenSeries": false, + "id": 32, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sort": "total", + "sortDesc": true, + "total": true, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "state", + "lines": true + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (service)", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "service:{{service}}", + "refId": "A" + }, + { + "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (route)", + "interval": "", + "legendFormat": "route:{{route}}", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + 
"title": "Requests per second (RPS) per service/route", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 44 + }, + "hiddenSeries": false, + "id": 27, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", + "format": "time_series", + "interval": "", + "legendFormat": "P90", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, 
sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", + "interval": "", + "legendFormat": "P95", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", + "interval": "", + "legendFormat": "P99", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Request Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 44 + }, + "hiddenSeries": false, + "id": 28, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], 
+ "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "P90", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", + "interval": "", + "legendFormat": "P95", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", + "interval": "", + "legendFormat": "P99", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "APISIX Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, 
+ "y": 44 + }, + "hiddenSeries": false, + "id": 33, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", + "format": "time_series", + "interval": "", + "legendFormat": "P90", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", + "interval": "", + "legendFormat": "P95", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", + "interval": "", + "legendFormat": "P99", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Upstream Latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": 
"${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 50 + }, + "id": 23, + "panels": [], + "title": "Misc", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 18, + "x": 0, + "y": 51 + }, + "hiddenSeries": false, + "id": 30, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(apisix_etcd_modify_indexes{key=~\"consumers|global_rules|max_modify_index|prev_index|protos|routes|services|ssls|stream_routes|upstreams|x_etcd_index\"}) by (key)", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{key}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Etcd modify indexes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": null, + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + 
"custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 18, + "y": 51 + }, + "id": 25, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "7.3.7", + "targets": [ + { + "expr": "sum(apisix_etcd_reachable{instance=~\"$instance\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Etcd reachable", + "type": "stat" + }, + { + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + "custom": {}, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 21, + "y": 51 + }, + "id": 29, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "7.3.7", + "targets": [ + { + "expr": "sum(apisix_nginx_metric_errors_total{instance=~\"$instance\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Nginx metric errors", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "The free space percent of each nginx shared DICT since APISIX start", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + 
"fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 57 + }, + "hiddenSeries": false, + "id": 35, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(apisix_shared_dict_free_space_bytes * 100) / on (name) apisix_shared_dict_capacity_bytes", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{state}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Nginx shared dict free space percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:117", + "decimals": null, + "format": "percent", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:118", + "decimals": null, + "format": "Misc", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "5s", + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": ".*", + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "label_values(apisix_http_status,service)", + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "service", + "options": [], + "query": "label_values(apisix_http_status,service)", + 
"refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "label_values(apisix_http_status,route)", + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "route", + "options": [], + "query": "label_values(apisix_http_status,route)", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "label_values(apisix_nginx_http_current_connections,instance)", + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "instance", + "options": [], + "query": "label_values(apisix_http_status,instance)", + "refresh": 2, + "regex": ".*", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "label_values(apisix_http_status,consumer)", + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "consumer", + "options": [], + "query": "label_values(apisix_http_status,consumer)", + "refresh": 2, + "regex": ".*", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "label_values(apisix_http_status,node)", + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "node", + "options": [], + "query": "label_values(apisix_http_status,node)", + "refresh": 2, + "regex": "", + "skipUrlSync": 
false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Apache APISIX", + "uid": "bLlNuRLWz", + "version": 13 +} diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/FAQ.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/FAQ.md new file mode 100644 index 0000000..a19b5e9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/FAQ.md @@ -0,0 +1,775 @@ +--- +title: FAQ +keywords: + - Apache APISIX + - API Gateway + - FAQ +description: This article lists solutions to common problems when using Apache APISIX. +--- + + + +## Why do I need a new API gateway? + +As organizations move towards cloud native microservices, there is a need for an API gateway that is performant, flexible, secure and scalable. + +APISIX outperforms other API gateways in these metrics while being platform agnostic and fully dynamic delivering features like supporting multiple protocols, fine-grained routing and multi-language support. + +## How does Apache APISIX differ from other API gateways? + +Apache APISIX differs in the following ways: + +- It uses etcd to save and synchronize configurations rather than relational databases like PostgreSQL or MySQL. The real-time event notification system in etcd is easier to scale than in these alternatives. This allows APISIX to synchronize the configuration in real-time, makes the code concise and avoids a single point of failure. +- Fully dynamic. +- Supports [hot loading of Plugins](./terminology/plugin.md#hot-reload). + +## What is the performance impact of using Apache APISIX? + +Apache APISIX delivers the best performance among other API gateways with a single-core QPS of 18,000 with an average delay of 0.2 ms. 
+ +Specific results of the performance benchmarks can be found [here](benchmark.md). + +## Which platforms does Apache APISIX support? + +Apache APISIX is platform agnostic and avoids vendor lock-in. It is built for cloud native environments and can run on anything from bare-metal machines to Kubernetes. It even supports Apple Silicon chips. + +## What does it mean by "Apache APISIX is fully dynamic"? + +Apache APISIX is fully dynamic in the sense that it doesn't require restarts to change its behavior. + +It does the following dynamically: + +- Reloading Plugins +- Proxy rewrites +- Proxy mirror +- Response rewrites +- Health checks +- Traffic split + +## Does Apache APISIX have a user interface? + +Yes. Apache APISIX has an experimental feature called [Apache APISIX Dashboard](https://github.com/apache/apisix-dashboard), which is independent from Apache APISIX. To work with Apache APISIX through a user interface, you can deploy the Apache APISIX Dashboard. + +## Can I write my own Plugins for Apache APISIX? + +Yes. Apache APISIX is flexible and extensible through the use of custom Plugins that can be specific to user needs. + +You can write your own Plugins by referring to [How to write your own Plugins](plugin-develop.md). + +## Why does Apache APISIX use etcd for the configuration center? + +In addition to the basic functionality of storing the configurations, Apache APISIX also needs a storage system that supports these features: + +1. Distributed deployments in clusters. +2. Guarded transactions by comparisons. +3. Multi-version concurrency control. +4. Notifications and watch streams. +5. High performance with minimum read/write latency. + +etcd provides these features and more, making it ideal over other databases like PostgreSQL and MySQL. + +To learn more about how etcd compares with other alternatives, see this [comparison chart](https://etcd.io/docs/latest/learning/why/#comparison-chart).
+ +## When installing Apache APISIX dependencies with LuaRocks, why does it cause a timeout or result in a slow or unsuccessful installation? + +This is likely because the LuaRocks server used is blocked. + +To solve this you can use https_proxy or use the `--server` flag to specify a faster LuaRocks server. + +You can run the command below to see the available servers (needs LuaRocks 3.0+): + +```shell +luarocks config rocks_servers +``` + +Mainland China users can use `luarocks.cn` as the LuaRocks server. You can use this wrapper with the Makefile to set this up: + +```bash +make deps ENV_LUAROCKS_SERVER=https://luarocks.cn +``` + +If this does not solve your problem, you can try getting a detailed log by using the `--verbose` or `-v` flag to diagnose the problem. + +## How do I build the APISIX-Runtime environment? + +Some functions need to introduce additional NGINX modules, which requires APISIX to run on APISIX-Runtime. If you need these functions, you can refer to the code in [api7/apisix-build-tools](https://github.com/api7/apisix-build-tools) to build your own APISIX-Runtime environment. + +## How can I make a gray release with Apache APISIX? + +Let's take an example query `foo.com/product/index.html?id=204&page=2` and consider that you need to make a gray release based on the `id` in the query string with this condition: + +1. Group A: `id <= 1000` +2. Group B: `id > 1000` + +There are two different ways to achieve this in Apache APISIX: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. 
Using the `vars` field in a [Route](terminology/route.md): + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "vars": [ + ["arg_id", "<=", "1000"] + ], + "plugins": { + "redirect": { + "uri": "/test?group_id=1" + } + } +}' + +curl -i http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "vars": [ + ["arg_id", ">", "1000"] + ], + "plugins": { + "redirect": { + "uri": "/test?group_id=2" + } + } +}' +``` + +All the available operators of the current `lua-resty-radixtree` are listed [here](https://github.com/api7/lua-resty-radixtree#operator-list). + +2. Using the [traffic-split](plugins/traffic-split.md) Plugin. + +## How do I redirect HTTP traffic to HTTPS with Apache APISIX? + +For example, you need to redirect traffic from `http://foo.com` to `https://foo.com`. + +Apache APISIX provides several different ways to achieve this: + +1. Setting `http_to_https` to `true` in the [redirect](plugins/redirect.md) Plugin: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true + } + } +}' +``` + +2. Advanced routing with `vars` in the redirect Plugin: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "host": "foo.com", + "vars": [ + [ + "scheme", + "==", + "http" + ] + ], + "plugins": { + "redirect": { + "uri": "https://$host$request_uri", + "ret_code": 301 + } + } +}' +``` + +3. 
Using the `serverless` Plugin: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": ["return function() if ngx.var.scheme == \"http\" and ngx.var.host == \"foo.com\" then ngx.header[\"Location\"] = \"https://foo.com\" .. ngx.var.request_uri; ngx.exit(ngx.HTTP_MOVED_PERMANENTLY); end; end"] + } + } +}' +``` + +To test this serverless Plugin: + +```shell +curl -i -H 'Host: foo.com' http://127.0.0.1:9080/hello +``` + +The response should be: + +``` +HTTP/1.1 301 Moved Permanently +Date: Mon, 18 May 2020 02:56:04 GMT +Content-Type: text/html +Content-Length: 166 +Connection: keep-alive +Location: https://foo.com/hello +Server: APISIX web server + + +301 Moved Permanently + +

301 Moved Permanently

+
openresty
+ + +``` + +## How do I change Apache APISIX's log level? + +By default, the log level of Apache APISIX is set to `warn`. You can set this to `info` to trace the messages printed by `core.log.info`. + +For this, you can set the `error_log_level` parameter in your configuration file (conf/config.yaml) as shown below and reload Apache APISIX. + +```yaml +nginx_config: + error_log_level: "info" +``` + +## How do I reload my custom Plugins for Apache APISIX? + +All Plugins in Apache APISIX are hot reloaded. + +You can learn more about hot reloading of Plugins [here](./terminology/plugin.md#hot-reload). + +## How do I configure Apache APISIX to listen on multiple ports when handling HTTP or HTTPS requests? + +By default, Apache APISIX listens only on port 9080 when handling HTTP requests. + +To configure Apache APISIX to listen on multiple ports, you can: + +1. Modify the parameter `node_listen` in `conf/config.yaml`: + + ``` + apisix: + node_listen: + - 9080 + - 9081 + - 9082 + ``` + + Similarly for HTTPS requests, modify the parameter `ssl.listen` in `conf/config.yaml`: + + ``` + apisix: + ssl: + enable: true + listen: + - port: 9443 + - port: 9444 + - port: 9445 + ``` + +2. Reload or restart Apache APISIX. + +## After uploading the SSL certificate, why can't the corresponding route be accessed through HTTPS + IP? + +If you directly use an HTTPS + IP address to access the server, the server will use the IP address to compare with the bound SNI. Since the SSL certificate is bound to a domain name, no matching certificate can be found for that SNI, so certificate verification fails and the user cannot access the gateway via HTTPS + IP. + +You can work around this by setting the `fallback_sni` parameter in the configuration file and configuring a domain name. When a user accesses the gateway via HTTPS + IP and the SNI is empty, APISIX will fall back to the default SNI, allowing HTTPS + IP access to the gateway.
+ +```yaml title="./conf/config.yaml" +apisix: + ssl: + fallback_sni: "${your sni}" +``` + +## How does Apache APISIX achieve millisecond-level configuration synchronization? + +Apache APISIX uses etcd for its configuration center. etcd provides subscription functions like [watch](https://github.com/api7/lua-resty-etcd/blob/master/api_v3.md#watch) and [watchdir](https://github.com/api7/lua-resty-etcd/blob/master/api_v3.md#watchdir) that can monitor changes to specific keywords or directories. + +In Apache APISIX, we use [etcd.watchdir](https://github.com/api7/lua-resty-etcd/blob/master/api_v3.md#watchdir) to monitor changes in a directory. + +If there is no change in the directory being monitored, the process will be blocked until it times out or runs into an error. + +If there are changes in the directory being monitored, etcd will return this new data within milliseconds and Apache APISIX will update the cache memory. + +## How do I customize the Apache APISIX instance id? + +By default, Apache APISIX reads the instance id from `conf/apisix.uid`. If this is not found and no id is configured, Apache APISIX will generate a `uuid` for the instance id. + +To specify a meaningful id to bind Apache APISIX to your internal system, set the `id` in your `conf/config.yaml` file: + +```yaml +apisix: + id: "your-id" +``` + +## Why are there errors saying "failed to fetch data from etcd, failed to read etcd dir, etcd key: xxxxxx" in the error.log? + +Please follow the troubleshooting steps described below: + +1. Make sure that there aren't any networking issues between Apache APISIX and your etcd deployment in your cluster. +2. If your network is healthy, check whether you have enabled the [gRPC gateway](https://etcd.io/docs/v3.4/dev-guide/api_grpc_gateway/) for etcd. The default state depends on whether you used command line options or a configuration file to start the etcd server. + + - If you used command line options, gRPC gateway is enabled by default.
You can enable it manually as shown below: + + ```sh + etcd --enable-grpc-gateway --data-dir=/path/to/data + ``` + + **Note**: This flag is not shown while running `etcd --help`. + + - If you used a configuration file, gRPC gateway is disabled by default. You can manually enable it as shown below: + + In `etcd.json`: + + ```json + { + "enable-grpc-gateway": true, + "data-dir": "/path/to/data" + } + ``` + + In `etcd.conf.yml`: + + ```yml + enable-grpc-gateway: true + ``` + +**Note**: This distinction was eliminated by etcd in their latest master branch but wasn't backported to previous versions. + +## How do I setup high availability Apache APISIX clusters? + +Apache APISIX can be made highly available by adding a load balancer in front of it as APISIX's data plane is stateless and can be scaled when needed. + +The control plane of Apache APISIX is highly available as it relies only on an etcd cluster. + +## Why does the `make deps` command fail when installing Apache APISIX from source? + +When executing `make deps` to install Apache APISIX from source, you can get an error as shown below: + +```shell +$ make deps +...... +Error: Failed installing dependency: https://luarocks.org/luasec-0.9-1.src.rock - Could not find header file for OPENSSL + No file openssl/ssl.h in /usr/local/include +You may have to install OPENSSL in your system and/or pass OPENSSL_DIR or OPENSSL_INCDIR to the luarocks command. +Example: luarocks install luasec OPENSSL_DIR=/usr/local +make: *** [deps] Error 1 +``` + +This is caused by the missing OpenResty openssl development kit. To install it, refer [installing dependencies](install-dependencies.md). + +## How do I access the APISIX Dashboard through Apache APISIX proxy? + +You can follow the steps below to configure this: + +1. Configure different ports for Apache APISIX proxy and Admin API. Or, disable the Admin API. + +```yaml +deployment: + admin: + admin_listen: # use a separate port + ip: 127.0.0.1 + port: 9180 +``` + +2. 
Add a proxy Route for the Apache APISIX dashboard: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uris":[ "/*" ], + "name":"apisix_proxy_dashboard", + "upstream":{ + "nodes":[ + { + "host":"127.0.0.1", + "port":9000, + "weight":1 + } + ], + "type":"roundrobin" + } +}' +``` + +**Note**: The Apache APISIX Dashboard is listening on `127.0.0.1:9000`. + +## How do I use regular expressions (regex) for matching `uri` in a Route? + +You can use the `vars` field in a Route for matching regular expressions: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "vars": [ + ["uri", "~~", "^/[a-z]+$"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +And to test this request: + +```shell +# uri matched +$ curl http://127.0.0.1:9080/hello -i +HTTP/1.1 200 OK +... + +# uri didn't match +$ curl http://127.0.0.1:9080/12ab -i +HTTP/1.1 404 Not Found +... +``` + +For more info on using `vars` refer to [lua-resty-expr](https://github.com/api7/lua-resty-expr). + +## Does the Upstream node support configuring a [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name) address? + +Yes. The example below shows configuring the FQDN `httpbin.default.svc.cluster.local` (a Kubernetes service): + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/ip", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.default.svc.cluster.local": 1 + } + } +}' +``` + +To test this Route: + +```shell +$ curl http://127.0.0.1:9080/ip -i +HTTP/1.1 200 OK +... +``` + +## What is the `X-API-KEY` of the Admin API? Can it be modified? + +`X-API-KEY` of the Admin API refers to the `apisix.admin_key.key` in your `conf/config.yaml` file. It is the access token for the Admin API. 
+ +By default, it is set to `edd1c9f034335f136f87ad84b625c8f1` and can be modified by changing the parameter in your `conf/config.yaml` file: + +```yaml +apisix: + admin_key + - + name: "admin" + key: newkey + role: admin +``` + +Now, to access the Admin API: + +```shell +$ curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: newkey' -X PUT -d ' +{ + "uris":[ "/*" ], + "name":"admin-token-test", + "upstream":{ + "nodes":[ + { + "host":"127.0.0.1", + "port":1980, + "weight":1 + } + ], + "type":"roundrobin" + } +}' + +HTTP/1.1 200 OK +...... +``` + +**Note**: By using the default token, you could be exposed to security risks. It is required to update it when deploying to a production environment. + +## How do I allow all IPs to access Apache APISIX's Admin API? + +By default, Apache APISIX only allows IPs in the range `127.0.0.0/24` to access the Admin API. + +To allow IPs in all ranges, you can update your configuration file as show below and restart or reload Apache APISIX. + +```yaml +deployment: + admin: + allow_admin: + - 0.0.0.0/0 +``` + +**Note**: This should only be used in non-production environments to allow all clients to access Apache APISIX and is not safe for production environments. Always authorize specific IP addresses or address ranges for production environments. + +## How do I auto renew SSL certificates with acme.sh? 
+ +You can run the commands below to achieve this: + +```bash +curl --output /root/.acme.sh/renew-hook-update-apisix.sh --silent https://gist.githubusercontent.com/anjia0532/9ebf8011322f43e3f5037bc2af3aeaa6/raw/65b359a4eed0ae990f9188c2afa22bacd8471652/renew-hook-update-apisix.sh +``` + +```bash +chmod +x /root/.acme.sh/renew-hook-update-apisix.sh +``` + +```bash +acme.sh --issue --staging -d demo.domain --renew-hook "/root/.acme.sh/renew-hook-update-apisix.sh -h http://apisix-admin:port -p /root/.acme.sh/demo.domain/demo.domain.cer -k /root/.acme.sh/demo.domain/demo.domain.key -a xxxxxxxxxxxxx" +``` + +```bash +acme.sh --renew --domain demo.domain +``` + +You can check [this post](https://juejin.cn/post/6965778290619449351) for a more detailed instruction on setting this up. + +## How do I strip a prefix from a path before forwarding to Upstream in Apache APISIX? + +To strip a prefix from a path in your route, like to take `/foo/get` and strip it to `/get`, you can use the [proxy-rewrite](plugins/proxy-rewrite.md) Plugin: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/foo/*", + "plugins": { + "proxy-rewrite": { + "regex_uri": ["^/foo/(.*)","/$1"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +And to test this configuration: + +```shell +curl http://127.0.0.1:9080/foo/get -i +HTTP/1.1 200 OK +... +{ + ... + "url": "http://127.0.0.1/get" +} +``` + +## How do I fix the error `unable to get local issuer certificate` in Apache APISIX? + +You can manually set the path to your certificate by adding it to your `conf/config.yaml` file as shown below: + +```yaml +apisix: + ssl: + ssl_trusted_certificate: /path/to/certs/ca-certificates.crt +``` + +**Note**: When you are trying to connect TLS services with cosocket and if APISIX does not trust the peer's TLS certificate, you should set the parameter `apisix.ssl.ssl_trusted_certificate`. 
+ +For example, if you are using Nacos for service discovery in APISIX, and Nacos has TLS enabled (configured host starts with `https://`), you should set `apisix.ssl.ssl_trusted_certificate` and use the same CA certificate as Nacos. + +## How do I fix the error `module 'resty.worker.events' not found` in Apache APISIX? + +This error is caused by installing Apache APISIX in the `/root` directory. The worker process would by run by the user "nobody" and it would not have enough permissions to access the files in the `/root` directory. + +To fix this, you can change the APISIX installation directory to the recommended directory: `/usr/local`. + +## What is the difference between `plugin-metadata` and `plugin-configs` in Apache APISIX? + +The differences between the two are described in the table below: + +| `plugin-metadata` | `plugin-config` | +| ---------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| Metadata of a Plugin shared by all configuration instances of the Plugin. | Collection of configuration instances of multiple different Plugins. | +| Used when there are property changes that needs to be propagated across all configuration instances of a Plugin. | Used when you need to reuse a common set of configuration instances so that it can be extracted to a `plugin-config` and bound to different Routes. | +| Takes effect on all the entities bound to the configuration instances of the Plugin. | Takes effect on Routes bound to the `plugin-config`. | + +## After deploying Apache APISIX, how to detect the survival of the APISIX data plane? 
+ +You can create a route named `health-info` and enable the [fault-injection](https://apisix.apache.org/docs/apisix/plugins/fault-injection/) plugin (where YOUR-TOKEN is the user's token; 127.0.0.1 is the IP address of the control plane, which can be modified by yourself): + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/health-info \ +-H 'X-API-KEY: YOUR-TOKEN' -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "fine" + } + } + }, + "uri": "/status" +}' +```` + +Verification: + +Access the `/status` of the Apache APISIX data plane to detect APISIX. If the response code is 200, it means APISIX is alive. + +:::note + +This method only detects whether the APISIX data plane is alive or not. It does not mean that the routing and other functions of APISIX are normal. These require more routing-level detection. + +::: + +## What are the scenarios with high APISIX latency related to [etcd](https://etcd.io/) and how to fix them? + +etcd is the data storage component of apisix, and its stability is related to the stability of APISIX. + +In actual scenarios, if APISIX uses a certificate to connect to etcd through HTTPS, the following two problems of high latency for data query or writing may occur: + +1. Query or write data through APISIX Admin API. +2. In the monitoring scenario, Prometheus crawls the APISIX data plane Metrics API timeout. + +These problems related to higher latency seriously affect the service stability of APISIX, and the reason why such problems occur is mainly because etcd provides two modes of operation: HTTP (HTTPS) and gRPC. And APISIX uses the HTTP (HTTPS) protocol to operate etcd by default. +In this scenario, etcd has a bug about HTTP/2: if etcd is operated over HTTPS (HTTP is not affected), the upper limit of HTTP/2 connections is the default `250` in Golang. 
Therefore, when the number of APISIX data plane nodes is large, once the number of connections between all APISIX nodes and etcd exceeds this upper limit, the response of APISIX API interface will be very slow. + +In Golang, the default upper limit of HTTP/2 connections is `250`, the code is as follows: + +```go +package http2 + +import ... + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + maxQueuedControlFrames = 10000 +) + +``` + +etcd officially maintains two main branches, `3.4` and `3.5`. In the `3.4` series, the recently released `3.4.20` version has fixed this issue. As for the `3.5` version, the official is preparing to release the `3.5.5` version a long time ago, but it has not been released as of now (2022.09.13). So, if you are using etcd version less than `3.5.5`, you can refer to the following ways to solve this problem: + +1. Change the communication method between APISIX and etcd from HTTPS to HTTP. +2. Roll back the etcd to `3.4.20`. +3. Clone the etcd source code and compile the `release-3.5` branch directly (this branch has fixed the problem of HTTP/2 connections, but the new version has not been released yet). 
+ +The way to recompile etcd is as follows: + +```shell +git checkout release-3.5 +make GOOS=linux GOARCH=amd64 +``` + +The compiled binary is in the bin directory, replace it with the etcd binary of your server environment, and then restart etcd: + +For more information, please refer to: + +- [when etcd node have many http long polling connections, it may cause etcd to respond slowly to http requests.](https://github.com/etcd-io/etcd/issues/14185) +- [bug: when apisix starts for a while, its communication with etcd starts to time out](https://github.com/apache/apisix/issues/7078) +- [the prometheus metrics API is tool slow](https://github.com/apache/apisix/issues/7353) +- [Support configuring `MaxConcurrentStreams` for http2](https://github.com/etcd-io/etcd/pull/14169) + +Another solution is to switch to an experimental gRPC-based configuration synchronization. This requires setting `use_grpc: true` in the configuration file `conf/config.yaml`: + +```yaml + etcd: + use_grpc: true + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" +``` + +## Why is the file-logger logging garbled? + +If you are using the `file-logger` plugin but getting garbled logs, one possible reason is your upstream response has returned a compressed response body. You can fix this by setting the accept-encoding in the request header to not receive compressed responses using the [proxy-rewirte](https://apisix.apache.org/docs/apisix/plugins/proxy-rewrite/) plugin: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: YOUR-TOKEN' -X PUT -d ' +{ + "methods":[ + "GET" + ], + "uri":"/test/index.html", + "plugins":{ + "proxy-rewrite":{ + "headers":{ + "set":{ + "accept-encoding":"gzip;q=0,deflate,sdch" + } + } + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 + } + } +}' +``` + +## How does APISIX configure ETCD with authentication? + +Suppose you have an ETCD cluster that enables the auth. 
To access this cluster, you need to configure the correct username and password for Apache APISIX in `conf/config.yaml`: + +```yaml +deployment: + etcd: + host: + - "http://127.0.0.1:2379" + user: etcd_user # username for etcd + password: etcd_password # password for etcd +``` + +For other ETCD configurations, such as expiration times, retries, and so on, you can refer to the `etcd` section in the sample configuration `conf/config.yaml.example` file. + +## What is the difference between SSLs, `tls.client_cert` in upstream configurations, and `ssl_trusted_certificate` in `config.yaml`? + +The `ssls` is managed through the `/apisix/admin/ssls` API. It's used for managing TLS certificates. These certificates may be used during TLS handshake (between Apache APISIX and its clients). Apache APISIX uses Server Name Indication (SNI) to differentiate between certificates of different domains. + +The `tls.client_cert`, `tls.client_key`, and `tls.client_cert_id` in upstream are used for mTLS communication with the upstream. + +The `ssl_trusted_certificate` in `config.yaml` configures a trusted CA certificate. It is used for verifying some certificates signed by private authorities within APISIX, to avoid APISIX rejects the certificate. Note that it is not used to trust the certificates of APISIX upstream, because APISIX does not verify the legality of the upstream certificates. Therefore, even if the upstream uses an invalid TLS certificate, it can still be accessed without configuring a root certificate. + +## Where can I find more answers? 
+ +You can find more answers on: + +- [Apache APISIX Slack Channel](/docs/general/join/#join-the-slack-channel) +- [Ask questions on APISIX mailing list](/docs/general/join/#subscribe-to-the-mailing-list) +- [GitHub Issues](https://github.com/apache/apisix/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc) and [GitHub Discussions](https://github.com/apache/apisix/discussions) diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/admin-api.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/admin-api.md new file mode 100644 index 0000000..7533274 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/admin-api.md @@ -0,0 +1,1783 @@ +--- +title: Admin API +keywords: + - Apache APISIX + - API Gateway + - Admin API + - Route + - Plugin + - Upstream +description: This article introduces the functions supported by the Apache APISIX Admin API, which you can use to get, create, update, and delete resources. +--- + + + +## Description + +The Admin API lets users control their deployed Apache APISIX instance. The [architecture design](./architecture-design/apisix.md) gives an idea about how everything fits together. + +## Configuration + +When APISIX is started, the Admin API will listen on port `9180` by default and take the API prefixed with `/apisix/admin`. + +Therefore, to avoid conflicts between your designed API and `/apisix/admin`, you can modify the configuration file [`/conf/config.yaml`](https://github.com/apache/apisix/blob/master/conf/config.yaml) to modify the default listening port. + +APISIX supports setting the IP access allowlist of Admin API to prevent APISIX from being illegally accessed and attacked. You can configure the IP addresses to allow access in the `deployment.admin.allow_admin` option in the `./conf/config.yaml` file. + +The `X-API-KEY` shown below refers to the `deployment.admin.admin_key.key` in the `./conf/config.yaml` file, which is the access token for the Admin API. 
+ +:::tip + +For security reasons, please modify the default `admin_key`, and check the `allow_admin` IP access list. + +::: + +```yaml title="./conf/config.yaml" +deployment: + admin: + admin_key: + - name: admin + key: edd1c9f034335f136f87ad84b625c8f1 # using fixed API token has security risk, please update it when you deploy to production environment + role: admin + allow_admin: # http://nginx.org/en/docs/http/ngx_http_access_module.html#allow + - 127.0.0.0/24 + admin_listen: + ip: 0.0.0.0 # Specific IP, if not set, the default value is `0.0.0.0`. + port: 9180 # Specific port, which must be different from node_listen's port. +``` + +### Using environment variables + +To configure via environment variables, you can use the `${{VAR}}` syntax. For instance: + +```yaml title="./conf/config.yaml" +deployment: + admin: + admin_key: + - name: admin + key: ${{ADMIN_KEY}} + role: admin + allow_admin: + - 127.0.0.0/24 + admin_listen: + ip: 0.0.0.0 + port: 9180 +``` + +And then run `export ADMIN_KEY=$your_admin_key` before running `make init`. + +If the configured environment variable can't be found, an error will be thrown. + +If you want to use a default value when the environment variable is not set, use `${{VAR:=default_value}}` instead. For instance: + +```yaml title="./conf/config.yaml" +deployment: + admin: + admin_key: + - name: admin + key: ${{ADMIN_KEY:=edd1c9f034335f136f87ad84b625c8f1}} + role: admin + allow_admin: + - 127.0.0.0/24 + admin_listen: + ip: 0.0.0.0 + port: 9180 +``` + +This will find the environment variable `ADMIN_KEY` first, and if it does not exist, it will use `edd1c9f034335f136f87ad84b625c8f1` as the default value. + +You can also specify environment variables in yaml keys. 
This is specifically useful in the `standalone` [mode](./deployment-modes.md#standalone) where you can specify the upstream nodes as follows: + +```yaml title="./conf/apisix.yaml" +routes: + - + uri: "/test" + upstream: + nodes: + "${{HOST_IP}}:${{PORT}}": 1 + type: roundrobin +#END +``` + +### Force Delete + +By default, the Admin API checks for references between resources and will refuse to delete resources in use. + +You can make a force deletion by adding the request argument `force=true` to the delete request, for example: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```bash +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H "X-API-KEY: $admin_key" -X PUT -d '{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" +}' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '{ + "uri": "/*", + "upstream_id": 1 +}' +{"value":{"priority":0,"upstream_id":1,"uri":"/*","create_time":1689038794,"id":"1","status":1,"update_time":1689038916},"key":"/apisix/routes/1"} + +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H "X-API-KEY: $admin_key" -X DELETE +{"error_msg":"can not delete this upstream, route [1] is still using it now"} +$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=anyvalue" -H "X-API-KEY: $admin_key" -X DELETE +{"error_msg":"can not delete this upstream, route [1] is still using it now"} +$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=true" -H "X-API-KEY: $admin_key" -X DELETE +{"deleted":"1","key":"/apisix/upstreams/1"} +``` + +## V3 new feature + +The Admin API has made some breaking changes in V3 version, as well as supporting additional features. + +### Support new response body format + +1. Remove `action` field in response body; +2. 
Adjust the response body structure when fetching the list of resources, the new response body structure like: + +Return single resource: + +```json +{ + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 +} +``` + +Return multiple resources: + +```json +{ + "list": [ + { + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 + }, + { + "modifiedIndex": 2685163, + "value": { + "id": "2", + ... + }, + "key": "/apisix/routes/2", + "createdIndex": 2685163 + } + ], + "total": 2 +} +``` + +### Support paging query + +Paging query is supported when getting the resource list, paging parameters include: + +| parameter | Default | Valid range | Description | +| --------- | ------ | ----------- | ----------------------------- | +| page | 1 | [1, ...] | Number of pages. | +| page_size | | [10, 500] | Number of resources per page. | + +The example is as follows: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes?page=1&page_size=10" \ +-H "X-API-KEY: $admin_key" -X GET +``` + +```json +{ + "total": 1, + "list": [ + { + ... + } + ] +} +``` + +Resources that support paging queries: + +- Consumer +- Consumer Group +- Global Rules +- Plugin Config +- Proto +- Route +- Service +- SSL +- Stream Route +- Upstream +- Secret + +### Support filtering query + +When getting a list of resources, it supports filtering resources based on `name`, `label`, `uri`. + +| parameter | parameter | +| --------- | ------------------------------------------------------------ | +| name | Query resource by their `name`, which will not appear in the query results if the resource itself does not have `name`. | +| label | Query resource by their `label`, which will not appear in the query results if the resource itself does not have `label`. | +| uri | Supported on Route resources only. 
If the `uri` of a Route is equal to the uri of the query or if the `uris` contains the uri of the query, the Route resource appears in the query results. | + +:::tip + +When multiple filter parameters are enabled, use the intersection of the query results for different filter parameters. + +::: + +The following example will return a list of routes, and all routes in the list satisfy: the `name` of the route contains the string "test", the `uri` contains the string "foo", and there is no restriction on the `label` of the route, since the label of the query is the empty string. + +```shell +curl 'http://127.0.0.1:9180/apisix/admin/routes?name=test&uri=foo&label=' \ +-H "X-API-KEY: $admin_key" -X GET +``` + +```json +{ + "total": 1, + "list": [ + { + ... + } + ] +} +``` + +### Support reference filtering query + +:::note + +This feature was introduced in APISIX 3.13.0. + +APISIX supports querying routes and stream routes by `service_id` and `upstream_id`. Other resources or fields are not currently supported. + +::: + +When getting a list of resources, it supports a `filter` for filtering resources by filters. + +It is encoded in the following manner. + +```text +filter=escape_uri(key1=value1&key2=value2) +``` + +The following example filters routes using `service_id`. Applying multiple filters simultaneously will return results that match all filter conditions. + +```shell +curl 'http://127.0.0.1:9180/apisix/admin/routes?filter=service_id%3D1' \ +-H "X-API-KEY: $admin_key" -X GET +``` + +```json +{ + "total": 1, + "list": [ + { + ... + } + ] +} +``` + +## Route + +[Routes](./terminology/route.md) match the client's request based on defined rules, loads and executes the corresponding [plugins](#plugin), and forwards the request to the specified [Upstream](#upstream). 
+ +### Route API + +Route resource request address: /apisix/admin/routes/{id}?ttl=0 + +### Quick Note on ID Syntax + +ID's as a text string must be of a length between 1 and 64 characters and they should only contain uppercase, lowercase, numbers and no special characters apart from dashes ( - ), periods ( . ) and underscores ( _ ). For integer values they simply must have a minimum character count of 1. + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | -------------------------------- | ------------ | ----------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/routes | NULL | Fetches a list of all configured Routes. | +| GET | /apisix/admin/routes/{id} | NULL | Fetches specified Route by id. | +| PUT | /apisix/admin/routes/{id} | {...} | Creates a Route with the specified id. | +| POST | /apisix/admin/routes | {...} | Creates a Route and assigns a random id. | +| DELETE | /apisix/admin/routes/{id} | NULL | Removes the Route with the specified id. | +| PATCH | /apisix/admin/routes/{id} | {...} | Updates the selected attributes of the specified, existing Route. To delete an attribute, set value of attribute set to null. | +| PATCH | /apisix/admin/routes/{id}/{path} | {...} | Updates the attribute specified in the path. The values of other attributes remain unchanged. | + +### URI Request Parameters + +| parameter | Required | Type | Description | Example | +| --------- | -------- | --------- | --------------------------------------------------- | ------- | +| ttl | False | Auxiliary | Request expires after the specified target seconds. 
| ttl=1 | + +### Request Body Parameters + +| Parameter | Required | Type | Description | Example | +| ---------------- | ---------------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------- | +| name | False | Auxiliary | Identifier for the Route. | route-xxxx | +| desc | False | Auxiliary | Description of usage scenarios. | route xxxx | +| uri | True, can't be used with `uris` | Match Rules | Matches the uri. For more advanced matching see [Router](./terminology/router.md). | "/hello" | +| uris | True, can't be used with `uri` | Match Rules | Matches with any one of the multiple `uri`s specified in the form of a non-empty list. | ["/hello", "/word"] | +| host | False, can't be used with `hosts` | Match Rules | Matches with domain names such as `foo.com` or PAN domain names like `*.foo.com`. | "foo.com" | +| hosts | False, can't be used with `host` | Match Rules | Matches with any one of the multiple `host`s specified in the form of a non-empty list. | ["foo.com", "*.bar.com"] | +| remote_addr | False, can't be used with `remote_addrs` | Match Rules | Matches with the specified IP address in standard IPv4 format (`192.168.1.101`), CIDR format (`192.168.1.0/24`), or in IPv6 format (`::1`, `fe80::1`, `fe80::1/64`). | "192.168.1.0/24" | +| remote_addrs | False, can't be used with `remote_addr` | Match Rules | Matches with any one of the multiple `remote_addr`s specified in the form of a non-empty list. | ["127.0.0.1", "192.0.0.0/8", "::1"] | +| methods | False | Match Rules | Matches with the specified methods. Matches all methods if empty or unspecified. 
| ["GET", "POST"] | +| priority | False | Match Rules | If different Routes matches to the same `uri`, then the Route is matched based on its `priority`. A higher value corresponds to higher priority. It is set to `0` by default. | priority = 10 | +| vars | False | Match Rules | Matches based on the specified variables consistent with variables in Nginx. Takes the form `[[var, operator, val], [var, operator, val], ...]]`. Note that this is case sensitive when matching a cookie name. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more details. | [["arg_name", "==", "json"], ["arg_age", ">", 18]] | +| filter_func | False | Match Rules | Matches using a user-defined function in Lua. Used in scenarios where `vars` is not sufficient. Functions accept an argument `vars` which provides access to built-in variables (including Nginx variables). | function(vars) return tonumber(vars.arg_userid) % 4 > 2; end | +| plugins | False | Plugin | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | +| script | False | Script | Used for writing arbitrary Lua code or directly calling existing plugins to be executed. See [Script](terminology/script.md) for more. | | +| upstream | False | Upstream | Configuration of the [Upstream](./terminology/upstream.md). | | +| upstream_id | False | Upstream | Id of the [Upstream](terminology/upstream.md) service. | | +| service_id | False | Service | Configuration of the bound [Service](terminology/service.md). | | +| plugin_config_id | False, can't be used with `script` | Plugin | [Plugin config](terminology/plugin-config.md) bound to the Route. | | +| labels | False | Match Rules | Attributes of the Route specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | +| timeout | False | Auxiliary | Sets the timeout (in seconds) for connecting to, and sending and receiving messages between the Upstream and the Route. 
This will overwrite the `timeout` value configured in your [Upstream](#upstream). | {"connect": 3, "send": 3, "read": 3} | +| enable_websocket | False | Auxiliary | Enables a websocket. Set to `false` by default. | | +| status | False | Auxiliary | Enables the current Route. Set to `1` (enabled) by default. | `1` to enable, `0` to disable | + +Example configuration: + +```shell +{ + "id": "1", # id, unnecessary. + "uris": ["/a","/b"], # A set of uri. + "methods": ["GET","POST"], # Can fill multiple methods + "hosts": ["a.com","b.com"], # A set of host. + "plugins": {}, # Bound plugin + "priority": 0, # If different routes contain the same `uri`, determine which route is matched first based on the attribute` priority`, the default value is 0. + "name": "route-xxx", + "desc": "hello world", + "remote_addrs": ["127.0.0.1"], # A set of Client IP. + "vars": [["http_user", "==", "ios"]], # A list of one or more `[var, operator, val]` elements + "upstream_id": "1", # upstream id, recommended + "upstream": {}, # upstream, not recommended + "timeout": { # Set the upstream timeout for connecting, sending and receiving messages of the route. + "connect": 3, + "send": 3, + "read": 3 + }, + "filter_func": "" # User-defined filtering function +} +``` + +### Example API usage + +- Create a route + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "uri": "/index.html", + "hosts": ["foo.com", "*.bar.com"], + "remote_addrs": ["127.0.0.0/8"], + "methods": ["PUT", "GET"], + "enable_websocket": true, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }' + ``` + + ```shell + HTTP/1.1 201 Created + Date: Sat, 31 Aug 2019 01:17:15 GMT + ... 
+ ``` + +- Create a route expires after 60 seconds, then it's deleted automatically + + ```shell + curl 'http://127.0.0.1:9180/apisix/admin/routes/2?ttl=60' \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "uri": "/aa/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }' + ``` + + ```shell + HTTP/1.1 201 Created + Date: Sat, 31 Aug 2019 01:17:15 GMT + ... + ``` + +- Add an upstream node to the Route + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + } + } + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, upstream nodes will be updated to: + + ```shell + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + ``` + +- Update the weight of an upstream node to the Route + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1981": 10 + } + } + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, upstream nodes will be updated to: + + ```shell + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 10 + } + ``` + +- Delete an upstream node for the Route + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1980": null + } + } + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, upstream nodes will be updated to: + + ```shell + { + "127.0.0.1:1981": 10 + } + ``` + +- Replace methods of the Route -- array + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d '{ + "methods": ["GET", "POST"] + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... 
+ ``` + + After successful execution, methods will not retain the original data, and the entire update is: + + ```shell + ["GET", "POST"] + ``` + +- Replace upstream nodes of the Route -- sub path + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1/upstream/nodes \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "127.0.0.1:1982": 1 + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, nodes will not retain the original data, and the entire update is: + + ```shell + { + "127.0.0.1:1982": 1 + } + ``` + +- Replace methods of the Route -- sub path + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1/methods \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d'["POST", "DELETE", " PATCH"]' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, methods will not retain the original data, and the entire update is: + + ```shell + ["POST", "DELETE", "PATCH"] + ``` + +- Disable route + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "status": 0 + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, status nodes will be updated to: + + ```shell + { + "status": 0 + } + ``` + +- Enable route + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "status": 1 + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, status nodes will be updated to: + + ```shell + { + "status": 1 + } + ``` + +### Response Parameters + +Currently, the response is returned from etcd. + +## Service + +A Service is an abstraction of an API (which can also be understood as a set of Route abstractions). It usually corresponds to an upstream service abstraction. + +The relationship between Routes and a Service is usually N:1. 
+ +### Service API + +Service resource request address: /apisix/admin/services/{id} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | ---------------------------------- | ------------ | ------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/services | NULL | Fetches a list of available Services. | +| GET | /apisix/admin/services/{id} | NULL | Fetches specified Service by id. | +| PUT | /apisix/admin/services/{id} | {...} | Creates a Service with the specified id. | +| POST | /apisix/admin/services | {...} | Creates a Service and assigns a random id. | +| DELETE | /apisix/admin/services/{id} | NULL | Removes the Service with the specified id. | +| PATCH | /apisix/admin/services/{id} | {...} | Updates the selected attributes of the specified, existing Service. To delete an attribute, set value of attribute set to null. | +| PATCH | /apisix/admin/services/{id}/{path} | {...} | Updates the attribute specified in the path. The values of other attributes remain unchanged. | + +### Request Body Parameters + +| Parameter | Required | Type | Description | Example | +| ---------------- | -------- | ----------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------ | +| plugins | False | Plugin | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | +| upstream | False | Upstream | Configuration of the [Upstream](./terminology/upstream.md). | | +| upstream_id | False | Upstream | Id of the [Upstream](terminology/upstream.md) service. | | +| name | False | Auxiliary | Identifier for the Service. | service-xxxx | +| desc | False | Auxiliary | Description of usage scenarios. 
| service xxxx | +| labels | False | Match Rules | Attributes of the Service specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | +| enable_websocket | False | Auxiliary | Enables a websocket. Set to `false` by default. | | +| hosts | False | Match Rules | Matches with any one of the multiple `host`s specified in the form of a non-empty list. | ["foo.com", "*.bar.com"] | + +Example configuration: + +```shell +{ + "id": "1", # id + "plugins": {}, # Bound plugin + "upstream_id": "1", # upstream id, recommended + "upstream": {}, # upstream, not recommended + "name": "service-test", + "desc": "hello world", + "enable_websocket": true, + "hosts": ["foo.com"] +} +``` + +### Example API usage + +- Create a service + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201 \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "enable_websocket": true, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }' + ``` + + ```shell + HTTP/1.1 201 Created + ... + ``` + +- Add an upstream node to the Service + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + } + } + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, upstream nodes will be updated to: + + ```shell + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + ``` + +- Update the weight of an upstream node to the Service + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201 \ + -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1981": 10 + } + } + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... 
+ ``` + + After successful execution, upstream nodes will be updated to: + + ```shell + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 10 + } + ``` + +- Delete an upstream node for the Service + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201 \ + -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1980": null + } + } + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, upstream nodes will be updated to: + + ```shell + { + "127.0.0.1:1981": 10 + } + ``` + +- Replace upstream nodes of the Service + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201/upstream/nodes \ + -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' + { + "127.0.0.1:1982": 1 + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, upstream nodes will not retain the original data, and the entire update is: + + ```shell + { + "127.0.0.1:1982": 1 + } + ``` + +### Response Parameters + +Currently, the response is returned from etcd. + +## Consumer + +Consumers are users of services and can only be used in conjunction with a user authentication system. A Consumer is identified by a `username` property. So, for creating a new Consumer, only the HTTP `PUT` method is supported. + +### Consumer API + +Consumer resource request address: /apisix/admin/consumers/{username} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | ---------------------------------- | ------------ | ------------------------------------------------- | +| GET | /apisix/admin/consumers | NULL | Fetches a list of all Consumers. | +| GET | /apisix/admin/consumers/{username} | NULL | Fetches specified Consumer by username. | +| PUT | /apisix/admin/consumers | {...} | Create new Consumer. | +| DELETE | /apisix/admin/consumers/{username} | NULL | Removes the Consumer with the specified username. 
| + +### Request Body Parameters + +| Parameter | Required | Type | Description | Example | +| ----------- | -------- | ----------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------ | +| username | True | Name | Name of the Consumer. | | +| group_id | False | Name | Group of the Consumer. | | +| plugins | False | Plugin | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | +| desc | False | Auxiliary | Description of usage scenarios. | customer xxxx | +| labels | False | Match Rules | Attributes of the Consumer specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | + +Example Configuration: + +```shell +{ + "plugins": {}, # Bound plugin + "username": "name", # Consumer name + "desc": "hello world" # Consumer desc +} +``` + +When bound to a Route or Service, the Authentication Plugin infers the Consumer from the request and does not require any parameters. Whereas, when it is bound to a Consumer, username, password and other information needs to be provided. + +### Example API usage + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } +}' +``` + +```shell +HTTP/1.1 200 OK +Date: Thu, 26 Dec 2019 08:17:49 GMT +... 
+
+{"node":{"value":{"username":"jack","plugins":{"key-auth":{"key":"auth-one"},"limit-count":{"time_window":60,"count":2,"rejected_code":503,"key":"remote_addr","policy":"local"}}},"createdIndex":64,"key":"\/apisix\/consumers\/jack","modifiedIndex":64},"prevNode":{"value":"{\"username\":\"jack\",\"plugins\":{\"key-auth\":{\"key\":\"auth-one\"},\"limit-count\":{\"time_window\":60,\"count\":2,\"rejected_code\":503,\"key\":\"remote_addr\",\"policy\":\"local\"}}}","createdIndex":63,"key":"\/apisix\/consumers\/jack","modifiedIndex":63}}
+```
+
+Since `v2.2`, we can bind multiple authentication plugins to the same consumer.
+
+### Response Parameters
+
+Currently, the response is returned from etcd.
+
+## Credential
+
+Credential is used to hold the authentication credentials for the Consumer.
+Credentials are used when multiple credentials need to be configured for a Consumer.
+
+### Credential API
+
+Credential resource request address: /apisix/admin/consumers/{username}/credentials/{credential_id}
+
+### Request Methods
+
+| Method | Request URI | Request Body | Description |
+| ------ |----------------------------------------------------------------|--------------|------------------------------------------------|
+| GET | /apisix/admin/consumers/{username}/credentials | NULL | Fetches list of all credentials of the Consumer |
+| GET | /apisix/admin/consumers/{username}/credentials/{credential_id} | NULL | Fetches the Credential by `credential_id` |
+| PUT | /apisix/admin/consumers/{username}/credentials/{credential_id} | {...} | Create or update a Credential |
+| DELETE | /apisix/admin/consumers/{username}/credentials/{credential_id} | NULL | Delete the Credential |
+
+### Request Body Parameters
+
+| Parameter | Required | Type | Description | Example |
+| ----------- |-----| ------- |------------------------------------------------------------|-------------------------------------------------|
+| plugins | False | Plugin | Auth plugins configuration. 
| | +| name | False | Auxiliary | Identifier for the Credential. | credential_primary | +| desc | False | Auxiliary | Description of usage scenarios. | credential xxxx | +| labels | False | Match Rules | Attributes of the Credential specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | + +Example Configuration: + +```shell +{ + "plugins": { + "key-auth": { + "key": "auth-one" + } + }, + "desc": "hello world" +} +``` + +### Example API usage + +Prerequisite: Consumer `jack` has been created. + +Create the `key-auth` Credential for consumer `jack`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/auth-one \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "plugins": { + "key-auth": { + "key": "auth-one" + } + } +}' +``` + +``` +HTTP/1.1 200 OK +Date: Thu, 26 Dec 2019 08:17:49 GMT +... + +{"key":"\/apisix\/consumers\/jack\/credentials\/auth-one","value":{"update_time":1666260780,"plugins":{"key-auth":{"key":"auth-one"}},"create_time":1666260780}} +``` + +## Upstream + +Upstream is a virtual host abstraction that performs load balancing on a given set of service nodes according to the configured rules. + +An Upstream configuration can be directly bound to a Route or a Service, but the configuration in Route has a higher priority. This behavior is consistent with priority followed by the Plugin object. + +### Upstream API + +Upstream resource request address: /apisix/admin/upstreams/{id} + +For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax) + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | ----------------------------------- | ------------ | -------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/upstreams | NULL | Fetch a list of all configured Upstreams. | +| GET | /apisix/admin/upstreams/{id} | NULL | Fetches specified Upstream by id. 
| +| PUT | /apisix/admin/upstreams/{id} | {...} | Creates an Upstream with the specified id. | +| POST | /apisix/admin/upstreams | {...} | Creates an Upstream and assigns a random id. | +| DELETE | /apisix/admin/upstreams/{id} | NULL | Removes the Upstream with the specified id. | +| PATCH | /apisix/admin/upstreams/{id} | {...} | Updates the selected attributes of the specified, existing Upstream. To delete an attribute, set value of attribute set to null. | +| PATCH | /apisix/admin/upstreams/{id}/{path} | {...} | Updates the attribute specified in the path. The values of other attributes remain unchanged. | + +### Request Body Parameters + +In addition to the equalization algorithm selections, Upstream also supports passive health check and retry for the upstream. See the table below for more details: + +| Parameter | Required | Type | Description | Example | +|-----------------------------|------------------------------------------------------------------|-------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| type | False | Enumeration | Load balancing algorithm to be used, and the default value is `roundrobin`. 
| | +| nodes | True, can't be used with `service_name` | Node | IP addresses (with optional ports) of the Upstream nodes represented as a hash table or an array. In the hash table, the key is the IP address and the value is the weight of the node for the load balancing algorithm. For hash table case, if the key is IPv6 address with port, then the IPv6 address must be quoted with square brackets. In the array, each item is a hash table with keys `host`, `weight`, and the optional `port` and `priority` (defaults to `0`). Nodes with lower priority are used only when all nodes with a higher priority are tried and are unavailable. Empty nodes are treated as placeholders and clients trying to access this Upstream will receive a 502 response. | `192.168.1.100:80`, `[::1]:80` | +| service_name | True, can't be used with `nodes` | String | Service name used for [service discovery](discovery.md). | `a-bootiful-client` | +| discovery_type | True, if `service_name` is used | String | The type of service [discovery](discovery.md). | `eureka` | +| hash_on | False | Auxiliary | Only valid if the `type` is `chash`. Supports Nginx variables (`vars`), custom headers (`header`), `cookie` and `consumer`. Defaults to `vars`. | | +| key | False | Match Rules | Only valid if the `type` is `chash`. Finds the corresponding node `id` according to `hash_on` and `key` values. When `hash_on` is set to `vars`, `key` is a required parameter and it supports [Nginx variables](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is a required parameter, and `header name` can be customized. When `hash_on` is set to `cookie`, `key` is also a required parameter, and `cookie name` can be customized. When `hash_on` is set to `consumer`, `key` need not be set and the `key` used by the hash algorithm would be the authenticated `consumer_name`. 
| `uri`, `server_name`, `server_addr`, `request_uri`, `remote_port`, `remote_addr`, `query_string`, `host`, `hostname`, `arg_***`, `arg_***` | +| checks | False | Health Checker | Configures the parameters for the [health check](./tutorials/health-check.md). | | +| retries | False | Integer | Sets the number of retries while passing the request to Upstream using the underlying Nginx mechanism. Set according to the number of available backend nodes by default. Setting this to `0` disables retry. | | +| retry_timeout | False | Integer | Timeout to continue with retries. Setting this to `0` disables the retry timeout. | | +| timeout | False | Timeout | Sets the timeout (in seconds) for connecting to, and sending and receiving messages to and from the Upstream. | `{"connect": 0.5,"send": 0.5,"read": 0.5}` | +| name | False | Auxiliary | Identifier for the Upstream. | | +| desc | False | Auxiliary | Description of usage scenarios. | | +| pass_host | False | Enumeration | Configures the `host` when the request is forwarded to the upstream. Can be one of `pass`, `node` or `rewrite`. Defaults to `pass` if not specified. `pass`- transparently passes the client's host to the Upstream. `node`- uses the host configured in the node of the Upstream. `rewrite`- Uses the value configured in `upstream_host`. | | +| upstream_host | False | Auxiliary | Specifies the host of the Upstream request. This is only valid if the `pass_host` is set to `rewrite`. | | +| scheme | False | Auxiliary | The scheme used when communicating with the Upstream. For an L7 proxy, this value can be one of `http`, `https`, `grpc`, `grpcs`. For an L4 proxy, this value could be one of `tcp`, `udp`, `tls`. Defaults to `http`. | | +| labels | False | Match Rules | Attributes of the Upstream specified as `key-value` pairs. 
| {"version":"v2","build":"16","env":"production"} | +| tls.client_cert | False, can't be used with `tls.client_cert_id` | HTTPS certificate | Sets the client certificate while connecting to a TLS Upstream. | | +| tls.client_key | False, can't be used with `tls.client_cert_id` | HTTPS certificate private key | Sets the client private key while connecting to a TLS Upstream. | | +| tls.client_cert_id | False, can't be used with `tls.client_cert` and `tls.client_key` | SSL | Set the referenced [SSL](#ssl) id. | | +| keepalive_pool.size | False | Auxiliary | Sets `keepalive` directive dynamically. | | +| keepalive_pool.idle_timeout | False | Auxiliary | Sets `keepalive_timeout` directive dynamically. | | +| keepalive_pool.requests | False | Auxiliary | Sets `keepalive_requests` directive dynamically. | | + +An Upstream can be one of the following `types`: + +- `roundrobin`: Round robin balancing with weights. +- `chash`: Consistent hash. +- `ewma`: Pick the node with minimum latency. See [EWMA Chart](https://en.wikipedia.org/wiki/EWMA_chart) for more details. +- `least_conn`: Picks the node with the lowest value of `(active_conn + 1) / weight`. Here, an active connection is a connection being used by the request and is similar to the concept in Nginx. +- user-defined load balancer loaded via `require("apisix.balancer.your_balancer")`. + +The following should be considered when setting the `hash_on` value: + +- When set to `vars`, a `key` is required. The value of the key can be any of the [Nginx variables](http://nginx.org/en/docs/varindex.html) without the `$` prefix. +- When set to `header`, a `key` is required. This is equal to "http\_`key`". +- When set to `cookie`, a `key` is required. This key is equal to "cookie\_`key`". The cookie name is case-sensitive. +- When set to `consumer`, the `key` is optional and the key is set to the `consumer_name` captured from the authentication Plugin. +- When set to `vars_combinations`, the `key` is required. 
The value of the key can be a combination of any of the [Nginx variables](http://nginx.org/en/docs/varindex.html) like `$request_uri$remote_addr`. + +The features described below requires APISIX to be run on [APISIX-Runtime](./FAQ.md#how-do-i-build-the-apisix-runtime-environment): + +You can set the `scheme` to `tls`, which means "TLS over TCP". + +To use mTLS to communicate with Upstream, you can use the `tls.client_cert/key` in the same format as SSL's `cert` and `key` fields. + +Or you can reference SSL object by `tls.client_cert_id` to set SSL cert and key. The SSL object can be referenced only if the `type` field is `client`, otherwise the request will be rejected by APISIX. In addition, only `cert` and `key` will be used in the SSL object. + +To allow Upstream to have a separate connection pool, use `keepalive_pool`. It can be configured by modifying its child fields. + +Example Configuration: + +```shell +{ + "id": "1", # id + "retries": 1, # retry times + "timeout": { # Set the timeout for connecting, sending and receiving messages, each is 15 seconds. + "connect":15, + "send":15, + "read":15 + }, + "nodes": {"host:80": 100}, # Upstream machine address list, the format is `Address + Port` + # is the same as "nodes": [ {"host": "host", "port": 80, "weight": 100} ], + "type":"roundrobin", + "checks": {}, # Health check parameters + "hash_on": "", + "key": "", + "name": "upstream-for-test", + "desc": "hello world", + "scheme": "http" # The scheme used when communicating with upstream, the default is `http` +} +``` + +### Example API usage + +#### Create an Upstream and modify the data in `nodes` + +1. Create upstream + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100 \ + -H "X-API-KEY: $admin_key" -i -X PUT -d ' + { + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980": 1 + } + }' + ``` + + ```shell + HTTP/1.1 201 Created + ... + ``` + +2. 
Add a node to the Upstream + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "nodes": { + "127.0.0.1:1981": 1 + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + After successful execution, nodes will be updated to: + + ```shell + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + ``` + +3. Update the weight of a node to the Upstream + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100 \ + -H'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PATCH -i -d ' + { + "nodes": { + "127.0.0.1:1981": 10 + } + }' + ``` + + ```shell + HTTP/1.1 200 OK + ... + ``` + + After successful execution, nodes will be updated to: + + ```shell + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 10 + } + ``` + +4. Delete a node for the Upstream + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "nodes": { + "127.0.0.1:1980": null + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + After successful execution, nodes will be updated to: + + ```shell + { + "127.0.0.1:1981": 10 + } + ``` + +5. Replace the nodes of the Upstream + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100/nodes \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "127.0.0.1:1982": 1 + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + After the execution is successful, nodes will not retain the original data, and the entire update is: + + ```shell + { + "127.0.0.1:1982": 1 + } + ``` + +#### Proxy client request to `https` Upstream service + +1. Create a route and configure the upstream scheme as `https`. 
+
+   ```shell
+   curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \
+   -H "X-API-KEY: $admin_key" -X PUT -d '
+   {
+       "uri": "/get",
+       "upstream": {
+           "type": "roundrobin",
+           "scheme": "https",
+           "nodes": {
+               "httpbin.org:443": 1
+           }
+       }
+   }'
+   ```
+
+   After successful execution, the scheme when requesting to communicate with the upstream will be `https`.
+
+2. Send a request to test.
+
+   ```shell
+   curl http://127.0.0.1:9080/get
+   ```
+
+   ```shell
+   {
+     "args": {},
+     "headers": {
+       "Accept": "*/*",
+       "Host": "127.0.0.1",
+       "User-Agent": "curl/7.29.0",
+       "X-Amzn-Trace-Id": "Root=1-6058324a-0e898a7f04a5e95b526bb183",
+       "X-Forwarded-Host": "127.0.0.1"
+     },
+     "origin": "127.0.0.1",
+     "url": "https://127.0.0.1/get"
+   }
+   ```
+
+The request is successful, meaning that the proxy Upstream `https` is valid.
+
+:::note
+
+Each node can be configured with a priority. A node with low priority will only be
+used when all the nodes with higher priority have been tried or are unavailable.
+
+:::
+
+As the default priority is 0, nodes with negative priority can be configured as a backup.
+
+For example:
+
+```json
+{
+    "uri": "/hello",
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": [
+            { "host": "127.0.0.1", "port": 1980, "weight": 2000 },
+            { "host": "127.0.0.1", "port": 1981, "weight": 1, "priority": -1 }
+        ],
+        "checks": {
+            "active": {
+                "http_path": "/status",
+                "healthy": {
+                    "interval": 1,
+                    "successes": 1
+                },
+                "unhealthy": {
+                    "interval": 1,
+                    "http_failures": 1
+                }
+            }
+        }
+    }
+}
+```
+
+Node `127.0.0.1:1981` will be used only after `127.0.0.1:1980` is tried or unavailable.
+It can therefore act as a backup for the node `127.0.0.1:1980`.
+
+### Response Parameters
+
+Currently, the response is returned from etcd. 
+ +## SSL + +### SSL API + +SSL resource request address: /apisix/admin/ssls/{id} + +For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax) + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | ---------------------- | ------------ | ----------------------------------------------- | +| GET | /apisix/admin/ssls | NULL | Fetches a list of all configured SSL resources. | +| GET | /apisix/admin/ssls/{id} | NULL | Fetch specified resource by id. | +| PUT | /apisix/admin/ssls/{id} | {...} | Creates a resource with the specified id. | +| POST | /apisix/admin/ssls | {...} | Creates a resource and assigns a random id. | +| DELETE | /apisix/admin/ssls/{id} | NULL | Removes the resource with the specified id. | + +### Request Body Parameters + +| Parameter | Required | Type | Description | Example | +| ------------ | -------- | ------------------------ | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ | +| cert | True | Certificate | HTTPS certificate. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | | +| key | True | Private key | HTTPS private key. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | | +| certs | False | An array of certificates | Used for configuring multiple certificates for the same domain excluding the one provided in the `cert` field. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | | +| keys | False | An array of private keys | Private keys to pair with the `certs`. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | | +| client.ca | False | Certificate | Sets the CA certificate that verifies the client. 
Requires OpenResty 1.19+. | | +| client.depth | False | Certificate | Sets the verification depth in client certificate chains. Defaults to 1. Requires OpenResty 1.19+. | | +| client.skip_mtls_uri_regex | False | An array of regular expressions, in PCRE format | Used to match URI, if matched, this request bypasses the client certificate checking, i.e. skip the MTLS. | ["/hello[0-9]+", "/foobar"] | +| snis | True, only if `type` is `server` | Match Rules | A non-empty array of HTTPS SNI | | +| desc | False | Auxiliary | Description of usage scenarios. | certs for production env | +| labels | False | Match Rules | Attributes of the resource specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | +| type | False | Auxiliary | Identifies the type of certificate, default `server`. | `client` Indicates that the certificate is a client certificate, which is used when APISIX accesses the upstream; `server` Indicates that the certificate is a server-side certificate, which is used by APISIX when verifying client requests. | +| status | False | Auxiliary | Enables the current SSL. Set to `1` (enabled) by default. | `1` to enable, `0` to disable | +| ssl_protocols | False | An array of ssl protocols | It is used to control the SSL/TLS protocol version used between servers and clients. See [SSL Protocol](./ssl-protocol.md) for more examples. | `["TLSv1.1", "TLSv1.2", "TLSv1.3"]` | + +Example Configuration: + +```shell +{ + "id": "1", # id + "cert": "cert", # Certificate + "key": "key", # Private key + "snis": ["t.com"] # https SNI +} +``` + +See [Certificate](./certificate.md) for more examples. + +## Global Rule + +Sets Plugins which run globally. i.e these Plugins will be run before any Route/Service level Plugins. 
+ +### Global Rule API + +Global Rule resource request address: /apisix/admin/global_rules/{id} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | -------------------------------------- | ------------ | ----------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/global_rules | NULL | Fetches a list of all Global Rules. | +| GET | /apisix/admin/global_rules/{id} | NULL | Fetches specified Global Rule by id. | +| PUT | /apisix/admin/global_rules/{id} | {...} | Creates a Global Rule with the specified id. | +| DELETE | /apisix/admin/global_rules/{id} | NULL | Removes the Global Rule with the specified id. | +| PATCH | /apisix/admin/global_rules/{id} | {...} | Updates the selected attributes of the specified, existing Global Rule. To delete an attribute, set value of attribute set to null. | +| PATCH | /apisix/admin/global_rules/{id}/{path} | {...} | Updates the attribute specified in the path. The values of other attributes remain unchanged. | + +### Request Body Parameters + +| Parameter | Required | Description | Example | +| ----------- | -------- | ------------------------------------------------------------------------------------------------------------------ | ---------- | +| plugins | True | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | + +## Consumer group + +Group of Plugins which can be reused across Consumers. 
+ +### Consumer group API + +Consumer group resource request address: /apisix/admin/consumer_groups/{id} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | ---------------------------------------- | ------------ | ------------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/consumer_groups | NULL | Fetches a list of all Consumer groups. | +| GET | /apisix/admin/consumer_groups/{id} | NULL | Fetches specified Consumer group by id. | +| PUT | /apisix/admin/consumer_groups/{id} | {...} | Creates a new Consumer group with the specified id. | +| DELETE | /apisix/admin/consumer_groups/{id} | NULL | Removes the Consumer group with the specified id. | +| PATCH | /apisix/admin/consumer_groups/{id} | {...} | Updates the selected attributes of the specified, existing Consumer group. To delete an attribute, set value of attribute set to null. | +| PATCH | /apisix/admin/consumer_groups/{id}/{path} | {...} | Updates the attribute specified in the path. The values of other attributes remain unchanged. | + +### Request Body Parameters + +| Parameter | Required | Description | Example | +| ----------- | -------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------ | +| plugins | True | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | +| name | False | Identifier for the consumer group. | premium-tier | +| desc | False | Description of usage scenarios. | customer xxxx | +| labels | False | Attributes of the Consumer group specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | + +## Plugin config + +Group of Plugins which can be reused across Routes. 
+ +### Plugin Config API + +Plugin Config resource request address: /apisix/admin/plugin_configs/{id} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | ---------------------------------------- | ------------ | ------------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/plugin_configs | NULL | Fetches a list of all Plugin configs. | +| GET | /apisix/admin/plugin_configs/{id} | NULL | Fetches specified Plugin config by id. | +| PUT | /apisix/admin/plugin_configs/{id} | {...} | Creates a new Plugin config with the specified id. | +| DELETE | /apisix/admin/plugin_configs/{id} | NULL | Removes the Plugin config with the specified id. | +| PATCH | /apisix/admin/plugin_configs/{id} | {...} | Updates the selected attributes of the specified, existing Plugin config. To delete an attribute, set value of attribute set to null. | +| PATCH | /apisix/admin/plugin_configs/{id}/{path} | {...} | Updates the attribute specified in the path. The values of other attributes remain unchanged. | + +### Request Body Parameters + +| Parameter | Required | Description | Example | +| ----------- | -------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------ | +| plugins | True | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | +| desc | False | Description of usage scenarios. | customer xxxx | +| labels | False | Attributes of the Plugin config specified as key-value pairs. 
| {"version":"v2","build":"16","env":"production"} | + +## Plugin Metadata + +### Plugin Metadata API + +Plugin Metadata resource request address: /apisix/admin/plugin_metadata/{plugin_name} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | ------------------------------------------- | ------------ | --------------------------------------------------------------- | +| GET | /apisix/admin/plugin_metadata/{plugin_name} | NULL | Fetches the metadata of the specified Plugin by `plugin_name`. | +| PUT | /apisix/admin/plugin_metadata/{plugin_name} | {...} | Creates metadata for the Plugin specified by the `plugin_name`. | +| DELETE | /apisix/admin/plugin_metadata/{plugin_name} | NULL | Removes metadata for the Plugin specified by the `plugin_name`. | + +### Request Body Parameters + +A JSON object defined according to the `metadata_schema` of the Plugin ({plugin_name}). + +Example Configuration: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/example-plugin \ +-H "X-API-KEY: $admin_key" -i -X PUT -d ' +{ + "skey": "val", + "ikey": 1 +}' +``` + +```shell +HTTP/1.1 201 Created +Date: Thu, 26 Dec 2019 04:19:34 GMT +Content-Type: text/plain +``` + +## Plugin + +### Plugin API + +Plugin resource request address: /apisix/admin/plugins/{plugin_name} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | ----------------------------------- | ------------ | ---------------------------------------------- | +| GET | /apisix/admin/plugins/list | NULL | Fetches a list of all Plugins. | +| GET | /apisix/admin/plugins/{plugin_name} | NULL | Fetches the specified Plugin by `plugin_name`. | +| GET | /apisix/admin/plugins?all=true | NULL | Get all properties of all plugins. | +| GET | /apisix/admin/plugins?all=true&subsystem=stream| NULL | Gets properties of all Stream plugins.| +| GET | /apisix/admin/plugins?all=true&subsystem=http | NULL | Gets properties of all HTTP plugins. 
| +| PUT | /apisix/admin/plugins/reload | NULL | Reloads the plugin according to the changes made in code | +| GET | /apisix/admin/plugins/{plugin_name}?subsystem=stream | NULL | Gets properties of a specified plugin if it is supported in Stream/L4 subsystem. | +| GET | /apisix/admin/plugins/{plugin_name}?subsystem=http | NULL | Gets properties of a specified plugin if it is supported in HTTP/L7 subsystem. | + +:::caution + +The interface of getting properties of all plugins via `/apisix/admin/plugins?all=true` will be deprecated soon. + +::: + +### Request Body Parameters + +The data structure of the specified Plugin ({plugin_name}). + +### Request Arguments + +| Name | Description | Default | +| --------- | ----------------------------- | ------- | +| subsystem | The subsystem of the Plugins. | http | + +The plugin can be filtered on subsystem so that the ({plugin_name}) is searched in the subsystem passed through query params. + +### Example API usage: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/plugins/list" \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +``` + +```shell +["zipkin","request-id",...] +``` + +```shell +curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth?subsystem=http" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +``` + +```json +{"$comment":"this is a mark for our injected plugin schema","properties":{"header":{"default":"apikey","type":"string"},"hide_credentials":{"default":false,"type":"boolean"},"_meta":{"properties":{"filter":{"type":"array","description":"filter determines whether the plugin needs to be executed at runtime"},"disable":{"type":"boolean"},"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"priority":{"type":"integer","description":"priority of plugins by customized order"}},"type":"object"},"query":{"default":"apikey","type":"string"}},"type":"object"} +``` + +:::tip + +You can use the `/apisix/admin/plugins?all=true` API to get all properties of all plugins. This API will be deprecated soon. 
+ +::: + +## Stream Route + +Route used in the [Stream Proxy](./stream-proxy.md). + +### Stream Route API + +Stream Route resource request address: /apisix/admin/stream_routes/{id} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | -------------------------------- | ------------ | ----------------------------------------------- | +| GET | /apisix/admin/stream_routes | NULL | Fetches a list of all configured Stream Routes. | +| GET | /apisix/admin/stream_routes/{id} | NULL | Fetches specified Stream Route by id. | +| PUT | /apisix/admin/stream_routes/{id} | {...} | Creates a Stream Route with the specified id. | +| POST | /apisix/admin/stream_routes | {...} | Creates a Stream Route and assigns a random id. | +| DELETE | /apisix/admin/stream_routes/{id} | NULL | Removes the Stream Route with the specified id. | + +### Request Body Parameters + +| Parameter | Required | Type | Description | Example | +| ----------- | -------- | -------- | ------------------------------------------------------------------- | ----------------------------- | +| name | False | Auxiliary | Identifier for the Stream Route. | postgres-proxy | +| desc | False | Auxiliary | Description of usage scenarios. | proxy endpoint for postgresql | +| labels | False | Match Rules | Attributes of the Stream Route specified as key-value pairs. | {"version":"17","service":"user","env":"production"} | +| upstream | False | Upstream | Configuration of the [Upstream](./terminology/upstream.md). | | +| upstream_id | False | Upstream | Id of the [Upstream](terminology/upstream.md) service. | | +| service_id | False | String | Id of the [Service](terminology/service.md) service. | | +| remote_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with client IP. | "127.0.0.1" or "127.0.0.1/32" or "::1" | +| server_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with APISIX Server IP. 
| "127.0.0.1" or "127.0.0.1/32" or "::1" | +| server_port | False | Integer | Filters Upstream forwards by matching with APISIX Server port. | 9090 | +| sni | False | Host | Server Name Indication. | "test.com" | +| protocol.name | False | String | Name of the protocol proxied by xRPC framework. | "redis" | +| protocol.conf | False | Configuration | Protocol-specific configuration. | | + +To learn more about filtering in stream proxies, check [this](./stream-proxy.md#more-route-match-options) document. + +## Secret + +Secret means `Secrets Management`, which could use any supported secret manager, e.g. `vault`. + +### Secret API + +Secret resource request address: /apisix/admin/secrets/{secretmanager}/{id} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | ---------------------------------- | ------------ | ------------------------------------------------- | +| GET | /apisix/admin/secrets | NULL | Fetches a list of all secrets. | +| GET | /apisix/admin/secrets/{manager}/{id} | NULL | Fetches specified secrets by id. | +| PUT | /apisix/admin/secrets/{manager} | {...} | Create new secrets configuration. | +| DELETE | /apisix/admin/secrets/{manager}/{id} | NULL | Removes the secrets with the specified id. | +| PATCH | /apisix/admin/secrets/{manager}/{id} | {...} | Updates the selected attributes of the specified, existing secrets. To delete an attribute, set value of attribute set to null. | +| PATCH | /apisix/admin/secrets/{manager}/{id}/{path} | {...} | Updates the attribute specified in the path. The values of other attributes remain unchanged. | + +### Request Body Parameters + +#### When Secret Manager is Vault + +| Parameter | Required | Type | Description | Example | +| ----------- | -------- | ----------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------ | +| uri | True | URI | URI of the Vault server. 
| | +| prefix | True | string | Key prefix. | | +| token | True | string | Vault token. | | +| namespace | False | string | Vault namespace, no default value | `admin` | + +Example Configuration: + +```shell +{ + "uri": "https://localhost/vault", + "prefix": "/apisix/kv", + "token": "343effad" +} +``` + +Example API usage: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/secrets/vault/test2 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "http://xxx/get", + "prefix" : "apisix", + "token" : "apisix" +}' +``` + +```shell +HTTP/1.1 200 OK +... + +{"key":"\/apisix\/secrets\/vault\/test2","value":{"id":"vault\/test2","token":"apisix","prefix":"apisix","update_time":1669625828,"create_time":1669625828,"uri":"http:\/\/xxx\/get"}} +``` + +#### When Secret Manager is AWS + +| Parameter | Required | Type | Description | +| ----------------- | -------- | ------ | --------------------------------------- | +| access_key_id | True | string | AWS Access Key ID | +| secret_access_key | True | string | AWS Secret Access Key | +| session_token | False | string | Temporary access credential information | +| region | False | string | AWS Region | +| endpoint_url | False | URI | AWS Secret Manager URL | + +Example Configuration: + +```json +{ + "endpoint_url": "http://127.0.0.1:4566", + "region": "us-east-1", + "access_key_id": "access", + "secret_access_key": "secret", + "session_token": "token" +} +``` + +Example API usage: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/secrets/aws/test3 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "endpoint_url": "http://127.0.0.1:4566", + "region": "us-east-1", + "access_key_id": "access", + "secret_access_key": "secret", + "session_token": "token" +}' +``` + +```shell +HTTP/1.1 200 OK +... 
+ +{"value":{"create_time":1726069970,"endpoint_url":"http://127.0.0.1:4566","region":"us-east-1","access_key_id":"access","secret_access_key":"secret","id":"aws/test3","update_time":1726069970,"session_token":"token"},"key":"/apisix/secrets/aws/test3"} +``` + +#### When Secret Manager is GCP + +| Parameter | Required | Type | Description | Example | +| ------------------------ | -------- | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | +| auth_config | True | object | Either `auth_config` or `auth_file` must be provided. | | +| auth_config.client_email | True | string | Email address of the Google Cloud service account. | | +| auth_config.private_key | True | string | Private key of the Google Cloud service account. | | +| auth_config.project_id | True | string | Project ID in the Google Cloud service account. | | +| auth_config.token_uri | False | string | Token URI of the Google Cloud service account. | [https://oauth2.googleapis.com/token](https://oauth2.googleapis.com/token) | +| auth_config.entries_uri | False | string | The API access endpoint for the Google Secrets Manager. | [https://secretmanager.googleapis.com/v1](https://secretmanager.googleapis.com/v1) | +| auth_config.scope | False | string | Access scopes of the Google Cloud service account. See [OAuth 2.0 Scopes for Google APIs](https://developers.google.com/identity/protocols/oauth2/scopes) | [https://www.googleapis.com/auth/cloud-platform](https://www.googleapis.com/auth/cloud-platform) | +| auth_file | True | string | Path to the Google Cloud service account authentication JSON file. Either `auth_config` or `auth_file` must be provided. 
| | +| ssl_verify | False | boolean | When set to `true`, enables SSL verification as mentioned in [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | true | + +Example Configuration: + +```json +{ + "auth_config" : { + "client_email": "email@apisix.iam.gserviceaccount.com", + "private_key": "private_key", + "project_id": "apisix-project", + "token_uri": "https://oauth2.googleapis.com/token", + "entries_uri": "https://secretmanager.googleapis.com/v1", + "scope": ["https://www.googleapis.com/auth/cloud-platform"] + } +} +``` + +Example API usage: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/secrets/gcp/test4 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "auth_config" : { + "client_email": "email@apisix.iam.gserviceaccount.com", + "private_key": "private_key", + "project_id": "apisix-project", + "token_uri": "https://oauth2.googleapis.com/token", + "entries_uri": "https://secretmanager.googleapis.com/v1", + "scope": ["https://www.googleapis.com/auth/cloud-platform"] + } +}' +``` + +```shell +HTTP/1.1 200 OK +... + +{"value":{"id":"gcp/test4","ssl_verify":true,"auth_config":{"token_uri":"https://oauth2.googleapis.com/token","scope":["https://www.googleapis.com/auth/cloud-platform"],"entries_uri":"https://secretmanager.googleapis.com/v1","client_email":"email@apisix.iam.gserviceaccount.com","private_key":"private_key","project_id":"apisix-project"},"create_time":1726070161,"update_time":1726070161},"key":"/apisix/secrets/gcp/test4"} +``` + +### Response Parameters + +Currently, the response is returned from etcd. + +## Proto + +Proto is used to store protocol buffers so that APISIX can communicate in gRPC. + +See [grpc-transcode plugin](./plugins/grpc-transcode.md#enabling-the-plugin) doc for more examples. 
+ +### Proto API + +Proto resource request address: /apisix/admin/protos/{id} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | -------------------------------- | ------------ | ----------------------------------------------- | +| GET | /apisix/admin/protos | NULL | List all Protos. | +| GET | /apisix/admin/protos/{id} | NULL | Get a Proto by id. | +| PUT | /apisix/admin/protos/{id} | {...} | Create or update a Proto with the given id. | +| POST | /apisix/admin/protos | {...} | Create a Proto with a random id. | +| DELETE | /apisix/admin/protos/{id} | NULL | Delete Proto by id. | + +### Request Body Parameters + +| Parameter | Required | Type | Description | Example | +|-----------|----------|-----------|--------------------------------------| ----------------------------- | +| content | True | String | Content of `.proto` or `.pb` files | See [here](./plugins/grpc-transcode.md#enabling-the-plugin) | +| name | False | Auxiliary | Identifier for the Protobuf definition. | user-proto | +| desc | False | Auxiliary | Description of usage scenarios. | protobuf for user service | +| labels | False | Match Rules | Attributes of the Proto specified as key-value pairs. | {"version":"v2","service":"user","env":"production"} | + +## Schema validation + +Check the validity of a configuration against its entity schema. This allows you to test your input before submitting a request to the entity endpoints of the Admin API. + +Note that this only performs the schema validation checks, checking that the input configuration is well-formed. Requests to the entity endpoint using the given configuration may still fail due to other reasons, such as invalid foreign key relationships or uniqueness check failures against the contents of the data store. 
+ +### Schema validation + +Schema validation request address: /apisix/admin/schema/validate/{resource} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | -------------------------------- | ------------ | ----------------------------------------------- | +| POST | /apisix/admin/schema/validate/{resource} | {..resource conf..} | Validate the resource configuration against corresponding schema. | + +### Request Body Parameters + +* 200: validate ok. +* 400: validate failed, with error as response body in JSON format. + +Example: + +```bash +curl http://127.0.0.1:9180/apisix/admin/schema/validate/routes \ + -H "X-API-KEY: $admin_key" -X POST -i -d '{ + "uri": 1980, + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } +}' +HTTP/1.1 400 Bad Request +Date: Mon, 21 Aug 2023 07:37:13 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.4.0 +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Access-Control-Expose-Headers: * +Access-Control-Max-Age: 3600 + +{"error_msg":"property \"uri\" validation failed: wrong type: expected string, got number"} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/apisix-variable.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/apisix-variable.md new file mode 100644 index 0000000..314d22d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/apisix-variable.md @@ -0,0 +1,54 @@ +--- +title: APISIX variable +keywords: + - Apache APISIX + - API Gateway + - APISIX variable +description: This article describes the variables supported by Apache APISIX. +--- + + + +## Description + +Besides [NGINX variable](http://nginx.org/en/docs/varindex.html), APISIX also provides +additional variables. 
+ +## List of variables + +| Variable Name | Origin | Description | Example | +|-------------------- | ---------- | ----------------------------------------------------------------------------------- | ------------- | +| balancer_ip | core | The IP of picked upstream server. | 192.168.1.2 | +| balancer_port | core | The port of picked upstream server. | 80 | +| consumer_name | core | Username of Consumer. | | +| consumer_group_id | core | Group ID of Consumer. | | +| graphql_name | core | The [operation name](https://graphql.org/learn/queries/#operation-name) of GraphQL. | HeroComparison | +| graphql_operation | core | The operation type of GraphQL. | mutation | +| graphql_root_fields | core | The top level fields of GraphQL. | ["hero"] | +| mqtt_client_id | mqtt-proxy | The client id in MQTT protocol. | | +| route_id | core | Id of Route. | | +| route_name | core | Name of Route. | | +| service_id | core | Id of Service. | | +| service_name | core | Name of Service. | | +| redis_cmd_line | Redis | The content of Redis command. | | +| resp_body | core | In the logger plugin, if some of the plugins support logging of response body, for example by configuring `include_resp_body: true`, then this variable can be used in the log format. | | +| rpc_time | xRPC | Time spent at the rpc request level. | | + +You can also register your own [variable](./plugin-develop.md#register-custom-variable). diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/architecture-design/apisix.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/architecture-design/apisix.md new file mode 100644 index 0000000..0f6824c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/architecture-design/apisix.md @@ -0,0 +1,51 @@ +--- +title: Architecture +keywords: + - API Gateway + - Apache APISIX + - APISIX architecture +description: Architecture of Apache APISIX—the Cloud Native API Gateway. 
+--- + + +APISIX is built on top of Nginx and [ngx_lua](https://github.com/openresty/lua-nginx-module) leveraging the power offered by LuaJIT. See [Why Apache APISIX chose Nginx and Lua to build API Gateway?](https://apisix.apache.org/blog/2021/08/25/why-apache-apisix-chose-nginx-and-lua/). + +![flow-software-architecture](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-software-architecture.png) + +APISIX has two main parts: + +1. APISIX core, Lua plugin, multi-language Plugin runtime, and the WASM plugin runtime. +2. Built-in Plugins that add features for observability, security, traffic control, etc. + +The APISIX core handles the important functions like matching Routes, load balancing, service discovery, configuration management, and provides a management API. It also includes APISIX Plugin runtime supporting Lua and multilingual Plugins (Go, Java, Python, JavaScript, etc) including the experimental WASM Plugin runtime. + +APISIX also has a set of [built-in Plugins](https://apisix.apache.org/docs/apisix/plugins/batch-requests) that add features like authentication, security, observability, etc. They are written in Lua. 
+ +## Request handling process + +The diagram below shows how APISIX handles an incoming request and applies corresponding Plugins: + +![flow-load-plugin](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-load-plugin.png) + +## Plugin hierarchy + +The chart below shows the order in which different types of Plugin are applied to a request: + +![flow-plugin-internal](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-plugin-internal.png) diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/aws.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/aws.md new file mode 100644 index 0000000..ce9636c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/aws.md @@ -0,0 +1,276 @@ +--- +title: Running APISIX in AWS with AWS CDK +--- + + + +[APISIX](https://github.com/apache/apisix) is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices. + +## Architecture + +This reference architecture walks you through building **APISIX** as a serverless container API Gateway on top of AWS Fargate with AWS CDK. 
+ +![Apache APISIX Serverless Architecture](../../assets/images/aws-fargate-cdk.png) + +## Generate an AWS CDK project with `projen` + +```bash +$ mkdir apisix-aws +$ cd $_ +$ npx projen new awscdk-app-ts +``` + +update the `.projenrc.js` with the following content: + +```js +const { AwsCdkTypeScriptApp } = require('projen'); + +const project = new AwsCdkTypeScriptApp({ + cdkVersion: "1.70.0", + name: "apisix-aws", + cdkDependencies: [ + '@aws-cdk/aws-ec2', + '@aws-cdk/aws-ecs', + '@aws-cdk/aws-ecs-patterns', + ] +}); + +project.synth(); +``` + +update the project: + +```ts +$ npx projen +``` + +## update `src/main.ts` + +```ts +import * as cdk from '@aws-cdk/core'; +import { Vpc, Port } from '@aws-cdk/aws-ec2'; +import { Cluster, ContainerImage, TaskDefinition, Compatibility } from '@aws-cdk/aws-ecs'; +import { ApplicationLoadBalancedFargateService, NetworkLoadBalancedFargateService } from '@aws-cdk/aws-ecs-patterns'; + +export class ApiSixStack extends cdk.Stack { + constructor(scope: cdk.Construct, id: string, props?: cdk.StackProps) { + super(scope, id, props); + + const vpc = Vpc.fromLookup(this, 'VPC', { + isDefault: true + }) + + const cluster = new Cluster(this, 'Cluster', { + vpc + }) + + /** + * ApiSix service + */ + const taskDefinition = new TaskDefinition(this, 'TaskApiSix', { + compatibility: Compatibility.FARGATE, + memoryMiB: '512', + cpu: '256' + }) + + taskDefinition + .addContainer('apisix', { + image: ContainerImage.fromRegistry('iresty/apisix'), + }) + .addPortMappings({ + containerPort: 9080 + }) + + taskDefinition + .addContainer('etcd', { + image: ContainerImage.fromRegistry('gcr.azk8s.cn/etcd-development/etcd:v3.3.12'), + // image: ContainerImage.fromRegistry('gcr.io/etcd-development/etcd:v3.3.12'), + }) + .addPortMappings({ + containerPort: 2379 + }) + + const svc = new ApplicationLoadBalancedFargateService(this, 'ApiSixService', { + cluster, + taskDefinition, + }) + + svc.targetGroup.setAttribute('deregistration_delay.timeout_seconds', 
'30') + svc.targetGroup.configureHealthCheck({ + interval: cdk.Duration.seconds(5), + healthyHttpCodes: '404', + healthyThresholdCount: 2, + unhealthyThresholdCount: 3, + timeout: cdk.Duration.seconds(4) + }) + + /** + * PHP service + */ + const taskDefinitionPHP = new TaskDefinition(this, 'TaskPHP', { + compatibility: Compatibility.FARGATE, + memoryMiB: '512', + cpu: '256' + }) + + taskDefinitionPHP + .addContainer('php', { + image: ContainerImage.fromRegistry('abiosoft/caddy:php'), + }) + .addPortMappings({ + containerPort: 2015 + }) + + const svcPHP = new NetworkLoadBalancedFargateService(this, 'PhpService', { + cluster, + taskDefinition: taskDefinitionPHP, + assignPublicIp: true, + }) + + // allow Fargate task behind NLB to accept all traffic + svcPHP.service.connections.allowFromAnyIpv4(Port.tcp(2015)) + svcPHP.targetGroup.setAttribute('deregistration_delay.timeout_seconds', '30') + svcPHP.loadBalancer.setAttribute('load_balancing.cross_zone.enabled', 'true') + + new cdk.CfnOutput(this, 'ApiSixDashboardURL', { + value: `http://${svc.loadBalancer.loadBalancerDnsName}/apisix/dashboard/` + }) + } +} + +const devEnv = { + account: process.env.CDK_DEFAULT_ACCOUNT, + region: process.env.CDK_DEFAULT_REGION, +}; + +const app = new cdk.App(); + +new ApiSixStack(app, 'apisix-stack-dev', { env: devEnv }); + +app.synth(); +``` + +## Deploy the APISIX Stack with AWS CDK + +```bash +$ cdk diff +$ cdk deploy +``` + +On deployment complete, some outputs will be returned: + +```bash +Outputs: +apiSix.PhpServiceLoadBalancerDNS5E5BAB1B = apiSi-PhpSe-FOL2MM4TW7G8-09029e095ab36fcc.elb.us-west-2.amazonaws.com +apiSix.ApiSixDashboardURL = http://apiSi-ApiSi-1TM103DN35GRY-1477666967.us-west-2.elb.amazonaws.com/apisix/dashboard/ +apiSix.ApiSixServiceLoadBalancerDNSD4E5B8CB = apiSi-ApiSi-1TM103DN35GRY-1477666967.us-west-2.elb.amazonaws.com +apiSix.ApiSixServiceServiceURLF6EC7872 = http://apiSi-ApiSi-1TM103DN35GRY-1477666967.us-west-2.elb.amazonaws.com +``` + +Open the 
`apiSix.ApiSixDashboardURL` from your browser and you will see the login prompt. + +### Configure the upstream nodes + +All upstream nodes are running as **AWS Fargate** tasks and registered to the **NLB(Network Load Balancer)** exposing multiple static IP addresses. We can query the IP addresses by **nslookup** the **apiSix.PhpServiceLoadBalancerDNS5E5BAB1B** like this: + +```bash +$ nslookup apiSi-PhpSe-FOL2MM4TW7G8-09029e095ab36fcc.elb.us-west-2.amazonaws.com +Server: 192.168.31.1 +Address: 192.168.31.1#53 + +Non-authoritative answer: +Name: apiSi-PhpSe-FOL2MM4TW7G8-09029e095ab36fcc.elb.us-west-2.amazonaws.com +Address: 44.224.124.213 +Name: apiSi-PhpSe-FOL2MM4TW7G8-09029e095ab36fcc.elb.us-west-2.amazonaws.com +Address: 18.236.43.167 +Name: apiSi-PhpSe-FOL2MM4TW7G8-09029e095ab36fcc.elb.us-west-2.amazonaws.com +Address: 35.164.164.178 +Name: apiSi-PhpSe-FOL2MM4TW7G8-09029e095ab36fcc.elb.us-west-2.amazonaws.com +Address: 44.226.102.63 +``` + +Configure the IP addresses returned as your upstream nodes in your **APISIX** dashboard followed by the **Services** and **Routes** configuration. Let's say we have a `/index.php` as the URI for the first route for our first **Service** from the **Upstream** IP addresses. + +![upstream with AWS NLB IP addresses](../../assets/images/aws-nlb-ip-addr.png) +![service with created upstream](../../assets/images/aws-define-service.png) +![define route with service and uri](../../assets/images/aws-define-route.png) + +## Validation + +OK. Let's test the `/index.php` on `{apiSix.ApiSixServiceServiceURL}/index.php` + +![Testing Apache APISIX on AWS Fargate](../../assets/images/aws-caddy-php-welcome-page.png) + +Now we have been successfully running **APISIX** in AWS Fargate as serverless container API Gateway service. 
+ +## Clean up + +```bash +$ cdk destroy +``` + +## Running APISIX in AWS China Regions + +update `src/main.ts` + +```js + taskDefinition + .addContainer('etcd', { + image: ContainerImage.fromRegistry('gcr.azk8s.cn/etcd-development/etcd:v3.3.12'), + // image: ContainerImage.fromRegistry('gcr.io/etcd-development/etcd:v3.3.12'), + }) + .addPortMappings({ + containerPort: 2379 + }) +``` + +_(read [here](https://github.com/iresty/docker-apisix/blob/9a731f698171f4838e9bc0f1c05d6dda130ca89b/example/docker-compose.yml#L18-L19) for more reference)_ + +Run `cdk deploy` and specify your preferred AWS region in China. + +```bash +# let's say we have another AWS_PROFILE for China regions called 'cn' +# make sure you have aws configure --profile=cn properly. +# +# deploy to NingXia region +$ cdk deploy --profile cn -c region=cn-northwest-1 +# deploy to Beijing region +$ cdk deploy --profile cn -c region=cn-north-1 +``` + +In the following case, we got the `Outputs` returned for **AWS Ningxia region(cn-northwest-1)**: + +```bash +Outputs: +apiSix.PhpServiceLoadBalancerDNS5E5BAB1B = apiSi-PhpSe-1760FFS3K7TXH-562fa1f7f642ec24.elb.cn-northwest-1.amazonaws.com.cn +apiSix.ApiSixDashboardURL = http://apiSi-ApiSi-123HOROQKWZKA-1268325233.cn-northwest-1.elb.amazonaws.com.cn/apisix/dashboard/ +apiSix.ApiSixServiceLoadBalancerDNSD4E5B8CB = apiSi-ApiSi-123HOROQKWZKA-1268325233.cn-northwest-1.elb.amazonaws.com.cn +apiSix.ApiSixServiceServiceURLF6EC7872 = http://apiSi-ApiSi-123HOROQKWZKA-1268325233.cn-northwest-1.elb.amazonaws.com.cn +``` + +Open the `apiSix.ApiSixDashboardURL` URL and log in to configure your **APISIX** in AWS China region. + +_TBD_ + +## Decouple APISIX and etcd3 on AWS + +For high availability and state consistency consideration, you might be interested to decouple the **etcd3** as a separate cluster from **APISIX** not only for performance but also high availability and fault tolerance yet with highly reliable state consistency. 
+ +_TBD_ diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/batch-processor.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/batch-processor.md new file mode 100644 index 0000000..48c1a9d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/batch-processor.md @@ -0,0 +1,149 @@ +--- +title: Batch Processor +--- + + + +The batch processor can be used to aggregate entries(logs/any data) and process them in a batch. +When the batch_max_size is set to 1 the processor will execute each entry immediately. Setting the batch max size more +than 1 will start aggregating the entries until it reaches the max size or the timeout expires. + +## Configurations + +The only mandatory parameter to create a batch processor is a function. The function will be executed when the batch reaches the max size +or when the buffer duration exceeds. + +| Name | Type | Requirement | Default | Valid | Description | +| ---------------- | ------- | ----------- | ------- | ------- | ------------------------------------------------------------ | +| name | string | optional | logger's name | ["http logger",...] | A unique identifier used to identify the batch processor, which defaults to the name of the logger plug-in that calls the batch processor, such as plug-in "http logger" 's `name` is "http logger. | +| batch_max_size | integer | optional | 1000 | [1,...] | Sets the maximum number of logs sent in each batch. When the number of logs reaches the set maximum, all logs will be automatically pushed to the HTTP/HTTPS service. | +| inactive_timeout | integer | optional | 5 | [1,...] | The maximum time to refresh the buffer (in seconds). When the maximum refresh time is reached, all logs will be automatically pushed to the HTTP/HTTPS service regardless of whether the number of logs in the buffer reaches the maximum number set. | +| buffer_duration | integer | optional | 60 | [1,...] 
| Maximum age in seconds of the oldest entry in a batch before the batch must be processed. | +| max_retry_count | integer | optional | 0 | [0,...] | Maximum number of retries before removing the entry from the processing pipeline when an error occurs. | +| retry_delay | integer | optional | 1 | [0,...] | Number of seconds the process execution should be delayed if the execution fails. | + +The following code shows an example of how to use batch processor in your plugin: + +```lua +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +... + +local plugin_name = "xxx-logger" +local batch_processor_manager = bp_manager_mod.new(plugin_name) +local schema = {...} +local _M = { + ... + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), +} + +... + + +function _M.log(conf, ctx) + local entry = {...} -- data to log + + if batch_processor_manager:add_entry(conf, entry) then + return + end + -- create a new processor if not found + + -- entries is an array table of entry, which can be processed in batch + local func = function(entries) + -- serialize to json array core.json.encode(entries) + -- process/send data + return true + -- return false, err_msg, first_fail if failed + -- first_fail(optional) indicates first_fail-1 entries have been successfully processed + -- and during processing of entries[first_fail], the error occurred. So the batch processor + -- only retries for the entries having index >= first_fail as per the retry policy. + end + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func) +end +``` + +The batch processor's configuration will be set inside the plugin's configuration. 
+For example: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": "http://mockbin.org/bin/:ID", + "batch_max_size": 10, + "max_retry_count": 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +If your plugin only uses one global batch processor, +you can also use the processor directly: + +```lua +local entry = {...} -- data to log +if log_buffer then + log_buffer:push(entry) + return +end + +local config_bat = { + name = config.name, + retry_delay = config.retry_delay, + ... +} + +local err +-- entries is an array table of entry, which can be processed in batch +local func = function(entries) + ... + return true + -- return false, err_msg, first_fail if failed +end +log_buffer, err = batch_processor:new(func, config_bat) + +if not log_buffer then + core.log.warn("error when creating the batch processor: ", err) + return +end + +log_buffer:push(entry) +``` + +Note: Please make sure the batch max size (entry count) is within the limits of the function execution. +The timer to flush the batch runs based on the `inactive_timeout` configuration. Thus, for optimal usage, +keep the `inactive_timeout` smaller than the `buffer_duration`. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/benchmark.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/benchmark.md new file mode 100644 index 0000000..8ddddc6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/benchmark.md @@ -0,0 +1,151 @@ +--- +title: Benchmark +--- + + + +### Benchmark Environments + +n1-highcpu-8 (8 vCPUs, 7.2 GB memory) on Google Cloud + +But we **only** used 4 cores to run APISIX, and left 4 cores for system and [wrk](https://github.com/wg/wrk), +which is the HTTP benchmarking tool. + +### Benchmark Test for reverse proxy + +Only used APISIX as the reverse proxy server, with no logging, limit rate, or other plugins enabled, +and the response size was 1KB. + +#### QPS + +The x-axis means the size of CPU core, and the y-axis is QPS. + +![benchmark-1](../../assets/images/benchmark-1.jpg) + +#### Latency + +Note the y-axis latency in **microsecond(μs)** not millisecond. + +![latency-1](../../assets/images/latency-1.jpg) + +#### Flame Graph + +The result of Flame Graph: +![flamegraph-1](../../assets/images/flamegraph-1.jpg) + +And if you want to run the benchmark test in your machine, you should run another Nginx to listen 80 port. + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1, + "127.0.0.2:80": 1 + } + } +}' +``` + +then run wrk: + +```shell +wrk -d 60 --latency http://127.0.0.1:9080/hello +``` + +### Benchmark Test for reverse proxy, enabled 2 plugins + +Only used APISIX as the reverse proxy server, enabled the limit rate and prometheus plugins, +and the response size was 1KB. 
+ +#### QPS + +The x-axis means the size of CPU core, and the y-axis is QPS. + +![benchmark-2](../../assets/images/benchmark-2.jpg) + +#### Latency + +Note the y-axis latency in **microsecond(μs)** not millisecond. + +![latency-2](../../assets/images/latency-2.jpg) + +#### Flame Graph + +The result of Flame Graph: +![flamegraph-2](../../assets/images/flamegraph-2.jpg) + +And if you want to run the benchmark test in your machine, you should run another Nginx to listen 80 port. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 999999999, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + }, + "prometheus":{} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1, + "127.0.0.2:80": 1 + } + } +}' +``` + +then run wrk: + +```shell +wrk -d 60 --latency http://127.0.0.1:9080/hello +``` + +For more reference on how to run the benchmark test, you can see this [PR](https://github.com/apache/apisix/pull/6136) and this [script](https://gist.github.com/membphis/137db97a4bf64d3653aa42f3e016bd01). + +:::tip + +If you want to run the benchmark with a large number of connections, You may have to update the [**keepalive**](https://github.com/apache/apisix/blob/master/conf/config.yaml.example#L241) config by adding the configuration to [`config.yaml`](https://github.com/apache/apisix/blob/master/conf/config.yaml) and reload APISIX. Connections exceeding this number will become short connections. You can run the following command to test the benchmark with a large number of connections: + +```bash +wrk -t200 -c5000 -d30s http://127.0.0.1:9080/hello +``` + +For more details, you can refer to [Module ngx_http_upstream_module](http://nginx.org/en/docs/http/ngx_http_upstream_module.html). 
+ +::: diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/build-apisix-dev-environment-devcontainers.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/build-apisix-dev-environment-devcontainers.md new file mode 100644 index 0000000..22c3d86 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/build-apisix-dev-environment-devcontainers.md @@ -0,0 +1,119 @@ +--- +id: build-apisix-dev-environment-devcontainers +title: Build development environment with Dev Containers +description: This paper introduces how to quickly start the APISIX API Gateway development environment using Dev Containers. +--- + + + +Previously, building and developing APISIX on Linux or macOS required developers to install its runtime environment and toolchain themselves, and developers might not be familiar with them. + +As it needs to support multiple operating systems and CPU ISAs, the process has inherent complexities in how to find and install dependencies and toolchains. + +:::note + +The tutorial can be used as an alternative to a [bare-metal environment](building-apisix.md) or a [macOS container development environment](build-apisix-dev-environment-on-mac.md). + +It only requires that you have an environment running Docker or a similar alternative (the docker/docker compose command is required), and no other dependent components need to be installed on your host machine. + +::: + +## Supported systems and CPU ISA + +- Linux + - AMD64 + - ARM64 +- Windows (with WSL2 supported) + - AMD64 +- macOS + - ARM64 + - AMD64 + +## Quick Setup of Apache APISIX Development Environment + +### Implementation Idea + +We use Dev Containers to build development environment, and when we open an APISIX project using the IDE, we have access to the container-driven runtime environment. + +There the etcd is ready and we can start APISIX directly. + +### Steps + +:::note + +The following uses Visual Studio Code, which has built-in integration with Dev Containers. 
+ +In theory you could also use any other editor or IDE that integrates with Dev Containers. + +::: + +First, clone the APISIX source code, open the project in Visual Studio Code. + +```shell +git clone https://github.com/apache/apisix.git +cd apisix +code . # VSCode needs to be in the PATH environment variable, you can also open the project directory manually in the UI. +``` + +Next, switch to Dev Containers. Open the VSCode Command Palette, and execute `Dev Containers: Reopen in Container`. + +![VSCode Command open in container](../../assets/images/build-devcontainers-vscode-command.png) + +VSCode will open the Dev Containers project in a new window, where it will build the runtime and install the toolchain according to the Dockerfile before starting the connection and finally installing the APISIX dependencies. + +:::note + +This process requires a reliable network connection, and it will access Docker Hub, GitHub, and some other sites. You will need to ensure the network connection yourself, otherwise the container build may fail. + +::: + +Wait a few minutes; depending on the internet connection and computer performance, it may take from a few minutes to tens of minutes. You can click on the Progress Bar in the bottom right corner to view a live log, where you will be able to spot anything that gets unusually stuck. + +If you encounter any problems, you can search or ask questions in [GitHub Issues](https://github.com/apache/apisix/issues) or [GitHub Discussions](https://github.com/apache/apisix/discussions), and community members will respond as promptly as possible. + +![VSCode dev containers building progress bar](../../assets/images/build-devcontainers-vscode-progressbar.png) + +When the process in the terminal is complete, the development environment is ready, and even etcd is ready. 
+ +Start APISIX with the following command: + +```shell +make run +``` + +Now you can start writing code and test cases, and testing tools are available: + +```shell +export TEST_NGINX_BINARY=openresty + +# run all tests +make test + +# or run a specify test case file +FLUSH_ETCD=1 prove -Itest-nginx/lib -I. -r t/admin/api.t +``` + +## FAQ + +### Where's the code? When I delete the container, are the changes lost? + +It will be on your host, which is where you cloned the APISIX source code, and the container uses the volume to mount the code into the container. Containers contain only the runtime environment, not the source code, so no changes will be lost whether you close or delete the container. + +And, the `git` is already installed in the container, so you can commit a change directly there. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/build-apisix-dev-environment-on-mac.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/build-apisix-dev-environment-on-mac.md new file mode 100644 index 0000000..fb31e5a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/build-apisix-dev-environment-on-mac.md @@ -0,0 +1,94 @@ +--- +id: build-apisix-dev-environment-on-mac +title: Build development environment on Mac +description: This paper introduces how to use Docker to quickly build the development environment of API gateway Apache APISIX on Mac. +--- + + + +If you want to quickly build and develop APISIX on your Mac platform, you can refer to this tutorial. + +:::note + +This tutorial is suitable for situations where you need to quickly start development on the Mac platform, if you want to go further and have a better development experience, the better choice is the Linux-based virtual machine, or directly use this kind of system as your development environment. + +You can see the specific supported systems [here](install-dependencies.md#install). 
+ +::: + +## Quick Setup of Apache APISIX Development Environment + +### Implementation Idea + +We use Docker to build the test environment of Apache APISIX. When the container starts, we can mount the source code of Apache APISIX into the container, and then we can build and run test cases in the container. + +### Implementation Steps + +First, clone the APISIX source code, build an image that can run test cases, and compile the Apache APISIX. + +```shell +git clone https://github.com/apache/apisix.git +cd apisix +docker build -t apisix-dev-env -f example/build-dev-image.dockerfile . +``` + +Next, start Etcd: + +```shell +docker run -d --name etcd-apisix --net=host pachyderm/etcd:v3.5.2 +``` + +Mount the APISIX directory and start the development environment container: + +```shell +docker run -d --name apisix-dev-env --net=host -v $(pwd):/apisix:rw apisix-dev-env:latest +``` + +Finally, enter the container, build the Apache APISIX runtime, and configure the test environment: + +```shell +docker exec -it apisix-dev-env make deps +docker exec -it apisix-dev-env ln -s /usr/bin/openresty /usr/bin/nginx +``` + +### Run and Stop APISIX + +```shell +docker exec -it apisix-dev-env make run +docker exec -it apisix-dev-env make stop +``` + +:::note + +If you encounter an error message like `nginx: [emerg] bind() to unix:/apisix/logs/worker_events.sock failed (95: Operation not supported)` while running `make run`, please use this solution. + +Change the `File Sharing` settings of your Docker-Desktop: + +![Docker-Desktop File Sharing Setting](../../assets/images/update-docker-desktop-file-sharing.png) + +Changing to either `gRPC FUSE` or `osxfs` can resolve this issue. 
+ +::: + +### Run Specific Test Cases + +```shell +docker exec -it apisix-dev-env prove t/admin/routes.t +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/building-apisix.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/building-apisix.md new file mode 100644 index 0000000..cf2c2da --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/building-apisix.md @@ -0,0 +1,267 @@ +--- +id: building-apisix +title: Building APISIX from source +keywords: + - API Gateway + - Apache APISIX + - Code Contribution + - Building APISIX +description: Guide for building and running APISIX locally for development. +--- + + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +If you are looking to setup a development environment or contribute to APISIX, this guide is for you. + +If you are looking to quickly get started with APISIX, check out the other [installation methods](./installation-guide.md). + +:::note + +To build an APISIX docker image from source code, see [build image from source code](https://apisix.apache.org/docs/docker/build/#build-an-image-from-customizedpatched-source-code). + +To build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools) instead. + +::: + +## Building APISIX from source + +First of all, we need to specify the branch to be built: + +```shell +APISIX_BRANCH='release/3.13' +``` + +Then, you can run the following command to clone the APISIX source code from Github: + +```shell +git clone --depth 1 --branch ${APISIX_BRANCH} https://github.com/apache/apisix.git apisix-${APISIX_BRANCH} +``` + +Alternatively, you can also download the source package from the [Downloads](https://apisix.apache.org/downloads/) page. Note that source packages here are not distributed with test cases. + +Before installation, install [OpenResty](https://openresty.org/en/installation.html). 
+ +Next, navigate to the directory, install dependencies, and build APISIX. + +```shell +cd apisix-${APISIX_BRANCH} +make deps +make install +``` + +This will install the runtime-dependent Lua libraries, `apisix-runtime`, and the `apisix` CLI tool. + +:::note + +If you get an error message like `Could not find header file for LDAP/PCRE/openssl` while running `make deps`, use this solution. + +`luarocks` supports custom compile-time dependencies (See: [Config file format](https://github.com/luarocks/luarocks/wiki/Config-file-format)). You can use a third-party tool to install the missing packages and add its installation directory to the `luarocks`' variables table. This method works on macOS, Ubuntu, CentOS, and other similar operating systems. + +The solution below is for macOS but it works similarly for other operating systems: + +1. Install `openldap` by running: + + ```shell + brew install openldap + ``` + +2. Locate the installation directory by running: + + ```shell + brew --prefix openldap + ``` + +3. Add this path to the project configuration file by any of the two methods shown below: + 1. You can use the `luarocks config` command to set `LDAP_DIR`: + + ```shell + luarocks config variables.LDAP_DIR /opt/homebrew/cellar/openldap/2.6.1 + ``` + + 2. You can also change the default configuration file of `luarocks`. Open the file `~/.luarocks/config-5.1.lua` and add the following: + + ```shell + variables = { LDAP_DIR = "/opt/homebrew/cellar/openldap/2.6.1", LDAP_INCDIR = "/opt/homebrew/cellar/openldap/2.6.1/include", } + ``` + + `/opt/homebrew/cellar/openldap/` is the default path where `openldap` is installed on Apple Silicon macOS machines. For Intel machines, the default path is `/usr/local/opt/openldap/`. + +::: + +To uninstall the APISIX runtime, run: + +```shell +make uninstall +make undeps +``` + +:::danger + +This operation will remove the files completely. 
+ +::: + +## Installing etcd + +APISIX uses [etcd](https://github.com/etcd-io/etcd) to save and synchronize configuration. Before running APISIX, you need to install etcd on your machine. Installation methods based on your operating system are mentioned below. + + + + +```shell +ETCD_VERSION='3.4.18' +wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz +tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \ + cd etcd-v${ETCD_VERSION}-linux-amd64 && \ + sudo cp -a etcd etcdctl /usr/bin/ +nohup etcd >/tmp/etcd.log 2>&1 & +``` + + + + + +```shell +brew install etcd +brew services start etcd +``` + + + + +## Running and managing APISIX server + +To initialize the configuration file, within the APISIX directory, run: + +```shell +apisix init +``` + +:::tip + +You can run `apisix help` to see a list of available commands. + +::: + +You can then test the created configuration file by running: + +```shell +apisix test +``` + +Finally, you can run the command below to start APISIX: + +```shell +apisix start +``` + +To stop APISIX, you can use either the `quit` or the `stop` subcommand. + +`apisix quit` will gracefully shutdown APISIX. It will ensure that all received requests are completed before stopping. + +```shell +apisix quit +``` + +Where as, the `apisix stop` command does a force shutdown and discards all pending requests. + +```shell +apisix stop +``` + +## Building runtime for APISIX + +Some features of APISIX requires additional Nginx modules to be introduced into OpenResty. + +To use these features, you need to build a custom distribution of OpenResty (apisix-runtime). See [apisix-build-tools](https://github.com/api7/apisix-build-tools) for setting up your build environment and building it. + +## Running tests + +The steps below show how to run the test cases for APISIX: + +1. Install [cpanminus](https://metacpan.org/pod/App::cpanminus#INSTALLATION), the package manager for Perl. +2. 
Install the [test-nginx](https://github.com/openresty/test-nginx) dependencies with `cpanm`: + + ```shell + sudo cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1) + ``` + +3. Clone the test-nginx source code locally: + + ```shell + git clone https://github.com/openresty/test-nginx.git + ``` + +4. Append the current directory to Perl's module directory by running: + + ```shell + export PERL5LIB=.:$PERL5LIB + ``` + + You can specify the Nginx binary path by running: + + ```shell + TEST_NGINX_BINARY=/usr/local/bin/openresty prove -Itest-nginx/lib -r t + ``` + +5. Run the tests by running: + + ```shell + make test + ``` + +:::note + +Some tests rely on external services and system configuration modification. See [ci/linux_openresty_common_runner.sh](https://github.com/apache/apisix/blob/master/ci/linux_openresty_common_runner.sh) for a complete test environment build. + +::: + +### Troubleshooting + +These are some common troubleshooting steps for running APISIX test cases. + +#### Configuring Nginx path + +For the error `Error unknown directive "lua_package_path" in /API_ASPIX/apisix/t/servroot/conf/nginx.conf`, ensure that OpenResty is set to the default Nginx and export the path as follows: + +- Linux default installation path: + + ```shell + export PATH=/usr/local/openresty/nginx/sbin:$PATH + ``` + +#### Running a specific test case + +To run a specific test case, use the command below: + +```shell +prove -Itest-nginx/lib -r t/plugin/openid-connect.t +``` + +See [testing framework](./internal/testing-framework.md) for more details. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/certificate.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/certificate.md new file mode 100644 index 0000000..8916b66 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/certificate.md @@ -0,0 +1,328 @@ +--- +title: Certificate +--- + + + +`APISIX` supports to load multiple SSL certificates by TLS extension Server Name Indication (SNI). + +### Single SNI + +It is most common for an SSL certificate to contain only one domain. We can create an `ssl` object. Here is a simple case, creates a `ssl` object and `route` object. + +* `cert`: PEM-encoded public certificate of the SSL key pair. +* `key`: PEM-encoded private key of the SSL key pair. +* `snis`: Hostname(s) to associate with this certificate as SNIs. To set this attribute this certificate must have a valid private key associated with it. + +The following is an example of configuring an SSL certificate with a single SNI in APISIX. + +Create an SSL object with the certificate and key valid for the SNI: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat t/certs/apisix.crt)"'", + "key": "'"$(cat t/certs/apisix.key)"'", + "snis": ["test.com"] +}' +``` + +Create a Router object: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/get", + "hosts": ["test.com"], + "methods": ["GET"], + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' +``` + +Send a request to verify: + +```shell +curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/get -k -vvv + +* Added test.com:9443:127.0.0.1 to DNS cache +* About to connect() to 
test.com port 9443 (#0) +* Trying 127.0.0.1... +* Connected to test.com (127.0.0.1) port 9443 (#0) +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* start date: Jun 24 22:18:05 2019 GMT +* expire date: May 31 22:18:05 2119 GMT +* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* SSL certificate verify result: self-signed certificate (18), continuing anyway. +> GET /get HTTP/2 +> Host: test.com:9443 +> user-agent: curl/7.81.0 +> accept: */* +``` + +### wildcard SNI + +An SSL certificate could also be valid for a wildcard domain like `*.test.com`, which means it is valid for any domain of that pattern, including `www.test.com` and `mail.test.com`. + +The following is an example of configuring an SSL certificate with a wildcard SNI in APISIX. + +Create an SSL object with the certificate and key valid for the SNI: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "cert" : "'"$(cat t/certs/apisix.crt)"'", + "key": "'"$(cat t/certs/apisix.key)"'", + "snis": ["*.test.com"] + }' +``` + +Create a Router object: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/get", + "hosts": ["*.test.com"], + "methods": ["GET"], + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' +``` + +Send a request to verify: + +```shell +curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/get -k -vvv + +* Added www.test.com:9443:127.0.0.1 to DNS cache +* Hostname www.test.com was found in DNS cache +* Trying 127.0.0.1:9443... 
+* Connected to www.test.com (127.0.0.1) port 9443 (#0) +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* start date: Jun 24 22:18:05 2019 GMT +* expire date: May 31 22:18:05 2119 GMT +* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* SSL certificate verify result: self signed certificate (18), continuing anyway. +> GET /get HTTP/2 +> Host: www.test.com:9443 +> user-agent: curl/7.74.0 +> accept: */* +``` + +### multiple domain + +If your SSL certificate may contain more than one domain, like `www.test.com` and `mail.test.com`, then you can add them into the `snis` array. For example: + +```json +{ + "snis": ["www.test.com", "mail.test.com"] +} +``` + +### multiple certificates for a single domain + +If you want to configure multiple certificate for a single domain, for +instance, supporting both the +[ECC](https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) +and RSA key-exchange algorithm, then just configure the extra certificates (the +first certificate and private key should be still put in `cert` and `key`) and +private keys by `certs` and `keys`. + +* `certs`: PEM-encoded certificate array. +* `keys`: PEM-encoded private key array. + +`APISIX` will pair certificate and private key with the same indice as a SSL key +pair. So the length of `certs` and `keys` must be same. + +### set up multiple CA certificates + +APISIX currently uses CA certificates in several places, such as [Protect Admin API](./mtls.md#protect-admin-api), [etcd with mTLS](./mtls.md#etcd-with-mtls), and [Deployment Modes](./deployment-modes.md). + +In these places, `ssl_trusted_certificate` or `trusted_ca_cert` will be used to set up the CA certificate, but these configurations will eventually be translated into [lua_ssl_trusted_certificate](https://github.com/openresty/lua-nginx-module#lua_ssl_trusted_certificate) directive in OpenResty. 
+ +If you need to set up different CA certificates in different places, then you can package these CA certificates into a CA bundle file and point to this file when you need to set up CAs. This will avoid the problem that the generated `lua_ssl_trusted_certificate` has multiple locations and overwrites each other. + +The following is a complete example to show how to set up multiple CA certificates in APISIX. + +Suppose we let client and APISIX Admin API, APISIX and ETCD communicate with each other using mTLS protocol, and currently there are two CA certificates, `foo_ca.crt` and `bar_ca.crt`, and use each of these two CA certificates to issue client and server certificate pairs, `foo_ca.crt` and its issued certificate pair are used to protect Admin API, and `bar_ca.crt` and its issued certificate pair are used to protect ETCD. + +The following table details the configurations involved in this example and what they do: + +| Configuration | Type | Description | +| ------------- | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| foo_ca.crt | CA cert | Issues the secondary certificate required for the client to communicate with the APISIX Admin API over mTLS. | +| foo_client.crt | cert | A certificate issued by `foo_ca.crt` and used by the client to prove its identity when accessing the APISIX Admin API. | +| foo_client.key | key | Issued by `foo_ca.crt`, used by the client, the key file required to access the APISIX Admin API. | +| foo_server.crt | cert | Issued by `foo_ca.crt`, used by APISIX, corresponding to the `admin_api_mtls.admin_ssl_cert` configuration entry. | +| foo_server.key | key | Issued by `foo_ca.crt`, used by APISIX, corresponding to the `admin_api_mtls.admin_ssl_cert_key` configuration entry. 
| +| admin.apisix.dev | domain name | Common Name used in issuing `foo_server.crt` certificate, through which the client accesses APISIX Admin API | +| bar_ca.crt | CA cert | Issues the secondary certificate required for APISIX to communicate with ETCD over mTLS. | +| bar_etcd.crt | cert | Issued by `bar_ca.crt` and used by ETCD, corresponding to the `-cert-file` option in the ETCD startup command. | +| bar_etcd.key | key | Issued by `bar_ca.crt` and used by ETCD, corresponding to the `--key-file` option in the ETCD startup command. | +| bar_apisix.crt | cert | Issued by `bar_ca.crt`, used by APISIX, corresponding to the `etcd.tls.cert` configuration entry. | +| bar_apisix.key | key | Issued by `bar_ca.crt`, used by APISIX, corresponding to the `etcd.tls.key` configuration entry. | +| etcd.cluster.dev | domain name | Common Name used in issuing `bar_etcd.crt` certificate, which is used as SNI when APISIX communicates with ETCD over mTLS. Corresponds to `etcd.tls.sni` configuration item. | +| apisix.ca-bundle | CA bundle | Merged from `foo_ca.crt` and `bar_ca.crt`, replacing `foo_ca.crt` and `bar_ca.crt`. | + +1. Create CA bundle files + +```shell +cat /path/to/foo_ca.crt /path/to/bar_ca.crt > apisix.ca-bundle +``` + +2. 
Start the ETCD cluster and enable client authentication + +Start by writing a `goreman` configuration named `Procfile-single-enable-mtls`, the content as: + +```text +# Use goreman to run `go get github.com/mattn/goreman` +etcd1: etcd --name infra1 --listen-client-urls https://127.0.0.1:12379 --advertise-client-urls https://127.0.0.1:12379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd2: etcd --name infra2 --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd3: etcd --name infra3 --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +``` + +Use `goreman` to start the ETCD cluster: + +```shell +goreman -f Procfile-single-enable-mtls start > goreman.log 2>&1 & +``` + +3. 
Update `config.yaml` + +```yaml title="conf/config.yaml" +deployment: + admin: + admin_key: + - name: admin + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin + admin_listen: + ip: 127.0.0.1 + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_ca_cert: /path/to/apisix.ca-bundle + admin_ssl_cert: /path/to/foo_server.crt + admin_ssl_cert_key: /path/to/foo_server.key + +apisix: + ssl: + ssl_trusted_certificate: /path/to/apisix.ca-bundle + +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + - "https://127.0.0.1:22379" + - "https://127.0.0.1:32379" + tls: + cert: /path/to/bar_apisix.crt + key: /path/to/bar_apisix.key + sni: etcd.cluster.dev +``` + +4. Test APISIX Admin API + +Start APISIX, if APISIX starts successfully and there is no abnormal output in `logs/error.log`, it means that mTLS communication between APISIX and ETCD is normal. + +Use curl to simulate a client, communicate with APISIX Admin API with mTLS, and create a route: + +```shell +curl -vvv \ + --resolve 'admin.apisix.dev:9180:127.0.0.1' https://admin.apisix.dev:9180/apisix/admin/routes/1 \ + --cert /path/to/foo_client.crt \ + --key /path/to/foo_client.key \ + --cacert /path/to/apisix.ca-bundle \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +A successful mTLS communication between curl and the APISIX Admin API is indicated if the following SSL handshake process is output: + +```shell +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Request CERT (13): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 
(OUT), TLS handshake, Certificate (11): +* TLSv1.3 (OUT), TLS handshake, CERT verify (15): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +5. Verify APISIX proxy + +```shell +curl http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 298 +Connection: keep-alive +Date: Tue, 26 Jul 2022 16:31:00 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/2.14.1 + +... +``` + +APISIX proxied the request to the `/get` path of the upstream `httpbin.org` and returned `HTTP/1.1 200 OK`. The whole process is working fine using CA bundle instead of CA certificate. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/config.json b/CloudronPackages/APISIX/apisix-source/docs/en/latest/config.json new file mode 100644 index 0000000..c51aa89 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/config.json @@ -0,0 +1,428 @@ +{ + "version": "3.13.0", + "sidebar": [ + { + "type": "category", + "label": "Getting Started", + "items": [ + "getting-started/README", + "getting-started/configure-routes", + "getting-started/load-balancing", + "getting-started/key-authentication", + "getting-started/rate-limiting" + ] + }, + { + "type": "doc", + "id": "installation-guide" + }, + { + "type": "doc", + "id": "architecture-design/apisix" + }, + { + "type": "category", + "label": "Tutorials", + "items": [ + "tutorials/expose-api", + "tutorials/protect-api", + { + "type": "category", + "label": "Observability", + "items": [ + "tutorials/observe-your-api", + "tutorials/health-check", + "tutorials/monitor-api-health-check" + ] + }, + "tutorials/manage-api-consumers", + "tutorials/cache-api-responses", + "tutorials/add-multiple-api-versions", + "tutorials/client-to-apisix-mtls", + "tutorials/websocket-authentication", + "tutorials/keycloak-oidc" + ] + }, + { + "type": "category", + "label": "Terminology", + "items": [ + 
"terminology/api-gateway", + "terminology/consumer", + "terminology/consumer-group", + "terminology/credential", + "terminology/global-rule", + "terminology/plugin", + "terminology/plugin-config", + "terminology/plugin-metadata", + "terminology/route", + "terminology/router", + "terminology/script", + "terminology/service", + "terminology/upstream", + "terminology/secret" + ] + }, + { + "type": "category", + "label": "Plugins", + "items": [ + { + "type": "category", + "label": "AI", + "items": [ + "plugins/ai-proxy", + "plugins/ai-proxy-multi", + "plugins/ai-rate-limiting", + "plugins/ai-prompt-guard", + "plugins/ai-aws-content-moderation", + "plugins/ai-prompt-decorator", + "plugins/ai-prompt-template", + "plugins/ai-rag", + "plugins/ai-request-rewrite" + ] + }, + { + "type": "category", + "label": "General", + "items": [ + "plugins/batch-requests", + "plugins/redirect", + "plugins/echo", + "plugins/gzip", + "plugins/brotli", + "plugins/real-ip", + "plugins/server-info", + "plugins/ext-plugin-pre-req", + "plugins/ext-plugin-post-req", + "plugins/ext-plugin-post-resp", + "plugins/inspect", + "plugins/ocsp-stapling" + ] + }, + { + "type": "category", + "label": "Transformation", + "items": [ + "plugins/response-rewrite", + "plugins/proxy-rewrite", + "plugins/grpc-transcode", + "plugins/grpc-web", + "plugins/fault-injection", + "plugins/mocking", + "plugins/degraphql", + "plugins/body-transformer", + "plugins/attach-consumer-label" + ] + }, + { + "type": "category", + "label": "Authentication", + "items": [ + "plugins/key-auth", + "plugins/jwt-auth", + "plugins/jwe-decrypt", + "plugins/basic-auth", + "plugins/authz-keycloak", + "plugins/authz-casdoor", + "plugins/wolf-rbac", + "plugins/openid-connect", + "plugins/cas-auth", + "plugins/hmac-auth", + "plugins/authz-casbin", + "plugins/ldap-auth", + "plugins/opa", + "plugins/forward-auth", + "plugins/multi-auth" + ] + }, + { + "type": "category", + "label": "Security", + "items": [ + "plugins/cors", + 
"plugins/uri-blocker", + "plugins/ip-restriction", + "plugins/ua-restriction", + "plugins/referer-restriction", + "plugins/consumer-restriction", + "plugins/csrf", + "plugins/public-api", + "plugins/gm", + "plugins/chaitin-waf" + ] + }, + { + "type": "category", + "label": "Traffic", + "items": [ + "plugins/limit-req", + "plugins/limit-conn", + "plugins/limit-count", + "plugins/proxy-cache", + "plugins/request-validation", + "plugins/proxy-mirror", + "plugins/api-breaker", + "plugins/traffic-split", + "plugins/request-id", + "plugins/proxy-control", + "plugins/client-control", + "plugins/workflow" + ] + }, + { + "type": "category", + "label": "Observability", + "items": [ + { + "type": "category", + "label": "Tracers", + "items": [ + "plugins/zipkin", + "plugins/skywalking", + "plugins/opentelemetry" + ] + }, + { + "type": "category", + "label": "Metrics", + "items": [ + "plugins/prometheus", + "plugins/node-status", + "plugins/datadog" + ] + }, + { + "type": "category", + "label": "Loggers", + "items": [ + "plugins/http-logger", + "plugins/skywalking-logger", + "plugins/tcp-logger", + "plugins/kafka-logger", + "plugins/rocketmq-logger", + "plugins/udp-logger", + "plugins/clickhouse-logger", + "plugins/syslog", + "plugins/log-rotate", + "plugins/error-log-logger", + "plugins/sls-logger", + "plugins/google-cloud-logging", + "plugins/splunk-hec-logging", + "plugins/file-logger", + "plugins/loggly", + "plugins/elasticsearch-logger", + "plugins/tencent-cloud-cls", + "plugins/loki-logger", + "plugins/lago" + ] + } + ] + }, + { + "type": "category", + "label": "Serverless", + "items": [ + "plugins/serverless", + "plugins/azure-functions", + "plugins/openwhisk", + "plugins/aws-lambda", + "plugins/openfunction" + ] + }, + { + "type": "category", + "label": "Other protocols", + "items": [ + "plugins/dubbo-proxy", + "plugins/mqtt-proxy", + "plugins/kafka-proxy", + "plugins/http-dubbo" + ] + } + ] + }, + { + "type": "category", + "label": "API", + "items": [ + { + "type": 
"doc", + "id": "admin-api" + }, + { + "type": "doc", + "id": "control-api" + }, + { + "type": "doc", + "id": "status-api" + } + ] + }, + { + "type": "category", + "label": "Development", + "items": [ + { + "type": "doc", + "id": "build-apisix-dev-environment-devcontainers" + }, + { + "type": "doc", + "id": "building-apisix" + }, + { + "type": "doc", + "id": "build-apisix-dev-environment-on-mac" + }, + { + "type": "doc", + "id": "support-fips-in-apisix" + }, + { + "type": "doc", + "id": "external-plugin" + }, + { + "type": "doc", + "id": "wasm" + }, + { + "type": "link", + "label": "CODE_STYLE", + "href": "https://github.com/apache/apisix/blob/master/CODE_STYLE.md" + }, + { + "type": "category", + "label": "internal", + "items": [ + "internal/plugin-runner", + "internal/testing-framework" + ] + }, + { + "type": "doc", + "id": "plugin-develop" + }, + { + "type": "doc", + "id": "debug-mode" + } + ] + }, + { + "type": "doc", + "id": "deployment-modes" + }, + { + "type": "doc", + "id": "FAQ" + }, + { + "type": "category", + "label": "Others", + "items": [ + { + "type": "category", + "label": "Discovery", + "items": [ + "discovery", + "discovery/dns", + "discovery/consul", + "discovery/consul_kv", + "discovery/nacos", + "discovery/eureka", + "discovery/control-plane-service-discovery", + "discovery/kubernetes" + ] + }, + { + "type": "category", + "label": "PubSub", + "items": [ + "pubsub", + "pubsub/kafka" + ] + }, + { + "type": "category", + "label": "xRPC", + "items": [ + "xrpc/redis", + "xrpc" + ] + }, + { + "type": "doc", + "id": "router-radixtree" + }, + { + "type": "doc", + "id": "stream-proxy" + }, + { + "type": "doc", + "id": "grpc-proxy" + }, + { + "type": "doc", + "id": "customize-nginx-configuration" + }, + { + "type": "doc", + "id": "certificate" + }, + { + "type": "doc", + "id": "batch-processor" + }, + { + "type": "doc", + "id": "benchmark" + }, + { + "type": "doc", + "id": "install-dependencies" + }, + { + "type": "doc", + "id": "apisix-variable" + }, + { 
+ "type": "doc", + "id": "aws" + }, + { + "type": "doc", + "id": "mtls" + }, + { + "type": "doc", + "id": "debug-function" + }, + { + "type": "doc", + "id": "profile" + }, + { + "type": "doc", + "id": "ssl-protocol" + }, + { + "type": "doc", + "id": "http3" + } + ] + }, + { + "type": "link", + "label": "CHANGELOG", + "href": "https://github.com/apache/apisix/blob/master/CHANGELOG.md" + }, + { + "type": "doc", + "id": "upgrade-guide-from-2.15.x-to-3.0.0" + } + ] +} diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/control-api.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/control-api.md new file mode 100644 index 0000000..4a5e6e9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/control-api.md @@ -0,0 +1,555 @@ +--- +title: Control API +--- + + + +In Apache APISIX, the control API is used to: + +* Expose the internal state of APISIX. +* Control the behavior of a single, isolated APISIX data plane. + +To change the default endpoint (`127.0.0.1:9090`) of the Control API server, change the `ip` and `port` in the `control` section in your configuration file (`conf/config.yaml`): + +```yaml +apisix: + ... + enable_control: true + control: + ip: "127.0.0.1" + port: 9090 +``` + +To enable parameter matching in plugin's control API, add `router: 'radixtree_uri_with_parameter'` to the control section. + +**Note**: Never configure the control API server to listen to public traffic. + +## Control API Added via Plugins + +[Plugins](./terminology/plugin.md) can be enabled to add its control API. + +Some Plugins have their own control APIs. See the documentation of the specific Plugin to learn more. + +## Plugin Independent Control API + +The supported APIs are listed below. + +### GET /v1/schema + +Introduced in [v2.2](https://github.com/apache/apisix/releases/tag/2.2). 
+ +Returns the JSON schema used by the APISIX instance: + +```json +{ + "main": { + "route": { + "properties": {...} + }, + "upstream": { + "properties": {...} + }, + ... + }, + "plugins": { + "example-plugin": { + "consumer_schema": {...}, + "metadata_schema": {...}, + "schema": {...}, + "type": ..., + "priority": 0, + "version": 0.1 + }, + ... + }, + "stream-plugins": { + "mqtt-proxy": { + ... + }, + ... + } +} +``` + +**Note**: Only the enabled `plugins` are returned and they may lack fields like `consumer_schema` or `type` depending on how they were defined. + +### GET /v1/healthcheck + +Introduced in [v2.3](https://github.com/apache/apisix/releases/tag/2.3). + +Returns a [health check](./tutorials/health-check.md) of the APISIX instance. + +```json +[ + { + "nodes": [ + { + "ip": "52.86.68.46", + "counter": { + "http_failure": 0, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "healthy" + }, + { + "ip": "100.24.156.8", + "counter": { + "http_failure": 5, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "unhealthy" + } + ], + "name": "/apisix/routes/1", + "type": "http" + } +] + +``` + +Each of the returned objects contain the following fields: + +* name: resource id, where the health checker is reporting from. +* type: health check type: `["http", "https", "tcp"]`. +* nodes: target nodes of the health checker. +* nodes[i].ip: ip address. +* nodes[i].port: port number. +* nodes[i].status: health check result: `["healthy", "unhealthy", "mostly_healthy", "mostly_unhealthy"]`. +* nodes[i].counter.success: success health check count. +* nodes[i].counter.http_failure: http failures count. +* nodes[i].counter.tcp_failure: tcp connect/read/write failures count. +* nodes[i].counter.timeout_failure: timeout count. + +You can also use `/v1/healthcheck/$src_type/$src_id` to get the health status of specific nodes. 
+ +For example, `GET /v1/healthcheck/upstreams/1` returns: + +```json +{ + "nodes": [ + { + "ip": "52.86.68.46", + "counter": { + "http_failure": 0, + "success": 2, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "healthy" + }, + { + "ip": "100.24.156.8", + "counter": { + "http_failure": 5, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "unhealthy" + } + ], + "type": "http" + "name": "/apisix/routes/1" +} + +``` + +:::note + +Only when one upstream is satisfied by the conditions below, +its status is shown in the result list: + +* The upstream is configured with a health checker +* The upstream has served requests in any worker process + +::: + +If you use browser to access the control API URL, then you will get the HTML output: + +![Health Check Status Page](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/health_check_status_page.png) + +### POST /v1/gc + +Introduced in [v2.8](https://github.com/apache/apisix/releases/tag/2.8). + +Triggers a full garbage collection in the HTTP subsystem. + +**Note**: When stream proxy is enabled, APISIX runs another Lua VM for the stream subsystem. Full garbage collection is not triggered in this VM. + +### GET /v1/routes + +Introduced in [v2.10.0](https://github.com/apache/apisix/releases/tag/2.10.0). + +Returns all configured [Routes](./terminology/route.md): + +```json +[ + { + "value": { + "priority": 0, + "uris": [ + "/hello" + ], + "id": "1", + "upstream": { + "scheme": "http", + "pass_host": "pass", + "nodes": [ + { + "port": 1980, + "host": "127.0.0.1", + "weight": 1 + } + ], + "type": "roundrobin", + "hash_on": "vars" + }, + "status": 1 + }, + "clean_handlers": {}, + "has_domain": false, + "orig_modifiedIndex": 1631193445, + "modifiedIndex": 1631193445, + "key": "/routes/1" + } +] +``` + +### GET /v1/route/{route_id} + +Introduced in [v2.10.0](https://github.com/apache/apisix/releases/tag/2.10.0). 
+ +Returns the Route with the specified `route_id`: + +```json +{ + "value": { + "priority": 0, + "uris": [ + "/hello" + ], + "id": "1", + "upstream": { + "scheme": "http", + "pass_host": "pass", + "nodes": [ + { + "port": 1980, + "host": "127.0.0.1", + "weight": 1 + } + ], + "type": "roundrobin", + "hash_on": "vars" + }, + "status": 1 + }, + "clean_handlers": {}, + "has_domain": false, + "orig_modifiedIndex": 1631193445, + "modifiedIndex": 1631193445, + "key": "/routes/1" +} +``` + +### GET /v1/services + +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). + +Returns all the Services: + +```json +[ + { + "has_domain": false, + "clean_handlers": {}, + "modifiedIndex": 671, + "key": "/apisix/services/200", + "createdIndex": 671, + "value": { + "upstream": { + "scheme": "http", + "hash_on": "vars", + "pass_host": "pass", + "type": "roundrobin", + "nodes": [ + { + "port": 1980, + "weight": 1, + "host": "127.0.0.1" + } + ] + }, + "create_time": 1634552648, + "id": "200", + "plugins": { + "limit-count": { + "key": "remote_addr", + "time_window": 60, + "redis_timeout": 1000, + "allow_degradation": false, + "show_limit_quota_header": true, + "policy": "local", + "count": 2, + "rejected_code": 503 + } + }, + "update_time": 1634552648 + } + } +] +``` + +### GET /v1/service/{service_id} + +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). + +Returns the Service with the specified `service_id`: + +```json +{ + "has_domain": false, + "clean_handlers": {}, + "modifiedIndex": 728, + "key": "/apisix/services/5", + "createdIndex": 728, + "value": { + "create_time": 1634554563, + "id": "5", + "upstream": { + "scheme": "http", + "hash_on": "vars", + "pass_host": "pass", + "type": "roundrobin", + "nodes": [ + { + "port": 1980, + "weight": 1, + "host": "127.0.0.1" + } + ] + }, + "update_time": 1634554563 + } +} +``` + +### GET /v1/upstreams + +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). 
+ +Dumps all Upstreams: + +```json +[ + { + "value":{ + "scheme":"http", + "pass_host":"pass", + "nodes":[ + { + "host":"127.0.0.1", + "port":80, + "weight":1 + }, + { + "host":"foo.com", + "port":80, + "weight":2 + } + ], + "hash_on":"vars", + "update_time":1634543819, + "key":"remote_addr", + "create_time":1634539759, + "id":"1", + "type":"chash" + }, + "has_domain":true, + "key":"\/apisix\/upstreams\/1", + "clean_handlers":{ + }, + "createdIndex":938, + "modifiedIndex":1225 + } +] +``` + +### GET /v1/upstream/{upstream_id} + +Introduced in [v2.11.0](https://github.com/apache/apisix/releases/tag/2.11.0). + +Dumps the Upstream with the specified `upstream_id`: + +```json +{ + "value":{ + "scheme":"http", + "pass_host":"pass", + "nodes":[ + { + "host":"127.0.0.1", + "port":80, + "weight":1 + }, + { + "host":"foo.com", + "port":80, + "weight":2 + } + ], + "hash_on":"vars", + "update_time":1634543819, + "key":"remote_addr", + "create_time":1634539759, + "id":"1", + "type":"chash" + }, + "has_domain":true, + "key":"\/apisix\/upstreams\/1", + "clean_handlers":{ + }, + "createdIndex":938, + "modifiedIndex":1225 +} +``` + +### GET /v1/plugin_metadatas + +Introduced in [v3.0.0](https://github.com/apache/apisix/releases/tag/3.0.0). + +Dumps all plugin_metadatas: + +```json +[ + { + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" + }, + { + "ikey": 1, + "skey": "val", + "id": "example-plugin" + } +] +``` + +### GET /v1/plugin_metadata/{plugin_name} + +Introduced in [v3.0.0](https://github.com/apache/apisix/releases/tag/3.0.0). + +Dumps the metadata with the specified `plugin_name`: + +```json +{ + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" +} +``` + +### PUT /v1/plugins/reload + +Introduced in [v3.9.0](https://github.com/apache/apisix/releases/tag/3.9.0) + +Triggers a hot reload of the plugins. 
+ +```shell +curl "http://127.0.0.1:9090/v1/plugins/reload" -X PUT +``` + +### GET /v1/discovery/{service}/dump + +Get memory dump of discovered service endpoints and configuration details: + +```json +{ + "endpoints": [ + { + "endpoints": [ + { + "value": "{\"https\":[{\"host\":\"172.18.164.170\",\"port\":6443,\"weight\":50},{\"host\":\"172.18.164.171\",\"port\":6443,\"weight\":50},{\"host\":\"172.18.164.172\",\"port\":6443,\"weight\":50}]}", + "name": "default/kubernetes" + }, + { + "value": "{\"metrics\":[{\"host\":\"172.18.164.170\",\"port\":2379,\"weight\":50},{\"host\":\"172.18.164.171\",\"port\":2379,\"weight\":50},{\"host\":\"172.18.164.172\",\"port\":2379,\"weight\":50}]}", + "name": "kube-system/etcd" + }, + { + "value": "{\"http-85\":[{\"host\":\"172.64.89.2\",\"port\":85,\"weight\":50}]}", + "name": "test-ws/testing" + } + ], + "id": "first" + } + ], + "config": [ + { + "default_weight": 50, + "id": "first", + "client": { + "token": "xxx" + }, + "service": { + "host": "172.18.164.170", + "port": "6443", + "schema": "https" + }, + "shared_size": "1m" + } + ] +} +``` + +## GET /v1/discovery/{service}/show_dump_file + +Get configured services details. + +```json +{ + "services": { + "service_a": [ + { + "host": "172.19.5.12", + "port": 8000, + "weight": 120 + }, + { + "host": "172.19.5.13", + "port": 8000, + "weight": 120 + } + ] + }, + "expire": 0, + "last_update": 1615877468 +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/customize-nginx-configuration.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/customize-nginx-configuration.md new file mode 100644 index 0000000..9a6baa5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/customize-nginx-configuration.md @@ -0,0 +1,63 @@ +--- +title: Customize Nginx configuration +--- + + + +The Nginx configuration used by APISIX is generated via the template file `apisix/cli/ngx_tpl.lua` and the parameters in `apisix/cli/config.lua` and `conf/config.yaml`. 
+ +You can take a look at the generated Nginx configuration in `conf/nginx.conf` after running `./bin/apisix start`. + +If you want to customize the Nginx configuration, please read through the `nginx_config` in `conf/config.default.example`. You can override the default value in the `conf/config.yaml`. For instance, you can inject some snippets in the `conf/nginx.conf` via configuring the `xxx_snippet` entries: + +```yaml +... +# put this in config.yaml: +nginx_config: + main_configuration_snippet: | + daemon on; + http_configuration_snippet: | + server + { + listen 45651; + server_name _; + access_log off; + + location /ysec_status { + req_status_show; + allow 127.0.0.1; + deny all; + } + } + + chunked_transfer_encoding on; + + http_server_configuration_snippet: | + set $my "var"; + http_admin_configuration_snippet: | + log_format admin "$request_time $pipe"; + http_end_configuration_snippet: | + server_names_hash_bucket_size 128; + stream_configuration_snippet: | + tcp_nodelay off; +... +``` + +Pay attention to the indent of `nginx_config` and sub indent of the sub entries, the incorrect indent may cause `./bin/apisix start` to fail to generate Nginx configuration in `conf/nginx.conf`. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/debug-function.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/debug-function.md new file mode 100644 index 0000000..3d57286 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/debug-function.md @@ -0,0 +1,162 @@ +--- +title: Debug Function +--- + + + +## `5xx` response status code + +Similar `5xx` status codes such as 500, 502, 503, etc., are the status codes in response to a server error. When a request has a `5xx` status code; it may come from `APISIX` or `Upstream`. How to identify the source of these response status codes is a very meaningful thing. It can quickly help us determine the problem. 
(When modifying the configuration `show_upstream_status_in_response_header` in `conf/config.yaml` to `true`, all upstream status codes will be returned, not only `5xx` status.) + +## How to identify the source of the `5xx` response status code + +In the response header of the request, through the response header of `X-APISIX-Upstream-Status`, we can effectively identify the source of the `5xx` status code. When the `5xx` status code comes from `Upstream`, the response header `X-APISIX-Upstream-Status` can be seen in the response header, and the value of this response header is the response status code. When the `5xx` status code is derived from `APISIX`, there is no response header information of `X-APISIX-Upstream-Status` in the response header. That is, only when the status code of `5xx` is derived from Upstream will the `X-APISIX-Upstream-Status` response header appear. + +## Example + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +>Example 1: `502` response status code comes from `Upstream` (IP address is not available) + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +Test: + +```shell +$ curl http://127.0.0.1:9080/hello -v +...... +< HTTP/1.1 502 Bad Gateway +< Date: Wed, 25 Nov 2020 14:40:22 GMT +< Content-Type: text/html; charset=utf-8 +< Content-Length: 154 +< Connection: keep-alive +< Server: APISIX/2.0 +< X-APISIX-Upstream-Status: 502 +< + +502 Bad Gateway + +

502 Bad Gateway

+
openresty
+ + + +``` + +It has a response header of `X-APISIX-Upstream-Status: 502`. + +>Example 2: `502` response status code comes from `APISIX` + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 500, + "body": "Fault Injection!\n" + } + } + }, + "uri": "/hello" +}' +``` + +Test: + +```shell +$ curl http://127.0.0.1:9080/hello -v +...... +< HTTP/1.1 500 Internal Server Error +< Date: Wed, 25 Nov 2020 14:50:20 GMT +< Content-Type: text/plain; charset=utf-8 +< Transfer-Encoding: chunked +< Connection: keep-alive +< Server: APISIX/2.0 +< +Fault Injection! +``` + +There is no response header for `X-APISIX-Upstream-Status`. + +>Example 3: `Upstream` has multiple nodes, and all nodes are unavailable + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "nodes": { + "127.0.0.3:1": 1, + "127.0.0.2:1": 1, + "127.0.0.1:1": 1 + }, + "retries": 2, + "type": "roundrobin" +}' +``` + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "upstream_id": "1" +}' +``` + +Test: + +```shell +$ curl http://127.0.0.1:9080/hello -v +< HTTP/1.1 502 Bad Gateway +< Date: Wed, 25 Nov 2020 15:07:34 GMT +< Content-Type: text/html; charset=utf-8 +< Content-Length: 154 +< Connection: keep-alive +< Server: APISIX/2.0 +< X-APISIX-Upstream-Status: 502, 502, 502 +< + +502 Bad Gateway + +

502 Bad Gateway

+
openresty
+ + +``` + +It has a response header of `X-APISIX-Upstream-Status: 502, 502, 502`. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/debug-mode.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/debug-mode.md new file mode 100644 index 0000000..86dd228 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/debug-mode.md @@ -0,0 +1,140 @@ +--- +id: debug-mode +title: Debug mode +keywords: + - API gateway + - Apache APISIX + - Debug mode +description: Guide for enabling debug mode in Apache APISIX. +--- + + + +You can use APISIX's debug mode to troubleshoot your configuration. + +## Basic debug mode + +You can enable the basic debug mode by adding this line to your debug configuration file (`conf/debug.yaml`): + +```yaml title="conf/debug.yaml" +basic: + enable: true +#END +``` + +APISIX loads the configurations of `debug.yaml` on startup and then checks if the file is modified on an interval of 1 second. If the file is changed, APISIX automatically applies the configuration changes. + +:::note + +For APISIX releases prior to v2.10, basic debug mode is enabled by setting `apisix.enable_debug = true` in your configuration file (`conf/config.yaml`). + +::: + +If you have configured two Plugins `limit-conn` and `limit-count` on the Route `/hello`, you will receive a response with the header `Apisix-Plugins: limit-conn, limit-count` when you enable the basic debug mode. + +```shell +curl http://127.0.0.1:1984/hello -i +``` + +```shell +HTTP/1.1 200 OK +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Apisix-Plugins: limit-conn, limit-count +X-RateLimit-Limit: 2 +X-RateLimit-Remaining: 1 +Server: openresty + +hello world +``` + +:::info IMPORTANT + +If the debug information cannot be included in a response header (for example, when the Plugin is in a stream subsystem), the debug information will be logged as an error log at a `warn` level. 
+ +::: + +## Advanced debug mode + +You can configure advanced options in debug mode by modifying your debug configuration file (`conf/debug.yaml`). + +The following configurations are available: + +| Key | Required | Default | Description | +|---------------------------------|----------|---------|-----------------------------------------------------------------------------------------------------------------------| +| hook_conf.enable | True | false | Enables/disables hook debug trace. i.e. if enabled, will print the target module function's inputs or returned value. | +| hook_conf.name | True | | Module list name of the hook that enabled the debug trace. | +| hook_conf.log_level | True | warn | Log level for input arguments & returned values. | +| hook_conf.is_print_input_args | True | true | When set to `true` enables printing input arguments. | +| hook_conf.is_print_return_value | True | true | When set to `true` enables printing returned values. | + +:::note + +A checker would check every second for changes to the configuration file. It will only check a file if the file was updated based on its last modification time. + +You can add an `#END` flag to indicate to the checker to only look for changes until that point. 
+ +::: + +The example below shows how you can configure advanced options in debug mode: + +```yaml title="conf/debug.yaml" +hook_conf: + enable: false # Enables/disables hook debug trace + name: hook_phase # Module list name of the hook that enabled the debug trace + log_level: warn # Log level for input arguments & returned values + is_print_input_args: true # When set to `true` enables printing input arguments + is_print_return_value: true # When set to `true` enables printing returned values + +hook_phase: # Module function list, Name: hook_phase + apisix: # Referenced module name + - http_access_phase # Function names:Array + - http_header_filter_phase + - http_body_filter_phase + - http_log_phase +#END +``` + +### Dynamically enable advanced debug mode + +You can also enable advanced debug mode only on particular requests. + +The example below shows how you can enable it on requests with the header `X-APISIX-Dynamic-Debug`: + +```yaml title="conf/debug.yaml" +http_filter: + enable: true # Enable/disable advanced debug mode dynamically + enable_header_name: X-APISIX-Dynamic-Debug # Trace for the request with this header +... +#END +``` + +This will enable the advanced debug mode only for requests like: + +```shell +curl 127.0.0.1:9090/hello --header 'X-APISIX-Dynamic-Debug: foo' +``` + +:::note + +The `apisix.http_access_phase` module cannot be hooked for this dynamic rule as the advanced debug mode is enabled based on the request. + +::: diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/deployment-modes.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/deployment-modes.md new file mode 100644 index 0000000..3752858 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/deployment-modes.md @@ -0,0 +1,1032 @@ +--- +title: Deployment modes +keywords: + - API Gateway + - Apache APISIX + - APISIX deployment modes +description: Documentation about the three deployment modes of Apache APISIX. 
+--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + +APISIX has three different deployment modes for different production use cases. The table below summarises the deployment modes: + +| Deployment mode | Roles | Description | +|-----------------|----------------------------|---------------------------------------------------------------------------------------------------------------------| +| traditional | traditional | Data plane and control plane are deployed together. `enable_admin` attribute should be disabled manually. | +| decoupled | data_plane / control_plane | Data plane and control plane are deployed independently. | +| standalone | data_plane / traditional | The `data_plane` mode loads configuration from a local YAML / JSON file, while the traditional mode expects configuration through Admin API. | + +Each of these deployment modes are explained in detail below. + +## Traditional + +In the traditional deployment mode, one instance of APISIX will be both the `data_plane` and the `control_plane`. + +An example configuration of the traditional deployment mode is shown below: + +```yaml title="conf/config.yaml" +apisix: + node_listen: + - port: 9080 +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_listen: + port: 9180 + etcd: + host: + - http://${etcd_IP}:${etcd_Port} + prefix: /apisix + timeout: 30 +#END +``` + +The instance of APISIX deployed as the traditional role will: + +1. Listen on port `9080` to handle user requests, controlled by `node_listen`. +2. Listen on port `9180` to handle Admin API requests, controlled by `admin_listen`. + +## Decoupled + +In the decoupled deployment mode the `data_plane` and `control_plane` instances of APISIX are deployed separately, i.e., one instance of APISIX is configured to be a *data plane* and the other to be a *control plane*. 
+
+The instance of APISIX deployed as the data plane will:
+
+1. Handle the user requests once the service is started.
+
+The example below shows the configuration of an APISIX instance as *data plane* in the decoupled mode:
+
+```yaml title="conf/config.yaml"
+deployment:
+  role: data_plane
+  role_data_plane:
+    config_provider: etcd
+#END
+```
+
+The instance of APISIX deployed as the control plane will:
+
+1. Listen on port `9180` and handle Admin API requests.
+
+The example below shows the configuration of an APISIX instance as *control plane* in the decoupled mode:
+
+```yaml title="conf/config.yaml"
+deployment:
+  role: control_plane
+  role_control_plane:
+    config_provider: etcd
+  etcd:
+    host:
+      - https://${etcd_IP}:${etcd_Port}
+    prefix: /apisix
+    timeout: 30
+#END
+```
+
+## Standalone
+
+Turning on the APISIX node in standalone mode means it will no longer use the default etcd as the configuration center.
+
+This method is more suitable for two types of users:
+
+1. Kubernetes (k8s): Declarative API that dynamically updates the routing rules with a full yaml configuration.
+2. Different configuration centers: There are many implementations of the configuration center, such as Consul, etc., using the full yaml file for intermediate conversion.
+
+### Modes
+
+Now, we have two standalone running modes, file-driven and API-driven.
+
+#### File-driven
+
+The file-driven mode is the kind APISIX has always supported.
+
+The routing rules in the `conf/apisix.yaml` file are loaded into memory immediately after the APISIX node service starts. At each interval (default: 1 second), APISIX checks for updates to the file. If changes are detected, it reloads the rules.
+
+*Note*: Reloading and updating routing rules are all hot memory updates. There is no replacement of working processes, since it's a hot update.
+
+This requires us to set the APISIX role to data plane.
That is, set `deployment.role` to `data_plane` and `deployment.role_data_plane.config_provider` to `yaml`. + +Refer to the example below: + +```yaml +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +``` + +You can also provide the configuration in JSON format by placing it in `conf/apisix.json`. Before proceeding, you should change the `deployment.role_data_plane.config_provider` to `json`. + +Refer to the example below: + +```yaml +deployment: + role: data_plane + role_data_plane: + config_provider: json +``` + +This makes it possible to disable the Admin API and discover configuration changes and reloads based on the local file system. + +#### API-driven (Experimental) + +> This mode is experimental, please do not rely on it in your production environment. +> We use it to validate certain specific workloads and if it is appropriate we will turn it into an officially supported feature, otherwise it will be removed. + +##### Overview + +API-driven mode is an emerging paradigm for standalone deployment, where routing rules are stored entirely in memory rather than in a configuration file. Updates must be made through the dedicated Standalone Admin API. Each update replaces the full configuration and takes effect immediately through hot updates, without requiring a restart. + +##### Configuration + +To enable this mode, set the APISIX role to `traditional` (to start both the API gateway and the Admin API endpoint) and use the YAML config provider. Example configuration: + +```yaml +deployment: + role: traditional + role_traditional: + config_provider: yaml +``` + +This disables the local file source of configuration in favor of the API. When APISIX starts, it uses an empty configuration until updated via the API. + +##### API Endpoints + +* `conf_version` by resource type + + Use `_conf_version` to indicate the client’s current version for each resource type (e.g. routes, upstreams, services, etc.). 
+ + ```json + { + "routes_conf_version": 12, + "upstreams_conf_version": 102, + "routes": [], + "upstreams": [] + } + ``` + + APISIX compares each provided `_conf_version` against its in-memory `_conf_version` for that resource type. If the provided `_conf_version` is: + + - **Greater than** the current `conf_version`, APISIX will **rebuild/reset** that resource type’s data to match your payload. + + - **Equal to** the current `conf_version`, APISIX treats the resource as **unchanged** and **ignores** it (no data is rebuilt). + + - **Less than** the current `conf_version`, APISIX considers your update **stale** and **rejects** the request for that resource type with a **400 Bad Request**. + +* `modifiedIndex` by individual resource + + Allow setting an index for each resource. APISIX compares this index to its modifiedIndex to determine whether to accept the update. + +##### Example + +1. get configuration + +```shell +curl -X GET http://127.0.0.1:9180/apisix/admin/configs \ + -H "X-API-KEY: " \ + -H "Accept: application/json" ## or application/yaml +``` + +This returns the current configuration in JSON or YAML format. + +```json +{ + "consumer_groups_conf_version": 0, + "consumers_conf_version": 0, + "global_rules_conf_version": 0, + "plugin_configs_conf_version": 0, + "plugin_metadata_conf_version": 0, + "protos_conf_version": 0, + "routes_conf_version": 0, + "secrets_conf_version": 0, + "services_conf_version": 0, + "ssls_conf_version": 0, + "upstreams_conf_version": 0 +} +``` + +2. full update + +```shell +curl -X PUT http://127.0.0.1:9180/apisix/admin/configs \ + -H "X-API-KEY: " \ + -H "Content-Type: application/json" ## or application/yaml \ + -d '{}' +``` + +3. 
update based on resource type + +In APISIX memory, the current configuration is: + +```json +{ + "routes_conf_version": 1000, + "upstreams_conf_version": 1000, +} +``` + +Update the previous upstreams configuration by setting a higher version number, such as 1001, to replace the current version 1000: + +```shell +curl -X PUT http://127.0.0.1:9180/apisix/admin/configs \ + -H "X-API-KEY: ${API_KEY}" \ + -H "Content-Type: application/json" \ + -d ' +{ + "routes_conf_version": 1000, + "upstreams_conf_version": 1001, + "routes": [ + { + "modifiedIndex": 1000, + "id": "r1", + "uri": "/hello", + "upstream_id": "u1" + } + ], + "upstreams": [ + { + "modifiedIndex": 1001, + "id": "u1", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ] +}' +``` + +:::note + +These APIs apply the same security requirements as the Admin API, including API key, TLS/mTLS, CORS, and IP allowlist. + +The API accepts input in the same format as the file-based mode, supporting both JSON and YAML. Unlike the file-based mode, the API does not rely on the `#END` suffix, as HTTP guarantees input integrity. + +::: + +### How to configure rules + +#### To `config_provider: yaml` + +All of the rules are stored in one file which named `conf/apisix.yaml`, +APISIX checks if this file has any change **every second**. +If the file is changed & it ends with `#END`, +APISIX loads the rules from this file and updates its memory. + +Here is a mini example: + +```yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +``` + +*WARNING*: APISIX will not load the rules into memory from file `conf/apisix.yaml` if there is no `#END` at the end. + +Environment variables can also be used like so: + +```yaml +routes: + - + uri: /hello + upstream: + nodes: + "${{UPSTREAM_ADDR}}": 1 + type: roundrobin +#END +``` + +*WARNING*: When using docker to deploy APISIX in standalone mode. 
New environment variables added to `apisix.yaml` while APISIX has been initialized will only take effect after a reload. + +More information about using environment variables can be found [here](./admin-api.md#using-environment-variables). + +#### To `config_provider: json` + +All of the rules are stored in one file which named `conf/apisix.json`, +APISIX checks if this file has any change **every second**. +If the file is changed, +APISIX loads the rules from this file and updates its memory. + +Here is a mini example: + +```json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +``` + +*WARNING*: when using `conf/apisix.json`, the `#END` marker is not required, as APISIX can directly parse and validate the JSON structure. + +### How to configure Route + +Single Route: + + + + +```yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +``` + + + + + +```json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +``` + + + + +Multiple Routes: + + + + +```yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + - + uri: /hello2 + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin +#END +``` + + + + + +```json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }, + { + "uri": "/hello2", + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + } + } + ] +} +``` + + + + +### How to configure Route + Service + + + + +```yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +``` + + + + + +```json +{ + "routes": [ + { + "uri": "/hello", + "service_id": 1 + } + ], + "services": [ + { + "id": 1, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +``` + + + + +### How to configure Route + Upstream + + + + +```yaml +routes: + - + uri: /hello + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +``` + + + + + +```json +{ + "routes": [ + { + "uri": "/hello", + "upstream_id": 1 + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ] +} +``` + + + + +### How to configure Route + Service + Upstream + + + + +```yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream_id: 2 +upstreams: + - + id: 2 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +``` + + + + + +```json +{ + "routes": [ + { + "uri": "/hello", + "service_id": 1 + } + ], + "services": [ + { + "id": 1, + "upstream_id": 2 + } + ], + "upstreams": [ + { + "id": 2, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ] +} +``` + + + + +### How to configure Plugins + + + + +```yaml +# plugins listed here will be hot reloaded and override the boot configuration +plugins: + - name: ip-restriction + - name: jwt-auth + - name: mqtt-proxy + stream: true # set 'stream' to true for stream plugins +#END +``` + + + + + +```json +{ + "plugins": [ + { + "name": "ip-restriction" + }, + { + "name": "jwt-auth" + }, + { + "name": "mqtt-proxy", + "stream": true + } + ] +} +``` + + + + +### How to configure Plugin Configs + + + + +```yaml +plugin_configs: + - + id: 1 + plugins: + response-rewrite: + body: "hello\n" +routes: + - id: 1 + uri: /hello + plugin_config_id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +``` + + + + + +```json +{ + "plugin_configs": [ + { + "id": 1, + "plugins": { + "response-rewrite": { + "body": "hello\n" + } + } + } + ], + "routes": [ + { + "id": 1, + "uri": "/hello", + "plugin_config_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +``` + + + + +### How to 
enable SSL + + + + +```yaml +ssls: + - + cert: | + -----BEGIN CERTIFICATE----- + MIIDrzCCApegAwIBAgIJAI3Meu/gJVTLMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV + BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL + BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl + ci5sb2NhbDAeFw0yMDEwMjgwMzMzMDJaFw0yMTEwMjgwMzMzMDJaMG4xCzAJBgNV + BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA0GA1UEBwwISGFuZ3pob3UxDTAL + BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl + ci5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/qwxCR7g5S + s9+VleopkLi5pAszEkHYOBpwF/hDeRdxU0I0e1zZTdTlwwPy2vf8m3kwoq6fmNCt + tdUUXh5Wvgi/2OA8HBBzaQFQL1Av9qWwyES5cx6p0ZBwIrcXQIsl1XfNSUpQNTSS + D44TGduXUIdeshukPvMvLWLezynf2/WlgVh/haWtDG99r/Gj3uBdjl0m/xGvKvIv + NFy6EdgG9fkwcIalutjrUnGl9moGjwKYu4eXW2Zt5el0d1AHXUsqK4voe0p+U2Nz + quDmvxteXWdlsz8o5kQT6a4DUtWhpPIfNj9oZfPRs3LhBFQ74N70kVxMOCdec1lU + bnFzLIMGlz0CAwEAAaNQME4wHQYDVR0OBBYEFFHeljijrr+SPxlH5fjHRPcC7bv2 + MB8GA1UdIwQYMBaAFFHeljijrr+SPxlH5fjHRPcC7bv2MAwGA1UdEwQFMAMBAf8w + DQYJKoZIhvcNAQELBQADggEBAG6NNTK7sl9nJxeewVuogCdMtkcdnx9onGtCOeiQ + qvh5Xwn9akZtoLMVEdceU0ihO4wILlcom3OqHs9WOd6VbgW5a19Thh2toxKidHz5 + rAaBMyZsQbFb6+vFshZwoCtOLZI/eIZfUUMFqMXlEPrKru1nSddNdai2+zi5rEnM + HCot43+3XYuqkvWlOjoi9cP+C4epFYrxpykVbcrtbd7TK+wZNiK3xtDPnVzjdNWL + geAEl9xrrk0ss4nO/EreTQgS46gVU+tLC+b23m2dU7dcKZ7RDoiA9bdVc4a2IsaS + 2MvLL4NZ2nUh8hAEHiLtGMAV3C6xNbEyM07hEpDW6vk6tqk= + -----END CERTIFICATE----- + key: | + -----BEGIN PRIVATE KEY----- + MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCf6sMQke4OUrPf + lZXqKZC4uaQLMxJB2DgacBf4Q3kXcVNCNHtc2U3U5cMD8tr3/Jt5MKKun5jQrbXV + FF4eVr4Iv9jgPBwQc2kBUC9QL/alsMhEuXMeqdGQcCK3F0CLJdV3zUlKUDU0kg+O + Exnbl1CHXrIbpD7zLy1i3s8p39v1pYFYf4WlrQxvfa/xo97gXY5dJv8RryryLzRc + uhHYBvX5MHCGpbrY61JxpfZqBo8CmLuHl1tmbeXpdHdQB11LKiuL6HtKflNjc6rg + 5r8bXl1nZbM/KOZEE+muA1LVoaTyHzY/aGXz0bNy4QRUO+De9JFcTDgnXnNZVG5x + cyyDBpc9AgMBAAECggEAatcEtehZPJaCeClPPF/Cwbe9YoIfe4BCk186lHI3z7K1 + 
5nB7zt+bwVY0AUpagv3wvXoB5lrYVOsJpa9y5iAb3GqYMc/XDCKfD/KLea5hwfcn + BctEn0LjsPVKLDrLs2t2gBDWG2EU+udunwQh7XTdp2Nb6V3FdOGbGAg2LgrSwP1g + 0r4z14F70oWGYyTQ5N8UGuyryVrzQH525OYl38Yt7R6zJ/44FVi/2TvdfHM5ss39 + SXWi00Q30fzaBEf4AdHVwVCRKctwSbrIOyM53kiScFDmBGRblCWOxXbiFV+d3bjX + gf2zxs7QYZrFOzOO7kLtHGua4itEB02497v+1oKDwQKBgQDOBvCVGRe2WpItOLnj + SF8iz7Sm+jJGQz0D9FhWyGPvrN7IXGrsXavA1kKRz22dsU8xdKk0yciOB13Wb5y6 + yLsr/fPBjAhPb4h543VHFjpAQcxpsH51DE0b2oYOWMmz+rXGB5Jy8EkP7Q4njIsc + 2wLod1dps8OT8zFx1jX3Us6iUQKBgQDGtKkfsvWi3HkwjFTR+/Y0oMz7bSruE5Z8 + g0VOHPkSr4XiYgLpQxjbNjq8fwsa/jTt1B57+By4xLpZYD0BTFuf5po+igSZhH8s + QS5XnUnbM7d6Xr/da7ZkhSmUbEaMeHONSIVpYNgtRo4bB9Mh0l1HWdoevw/w5Ryt + L/OQiPhfLQKBgQCh1iG1fPh7bbnVe/HI71iL58xoPbCwMLEFIjMiOFcINirqCG6V + LR91Ytj34JCihl1G4/TmWnsH1hGIGDRtJLCiZeHL70u32kzCMkI1jOhFAWqoutMa + 7obDkmwraONIVW/kFp6bWtSJhhTQTD4adI9cPCKWDXdcCHSWj0Xk+U8HgQKBgBng + t1HYhaLzIZlP/U/nh3XtJyTrX7bnuCZ5FhKJNWrYjxAfgY+NXHRYCKg5x2F5j70V + be7pLhxmCnrPTMKZhik56AaTBOxVVBaYWoewhUjV4GRAaK5Wc8d9jB+3RizPFwVk + V3OU2DJ1SNZ+W2HBOsKrEfwFF/dgby6i2w6MuAP1AoGBAIxvxUygeT/6P0fHN22P + zAHFI4v2925wYdb7H//D8DIADyBwv18N6YH8uH7L+USZN7e4p2k8MGGyvTXeC6aX + IeVtU6fH57Ddn59VPbF20m8RCSkmBvSdcbyBmqlZSBE+fKwCliKl6u/GH0BNAWKz + r8yiEiskqRmy7P7MY9hDmEbG + -----END PRIVATE KEY----- + snis: + - "yourdomain.com" +#END +``` + + + + + +```json +{ + "ssls": [ + { + "cert": "-----BEGIN 
CERTIFICATE-----\nMIIDrzCCApegAwIBAgIJAI3Meu/gJVTLMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV\nBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL\nBgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl\nci5sb2NhbDAeFw0yMDEwMjgwMzMzMDJaFw0yMTEwMjgwMzMzMDJaMG4xCzAJBgNV\nBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL\nBgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl\nci5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/qwxCR7g5S\ns9+VleopkLi5pAszEkHYOBpwF/hDeRdxU0I0e1zZTdTlwwPy2vf8m3kwoq6fmNCt\ntdUUXh5Wvgi/2OA8HBBzaQFQL1Av9qWwyES5cx6p0ZBwIrcXQIsl1XfNSUpQNTSS\nD44TGduXUIdeshukPvMvLWLezynf2/WlgVh/haWtDG99r/Gj3uBdjl0m/xGvKvIv\nNFy6EdgG9fkwcIalutjrUnGl9moGjwKYu4eXW2Zt5el0d1AHXUsqK4voe0p+U2Nz\nquDmvxteXWdlsz8o5kQT6a4DUtWhpPIfNj9oZfPRs3LhBFQ74N70kVxMOCdec1lU\nbnFzLIMGlz0CAwEAAaNQME4wHQYDVR0OBBYEFFHeljijrr+SPxlH5fjHRPcC7bv2\nMB8GA1UdIwQYMBaAFFHeljijrr+SPxlH5fjHRPcC7bv2MAwGA1UdEwQFMAMBAf8w\nDQYJKoZIhvcNAQELBQADggEBAG6NNTK7sl9nJxeewVuogCdMtkcdnx9onGtCOeiQ\nqvh5Xwn9akZtoLMVEdceU0ihO4wILlcom3OqHs9WOd6VbgW5a19Thh2toxKidHz5\nrAaBMyZsQbFb6+vFshZwoCtOLZI/eIZfUUMFqMXlEPrKru1nSddNdai2+zi5rEnM\nHCot43+3XYuqkvWlOjoi9cP+C4epFYrxpykVbcrtbd7TK+wZNiK3xtDPnVzjdNWL\ngeAEl9xrrk0ss4nO/EreTQgS46gVU+tLC+b23m2dU7dcKZ7RDoiA9bdVc4a2IsaS\n2MvLL4NZ2nUh8hAEHiLtGMAV3C6xNbEyM07hEpDW6vk6tqk=\n-----END CERTIFICATE-----", + "key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCf6sMQke4OUrPf\nlZXqKZC4uaQLMxJB2DgacBf4Q3kXcVNCNHtc2U3U5cMD8tr3/Jt5MKKun5jQrbXV\nFF4eVr4Iv9jgPBwQc2kBUC9QL/alsMhEuXMeqdGQcCK3F0CLJdV3zUlKUDU0kg+O\nExnbl1CHXrIbpD7zLy1i3s8p39v1pYFYf4WlrQxvfa/xo97gXY5dJv8RryryLzRc\nuhHYBvX5MHCGpbrY61JxpfZqBo8CmLuHl1tmbeXpdHdQB11LKiuL6HtKflNjc6rg\n5r8bXl1nZbM/KOZEE+muA1LVoaTyHzY/aGXz0bNy4QRUO+De9JFcTDgnXnNZVG5x\ncyyDBpc9AgMBAAECggEAatcEtehZPJaCeClPPF/Cwbe9YoIfe4BCk186lHI3z7K1\n5nB7zt+bwVY0AUpagv3wvXoB5lrYVOsJpa9y5iAb3GqYMc/XDCKfD/KLea5hwfcn\nBctEn0LjsPVKLDrLs2t2gBDWG2EU+udunwQh7XTdp2Nb6V3FdOGbGAg2LgrSwP1g\n0r4z14F70oWGYyTQ5N8UGuyryVrzQH525OYl38Yt7R6zJ/44FVi/2TvdfHM5ss39\nSXWi00Q30fzaBEf4AdHVwVCRKctwSbrIOyM53kiScFDmBGRblCWOxXbiFV+d3bjX\ngf2zxs7QYZrFOzOO7kLtHGua4itEB02497v+1oKDwQKBgQDOBvCVGRe2WpItOLnj\nSF8iz7Sm+jJGQz0D9FhWyGPvrN7IXGrsXavA1kKRz22dsU8xdKk0yciOB13Wb5y6\nyLsr/fPBjAhPb4h543VHFjpAQcxpsH51DE0b2oYOWMmz+rXGB5Jy8EkP7Q4njIsc\n2wLod1dps8OT8zFx1jX3Us6iUQKBgQDGtKkfsvWi3HkwjFTR+/Y0oMz7bSruE5Z8\ng0VOHPkSr4XiYgLpQxjbNjq8fwsa/jTt1B57+By4xLpZYD0BTFuf5po+igSZhH8s\nQS5XnUnbM7d6Xr/da7ZkhSmUbEaMeHONSIVpYNgtRo4bB9Mh0l1HWdoevw/w5Ryt\nL/OQiPhfLQKBgQCh1iG1fPh7bbnVe/HI71iL58xoPbCwMLEFIjMiOFcINirqCG6V\nLR91Ytj34JCihl1G4/TmWnsH1hGIGDRtJLCiZeHL70u32kzCMkI1jOhFAWqoutMa\n7obDkmwraONIVW/kFp6bWtSJhhTQTD4adI9cPCKWDXdcCHSWj0Xk+U8HgQKBgBng\nt1HYhaLzIZlP/U/nh3XtJyTrX7bnuCZ5FhKJNWrYjxAfgY+NXHRYCKg5x2F5j70V\nbe7pLhxmCnrPTMKZhik56AaTBOxVVBaYWoewhUjV4GRAaK5Wc8d9jB+3RizPFwVk\nV3OU2DJ1SNZ+W2HBOsKrEfwFF/dgby6i2w6MuAP1AoGBAIxvxUygeT/6P0fHN22P\nzAHFI4v2925wYdb7H//D8DIADyBwv18N6YH8uH7L+USZN7e4p2k8MGGyvTXeC6aX\nIeVtU6fH57Ddn59VPbF20m8RCSkmBvSdcbyBmqlZSBE+fKwCliKl6u/GH0BNAWKz\nr8yiEiskqRmy7P7MY9hDmEbG\n-----END PRIVATE KEY-----", + "snis": [ + "yourdomain.com" + ] + } + ] +} +``` + + + + +### How to configure global rule + + + + +```yaml +global_rules: + - + id: 1 + plugins: + response-rewrite: + body: "hello\n" +#END +``` + + + + + +```json +{ + "global_rules": [ + { + "id": 1, + "plugins": { + 
"response-rewrite": { + "body": "hello\n" + } + } + } + ] +} +``` + + + + +### How to configure consumer + + + + +```yaml +consumers: + - username: jwt + plugins: + jwt-auth: + key: user-key + secret: my-secret-key +#END +``` + + + + + +```json +{ + "consumers": [ + { + "username": "jwt", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + } + ] +} +``` + + + + +### How to configure plugin metadata + + + + +```yaml +upstreams: + - id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +routes: + - + uri: /hello + upstream_id: 1 + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log +plugin_metadata: + - id: http-logger # note the id is the plugin name + log_format: + host: "$host" + remote_addr: "$remote_addr" +#END +``` + + + + + +```json +{ + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ], + "routes": [ + { + "uri": "/hello", + "upstream_id": 1, + "plugins": { + "http-logger": { + "batch_max_size": 1, + "uri": "http://127.0.0.1:1980/log" + } + } + } + ], + "plugin_metadata": [ + { + "id": "http-logger", + "log_format": { + "host": "$host", + "remote_addr": "$remote_addr" + } + } + ] +} +``` + + + + +### How to configure stream route + + + + +```yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream_id: 1 + plugins: + mqtt-proxy: + protocol_name: "MQTT" + protocol_level: 4 +upstreams: + - nodes: + "127.0.0.1:1995": 1 + type: roundrobin + id: 1 +#END +``` + + + + + +```json +{ + "stream_routes": [ + { + "server_addr": "127.0.0.1", + "server_port": 1985, + "id": 1, + "upstream_id": 1, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + } + } + ], + "upstreams": [ + { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin", + "id": 1 + } + ] +} +``` + + + + +### How to configure protos + + + + +```yaml +protos: + - id: helloworld + desc: hello world + content: > + syntax = 
"proto3"; + package helloworld; + + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + } +#END +``` + + + + + +```json +{ + "protos": [ + { + "id": "helloworld", + "desc": "hello world", + "content": "syntax = \"proto3\";\npackage helloworld;\n\nservice Greeter {\n rpc SayHello (HelloRequest) returns (HelloReply) {}\n}\nmessage HelloRequest {\n string name = 1;\n}\nmessage HelloReply {\n string message = 1;\n}\n" + } + ] +} +``` + + + diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery.md new file mode 100644 index 0000000..6844ede --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery.md @@ -0,0 +1,307 @@ +--- +title: Integration service discovery registry +--- + + + +## Summary + +When system traffic changes, the number of servers of the upstream service also increases or decreases, or the server needs to be replaced due to its hardware failure. If the gateway maintains upstream service information through configuration, the maintenance costs in the microservices architecture pattern are unpredictable. Furthermore, due to the untimely update of these information, will also bring a certain impact for the business, and the impact of human error operation can not be ignored. So it is very necessary for the gateway to automatically get the latest list of service instances through the service registry。As shown in the figure below: + +![discovery through service registry](../../assets/images/discovery.png) + +1. When the service starts, it will report some of its information, such as the service name, IP, port and other information to the registry. 
The services communicate with the registry using a mechanism such as a heartbeat, and if the registry and the service are unable to communicate for a long time, the instance will be canceled. When the service goes offline, the registry will delete the instance information.
+2. The gateway gets service instance information from the registry in near-real time.
+3. When the user requests the service through the gateway, the gateway selects one instance from the registry for proxy.
+
+## How to extend the discovery client?
+
+### Basic steps
+
+It is very easy to extend the discovery client in APISIX. The basic steps are as follows:
+
+1. Add the implementation of registry client in the 'apisix/discovery/' directory;
+
+2. Implement the `_M.init_worker()` function for initialization and the `_M.nodes(service_name)` function for obtaining the list of service instance nodes;
+
+3. If you need the discovery module to export the debugging information online, implement the `_M.dump_data()` function;
+
+4. Convert the registry data into data in APISIX;
+
+### The example of Eureka
+
+#### Implementation of Eureka client
+
+First, create a directory `eureka` under `apisix/discovery`;
+
+After that, add [`init.lua`](https://github.com/apache/apisix/blob/master/apisix/discovery/init.lua) in the `apisix/discovery/eureka` directory;
+
+Then implement the `_M.init_worker()` function for initialization and the `_M.nodes(service_name)` function for obtaining the list of service instance nodes in `init.lua`:
+
+ ```lua
+ local _M = {
+     version = 1.0,
+ }
+
+
+ function _M.nodes(service_name)
+     ... ...
+ end
+
+
+ function _M.init_worker()
+     ... ...
+ end
+
+
+ function _M.dump_data()
+     return {config = your_config, services = your_services, other = ... }
+ end
+
+
+ return _M
+ ```
+
+Finally, provide the schema for YAML configuration in the `schema.lua` under `apisix/discovery/eureka`.
+
+#### How to convert Eureka's instance data to APISIX's node?
+ +Here's an example of Eureka's data: + +```json +{ + "applications": { + "application": [ + { + "name": "USER-SERVICE", # service name + "instance": [ + { + "instanceId": "192.168.1.100:8761", + "hostName": "192.168.1.100", + "app": "USER-SERVICE", # service name + "ipAddr": "192.168.1.100", # IP address + "status": "UP", + "overriddenStatus": "UNKNOWN", + "port": { + "$": 8761, + "@enabled": "true" + }, + "securePort": { + "$": 443, + "@enabled": "false" + }, + "metadata": { + "management.port": "8761", + "weight": 100 # Setting by 'eureka.instance.metadata-map.weight' of the spring boot application + }, + "homePageUrl": "http://192.168.1.100:8761/", + "statusPageUrl": "http://192.168.1.100:8761/actuator/info", + "healthCheckUrl": "http://192.168.1.100:8761/actuator/health", + ... ... + } + ] + } + ] + } +} +``` + +Deal with the Eureka's instance data need the following steps : + +1. select the UP instance. When the value of `overriddenStatus` is "UP" or the value of `overriddenStatus` is "UNKNOWN" and the value of `status` is "UP". +2. Host. The `ipAddr` is the IP address of instance; and must be IPv4 or IPv6. +3. Port. If the value of `port["@enabled"]` is equal to "true", using the value of `port["\$"]`, If the value of `securePort["@enabled"]` is equal to "true", using the value of `securePort["\$"]`. +4. Weight. `local weight = metadata.weight or local_conf.eureka.weight or 100` + +The result of this example is as follows: + +```json +[ + { + "host" : "192.168.1.100", + "port" : 8761, + "weight" : 100, + "metadata" : { + "management.port": "8761" + } + } +] +``` + +## Configuration for discovery client + +### Initial service discovery + +Add the following configuration to `conf/config.yaml` to add different service discovery clients for dynamic selection during use: + +```yaml +discovery: + eureka: + ... +``` + +This name should be consistent with the file name of the implementation registry in the `apisix/discovery/` directory. 
+ +The supported discovery client: Eureka. + +### Configuration for Eureka + +Add following configuration in `conf/config.yaml` : + +```yaml +discovery: + eureka: + host: # it's possible to define multiple eureka hosts addresses of the same eureka cluster. + - "http://${username}:${password}@${eureka_host1}:${eureka_port1}" + - "http://${username}:${password}@${eureka_host2}:${eureka_port2}" + prefix: "/eureka/" + fetch_interval: 30 # 30s + weight: 100 # default weight for node + timeout: + connect: 2000 # 2000ms + send: 2000 # 2000ms + read: 5000 # 5000ms +``` + +## Upstream setting + +### L7 + +Here is an example of routing a request with a URL of "/user/*" to a service which named "user-service" and use eureka discovery client in the registry : + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/user/*", + "upstream": { + "service_name": "USER-SERVICE", + "type": "roundrobin", + "discovery_type": "eureka" + } +}' + +HTTP/1.1 201 Created +Date: Sat, 31 Aug 2019 01:17:15 GMT +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server + +{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": "eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} +``` + +Because the upstream interface URL may have conflict, usually in the gateway by prefix to distinguish: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/a/*", + "plugins": { + "proxy-rewrite" : { + "regex_uri": ["^/a/(.*)", "/${1}"] + } + }, + "upstream": { + "service_name": "A-SERVICE", + "type": "roundrobin", + 
"discovery_type": "eureka" + } +}' + +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/b/*", + "plugins": { + "proxy-rewrite" : { + "regex_uri": ["^/b/(.*)", "/${1}"] + } + }, + "upstream": { + "service_name": "B-SERVICE", + "type": "roundrobin", + "discovery_type": "eureka" + } +}' +``` + +Suppose both A-SERVICE and B-SERVICE provide a `/test` API. The above configuration allows access to A-SERVICE's `/test` API through `/a/test` and B-SERVICE's `/test` API through `/b/test`. + +**Notice**:When configuring `upstream.service_name`, `upstream.nodes` will no longer take effect, but will be replaced by 'nodes' obtained from the registry. + +### L4 + +Eureka service discovery also supports use in L4, the configuration method is similar to L7. + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "remote_addr": "127.0.0.1", + "upstream": { + "scheme": "tcp", + "discovery_type": "eureka", + "service_name": "APISIX-EUREKA", + "type": "roundrobin" + } +}' +HTTP/1.1 200 OK +Date: Fri, 30 Dec 2022 03:52:19 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.0.0 +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Access-Control-Expose-Headers: * +Access-Control-Max-Age: 3600 +X-API-VERSION: v3 + +{"key":"\/apisix\/stream_routes\/1","value":{"remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","type":"roundrobin","discovery_type":"eureka","scheme":"tcp","pass_host":"pass","service_name":"APISIX-EUREKA"},"id":"1","create_time":1672106762,"update_time":1672372339}} +``` + +## Embedded control api for debugging + +Sometimes we need the discovery client to export online data snapshot in memory when running for debugging, and if you implement the `_M. 
dump_data()` function: + +```lua +function _M.dump_data() + return {config = local_conf.discovery.eureka, services = applications} +end +``` + +Then you can call its control api as below: + +```shell +GET /v1/discovery/{discovery_type}/dump +``` + +eg: + +```shell +curl http://127.0.0.1:9090/v1/discovery/eureka/dump +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/consul.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/consul.md new file mode 100644 index 0000000..fd6758b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/consul.md @@ -0,0 +1,344 @@ +--- +title: consul +--- + + + +## Summary + +APACHE APISIX supports Consul as a service discovery + +## Configuration for discovery client + +### Configuration for Consul + +First of all, we need to add following configuration in `conf/config.yaml` : + +```yaml +discovery: + consul: + servers: # make sure service name is unique in these consul servers + - "http://127.0.0.1:8500" # `http://127.0.0.1:8500` and `http://127.0.0.1:8600` are different clusters + - "http://127.0.0.1:8600" # `consul` service is default skip service + token: "..." 
# if your consul cluster has enabled acl access control, you need to specify the token + skip_services: # if you need to skip special services + - "service_a" + timeout: + connect: 1000 # default 2000 ms + read: 1000 # default 2000 ms + wait: 60 # default 60 sec + weight: 1 # default 1 + fetch_interval: 5 # default 3 sec, only take effect for keepalive: false way + keepalive: true # default true, use the long pull way to query consul servers + sort_type: "origin" # default origin + default_service: # you can define default service when missing hit + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 # default 1 ms + weight: 1 # default 1 + max_fails: 1 # default 1 + dump: # if you need, when registered nodes updated can dump into file + path: "logs/consul.dump" + expire: 2592000 # unit sec, here is 30 day +``` + +And you can config it in short by default value: + +```yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" +``` + +The `keepalive` has two optional values: + +- `true`, default and recommend value, use the long pull way to query consul servers +- `false`, not recommend, it would use the short pull way to query consul servers, then you can set the `fetch_interval` for fetch interval + +The `sort_type` has four optional values: + +- `origin`, not sorting +- `host_sort`, sort by host +- `port_sort`, sort by port +- `combine_sort`, with the precondition that hosts are ordered, ports are also ordered. + +#### Dump Data + +When we need reload `apisix` online, as the `consul` module maybe loads data from CONSUL slower than load routes from ETCD, and would get the log at the moment before load successfully from consul: + +``` + http_access_phase(): failed to set upstream: no valid upstream node +``` + +So, we import the `dump` function for `consul` module. When reload, would load the dump file before from consul; when the registered nodes in consul been updated, would dump the upstream nodes into file automatically. 
+ +The `dump` has three optional values now: + +- `path`, the dump file save path + - support relative path, eg: `logs/consul.dump` + - support absolute path, eg: `/tmp/consul.dump` + - make sure the dump file's parent path exist + - make sure the `apisix` has the dump file's read-write access permission,eg: add below config in `conf/config.yaml` + +```yaml +nginx_config: # config for render the template to generate nginx.conf + user: root # specifies the execution user of the worker process. +``` + +- `load_on_init`, default value is `true` + - if `true`, just try to load the data from the dump file before loading data from consul when starting, does not care the dump file exists or not + - if `false`, ignore loading data from the dump file + - Whether `true` or `false`, we don't need to prepare a dump file for apisix at anytime +- `expire`, unit sec, avoiding load expired dump data when load + - default `0`, it is unexpired forever + - recommend 2592000, which is 30 days(equals 3600 \* 24 \* 30) + +### Register Http API Services + +Now, register nodes into consul: + +```shell +curl -X PUT 'http://127.0.0.1:8500/v1/agent/service/register' \ +-d '{ + "ID": "service_a1", + "Name": "service_a", + "Tags": ["primary", "v1"], + "Address": "127.0.0.1", + "Port": 8000, + "Meta": { + "service_a_version": "4.0" + }, + "EnableTagOverride": false, + "Weights": { + "Passing": 10, + "Warning": 1 + } +}' + +curl -X PUT 'http://127.0.0.1:8500/v1/agent/service/register' \ +-d '{ + "ID": "service_a1", + "Name": "service_a", + "Tags": ["primary", "v1"], + "Address": "127.0.0.1", + "Port": 8002, + "Meta": { + "service_a_version": "4.0" + }, + "EnableTagOverride": false, + "Weights": { + "Passing": 10, + "Warning": 1 + } +}' +``` + +In some cases, same service name might exist in different consul servers. +To avoid confusion, use the full consul key url path as service name in practice. 
+ +### Port Handling + +When APISIX retrieves service information from Consul, it handles port values as follows: + +- If the service registration includes a valid port number, that port will be used. +- If the port is `nil` (not specified) or `0`, APISIX will default to port `80` for HTTP services. + +### Upstream setting + +#### L7 + +Here is an example of routing a request with a URL of "/*" to a service which named "service_a" and use consul discovery client in the registry : + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/*", + "upstream": { + "service_name": "service_a", + "type": "roundrobin", + "discovery_type": "consul" + } +}' +``` + +The format response as below: + +```json +{ + "key": "/apisix/routes/1", + "value": { + "uri": "/*", + "priority": 0, + "id": "1", + "upstream": { + "scheme": "http", + "type": "roundrobin", + "hash_on": "vars", + "discovery_type": "consul", + "service_name": "service_a", + "pass_host": "pass" + }, + "create_time": 1669267329, + "status": 1, + "update_time": 1669267329 + } +} +``` + +You could find more usage in the `apisix/t/discovery/consul.t` file. + +#### L4 + +Consul service discovery also supports use in L4, the configuration method is similar to L7. + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "remote_addr": "127.0.0.1", + "upstream": { + "scheme": "tcp", + "service_name": "service_a", + "type": "roundrobin", + "discovery_type": "consul" + } +}' +``` + +You could find more usage in the `apisix/t/discovery/stream/consul.t` file. + +## Debugging API + +It also offers control api for debugging. 
+ +### Memory Dump API + +```shell +GET /v1/discovery/consul/dump +``` + +For example: + +```shell +# curl http://127.0.0.1:9090/v1/discovery/consul/dump | jq +{ + "config": { + "fetch_interval": 3, + "timeout": { + "wait": 60, + "connect": 6000, + "read": 6000 + }, + "weight": 1, + "servers": [ + "http://172.19.5.30:8500", + "http://172.19.5.31:8500" + ], + "keepalive": true, + "default_service": { + "host": "172.19.5.11", + "port": 8899, + "metadata": { + "fail_timeout": 1, + "weight": 1, + "max_fails": 1 + } + }, + "skip_services": [ + "service_d" + ] + }, + "services": { + "service_a": [ + { + "host": "127.0.0.1", + "port": 30513, + "weight": 1 + }, + { + "host": "127.0.0.1", + "port": 30514, + "weight": 1 + } + ], + "service_b": [ + { + "host": "172.19.5.51", + "port": 50051, + "weight": 1 + } + ], + "service_c": [ + { + "host": "127.0.0.1", + "port": 30511, + "weight": 1 + }, + { + "host": "127.0.0.1", + "port": 30512, + "weight": 1 + } + ] + } +} +``` + +### Show Dump File API + +It offers another control api for dump file view now. Maybe would add more api for debugging in future. 
+ +```shell +GET /v1/discovery/consul/show_dump_file +``` + +For example: + +```shell +curl http://127.0.0.1:9090/v1/discovery/consul/show_dump_file | jq +{ + "services": { + "service_a": [ + { + "host": "172.19.5.12", + "port": 8000, + "weight": 120 + }, + { + "host": "172.19.5.13", + "port": 8000, + "weight": 120 + } + ] + }, + "expire": 0, + "last_update": 1615877468 +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/consul_kv.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/consul_kv.md new file mode 100644 index 0000000..3eb0f9d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/consul_kv.md @@ -0,0 +1,314 @@ +--- +title: consul_kv +--- + + + +## Summary + +For users that are using [nginx-upsync-module](https://github.com/weibocom/nginx-upsync-module) and Consul KV as a service discovery, like the Weibo Mobile Team, this may be needed. + +Thanks to @fatman-x guy, who developed this module, called `consul_kv`, and its worker process data flow is below: +![consul kv module data flow diagram](https://user-images.githubusercontent.com/548385/107141841-6ced3e00-6966-11eb-8aa4-bc790a4ad113.png) + +## Configuration for discovery client + +### Configuration for Consul KV + +Add following configuration in `conf/config.yaml` : + +```yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:8600" + token: "..." 
# if your consul cluster has enabled acl access control, you need to specify the token + prefix: "upstreams" + skip_keys: # if you need to skip special keys + - "upstreams/unused_api/" + timeout: + connect: 1000 # default 2000 ms + read: 1000 # default 2000 ms + wait: 60 # default 60 sec + weight: 1 # default 1 + fetch_interval: 5 # default 3 sec, only take effect for keepalive: false way + keepalive: true # default true, use the long pull way to query consul servers + default_server: # you can define default server when missing hit + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 # default 1 ms + weight: 1 # default 1 + max_fails: 1 # default 1 + dump: # if you need, when registered nodes updated can dump into file + path: "logs/consul_kv.dump" + expire: 2592000 # unit sec, here is 30 day +``` + +And you can config it in short by default value: + +```yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8500" +``` + +The `keepalive` has two optional values: + +- `true`, default and recommend value, use the long pull way to query consul servers +- `false`, not recommend, it would use the short pull way to query consul servers, then you can set the `fetch_interval` for fetch interval + +#### Dump Data + +When we need reload `apisix` online, as the `consul_kv` module maybe loads data from CONSUL slower than load routes from ETCD, and would get the log at the moment before load successfully from consul: + +``` + http_access_phase(): failed to set upstream: no valid upstream node +``` + +So, we import the `dump` function for `consul_kv` module. When reload, would load the dump file before from consul; when the registered nodes in consul been updated, would dump the upstream nodes into file automatically. 
+ +The `dump` has three optional values now: + +- `path`, the dump file save path + - support relative path, eg: `logs/consul_kv.dump` + - support absolute path, eg: `/tmp/consul_kv.bin` + - make sure the dump file's parent path exist + - make sure the `apisix` has the dump file's read-write access permission, eg: `chown www:root conf/upstream.d/` +- `load_on_init`, default value is `true` + - if `true`, just try to load the data from the dump file before loading data from consul when starting, does not care the dump file exists or not + - if `false`, ignore loading data from the dump file + - Whether `true` or `false`, we don't need to prepare a dump file for apisix at anytime +- `expire`, unit sec, avoiding load expired dump data when load + - default `0`, it is unexpired forever + - recommend 2592000, which is 30 days(equals 3600 \* 24 \* 30) + +### Register Http API Services + +Service register Key&Value template: + +``` +Key: {Prefix}/{Service_Name}/{IP}:{Port} +Value: {"weight": , "max_fails": , "fail_timeout": } +``` + +The register consul key use `upstreams` as prefix by default. The http api service name called `webpages` for example, and you can also use `webpages/oneteam/hello` as service name. The api instance of node's ip and port make up new key: `:`. + +Now, register nodes into consul: + +```shell +curl \ + -X PUT \ + -d ' {"weight": 1, "max_fails": 2, "fail_timeout": 1}' \ + http://127.0.0.1:8500/v1/kv/upstreams/webpages/172.19.5.12:8000 + +curl \ + -X PUT \ + -d ' {"weight": 1, "max_fails": 2, "fail_timeout": 1}' \ + http://127.0.0.1:8500/v1/kv/upstreams/webpages/172.19.5.13:8000 +``` + +In some cases, same keys exist in different consul servers. +To avoid confusion, use the full consul key url path as service name in practice. 
+ +### Upstream setting + +#### L7 + +Here is an example of routing a request with a URL of "/*" to a service which named "http://127.0.0.1:8500/v1/kv/upstreams/webpages/" and use consul_kv discovery client in the registry : + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/*", + "upstream": { + "service_name": "http://127.0.0.1:8500/v1/kv/upstreams/webpages/", + "type": "roundrobin", + "discovery_type": "consul_kv" + } +}' +``` + +The format response as below: + +```json +{ + "node": { + "value": { + "priority": 0, + "update_time": 1612755230, + "upstream": { + "discovery_type": "consul_kv", + "service_name": "http://127.0.0.1:8500/v1/kv/upstreams/webpages/", + "hash_on": "vars", + "type": "roundrobin", + "pass_host": "pass" + }, + "id": "1", + "uri": "/*", + "create_time": 1612755230, + "status": 1 + }, + "key": "/apisix/routes/1" + } +} +``` + +You could find more usage in the `apisix/t/discovery/consul_kv.t` file. + +#### L4 + +Consul_kv service discovery also supports use in L4, the configuration method is similar to L7. + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "remote_addr": "127.0.0.1", + "upstream": { + "scheme": "tcp", + "service_name": "http://127.0.0.1:8500/v1/kv/upstreams/webpages/", + "type": "roundrobin", + "discovery_type": "consul_kv" + } +}' +``` + +You could find more usage in the `apisix/t/discovery/stream/consul_kv.t` file. + +## Debugging API + +It also offers control api for debugging. 
+ +### Memory Dump API + +```shell +GET /v1/discovery/consul_kv/dump +``` + +For example: + +```shell +# curl http://127.0.0.1:9090/v1/discovery/consul_kv/dump | jq +{ + "config": { + "fetch_interval": 3, + "timeout": { + "wait": 60, + "connect": 6000, + "read": 6000 + }, + "prefix": "upstreams", + "weight": 1, + "servers": [ + "http://172.19.5.30:8500", + "http://172.19.5.31:8500" + ], + "keepalive": true, + "default_service": { + "host": "172.19.5.11", + "port": 8899, + "metadata": { + "fail_timeout": 1, + "weight": 1, + "max_fails": 1 + } + }, + "skip_keys": [ + "upstreams/myapi/gateway/apisix/" + ] + }, + "services": { + "http://172.19.5.31:8500/v1/kv/upstreams/webpages/": [ + { + "host": "127.0.0.1", + "port": 30513, + "weight": 1 + }, + { + "host": "127.0.0.1", + "port": 30514, + "weight": 1 + } + ], + "http://172.19.5.30:8500/v1/kv/upstreams/1614480/grpc/": [ + { + "host": "172.19.5.51", + "port": 50051, + "weight": 1 + } + ], + "http://172.19.5.30:8500/v1/kv/upstreams/webpages/": [ + { + "host": "127.0.0.1", + "port": 30511, + "weight": 1 + }, + { + "host": "127.0.0.1", + "port": 30512, + "weight": 1 + } + ] + } +} +``` + +### Show Dump File API + +It offers another control api for dump file view now. Maybe would add more api for debugging in future. 
+ +```shell +GET /v1/discovery/consul_kv/show_dump_file +``` + +For example: + +```shell +curl http://127.0.0.1:9090/v1/discovery/consul_kv/show_dump_file | jq +{ + "services": { + "http://172.19.5.31:8500/v1/kv/upstreams/1614480/webpages/": [ + { + "host": "172.19.5.12", + "port": 8000, + "weight": 120 + }, + { + "host": "172.19.5.13", + "port": 8000, + "weight": 120 + } + ] + }, + "expire": 0, + "last_update": 1615877468 +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/control-plane-service-discovery.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/control-plane-service-discovery.md new file mode 100644 index 0000000..0b9d2ee --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/control-plane-service-discovery.md @@ -0,0 +1,72 @@ +--- +title: Control Plane Service Discovery +keywords: + - API Gateway + - Apache APISIX + - ZooKeeper + - Nacos + - APISIX-Seed +description: This documentation describes implementing service discovery through Nacos and ZooKeeper on the API Gateway APISIX Control Plane. +--- + + + +This document describes how to implement service discovery with Nacos and Zookeeper on the APISIX Control Plane. + +## APISIX-Seed Architecture + +Apache APISIX has supported Data Plane service discovery in the early days, and now APISIX also supports Control Plane service discovery through the [APISIX-Seed](https://github.com/api7/apisix-seed) project. The following figure shows the APISIX-Seed architecture diagram. + +![control-plane-service-discovery](../../../assets/images/control-plane-service-discovery.png) + +The specific information represented by the figures in the figure is as follows: + +1. Register an upstream with APISIX and specify the service discovery type. APISIX-Seed will watch APISIX resource changes in etcd, filter discovery types, and obtain service names. +2. 
APISIX-Seed subscribes the specified service name to the service registry to obtain changes to the corresponding service. +3. After the client registers the service with the service registry, APISIX-Seed will obtain the new service information and write the updated service node into etcd; +4. When the corresponding resources in etcd change, APISIX worker will refresh the latest service node information to memory. + +:::note + +It should be noted that after the introduction of APISIX-Seed, if the service of the registry changes frequently, the data in etcd will also change frequently. So, it is best to set the `--auto-compaction` option when starting etcd to compress the history periodically to avoid etcd eventually exhausting its storage space. Please refer to [revisions](https://etcd.io/docs/v3.5/learning/api/#revisions). + +::: + +## Why APISIX-Seed + +- Network topology becomes simpler + + APISIX does not need to maintain a network connection with each registry, and only needs to pay attention to the configuration information in etcd. This will greatly simplify the network topology. + +- Total data volume about upstream service becomes smaller + + Due to the characteristics of the registry, APISIX may store the full amount of registry service data in the worker, such as consul_kv. By introducing APISIX-Seed, each process of APISIX will not need to additionally cache upstream service-related information. + +- Easier to manage + + Service discovery configuration needs to be configured once per APISIX instance. By introducing APISIX-Seed, Apache APISIX will be indifferent to the configuration changes of the service registry. + +## Supported service registry + +ZooKeeper and Nacos are currently supported, and more service registries will be supported in the future. For more information, please refer to: [APISIX Seed](https://github.com/api7/apisix-seed#apisix-seed-for-apache-apisix). 
+ +- If you want to enable control plane ZooKeeper service discovery, please refer to: [ZooKeeper Deployment Tutorial](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md). + +- If you want to enable control plane Nacos service discovery, please refer to: [Nacos Deployment Tutorial](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/nacos.md). diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/dns.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/dns.md new file mode 100644 index 0000000..8611cd2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/dns.md @@ -0,0 +1,155 @@ +--- +title: DNS +--- + + + +## service discovery via DNS + +Some service discovery system, like Consul, support exposing service information +via DNS. Therefore we can use this way to discover service directly. Both L4 and L7 are supported. + +First of all, we need to configure the address of DNS servers: + +```yaml +# add this to config.yaml +discovery: + dns: + servers: + - "127.0.0.1:8600" # use the real address of your dns server +``` + +Unlike configuring the domain in the Upstream's `nodes` field, service discovery via +DNS will return all records. For example, with upstream configuration: + +```json +{ + "id": 1, + "discovery_type": "dns", + "service_name": "test.consul.service", + "type": "roundrobin" +} +``` + +and `test.consul.service` be resolved as `1.1.1.1` and `1.1.1.2`, this result will be the same as: + +```json +{ + "id": 1, + "type": "roundrobin", + "nodes": [ + {"host": "1.1.1.1", "weight": 1}, + {"host": "1.1.1.2", "weight": 1} + ] +} +``` + +Note that all the IPs from `test.consul.service` share the same weight. + +The resolved records will be cached according to their TTL. +For service whose record is not in the cache, we will query it in the order of `SRV -> A -> AAAA -> CNAME` by default. 
+When we refresh the cache record, we will try from the last previously successful type. +We can also customize the order by modifying the configuration file. + +```yaml +# add this to config.yaml +discovery: + dns: + servers: + - "127.0.0.1:8600" # use the real address of your dns server + order: # order in which to try different dns record types when resolving + - last # "last" will try the last previously successful type for a hostname. + - SRV + - A + - AAAA + - CNAME +``` + +If you want to specify the port for the upstream server, you can add it to the `service_name`: + +```json +{ + "id": 1, + "discovery_type": "dns", + "service_name": "test.consul.service:1980", + "type": "roundrobin" +} +``` + +Another way to do it is via the SRV record, see below. + +### SRV record + +By using SRV record you can specify the port and the weight of a service. + +Assumed you have the SRV record like this: + +``` +; under the section of blah.service +A 300 IN A 1.1.1.1 +B 300 IN A 1.1.1.2 +B 300 IN A 1.1.1.3 + +; name TTL type priority weight port +srv 86400 IN SRV 10 60 1980 A +srv 86400 IN SRV 20 20 1981 B +``` + +Upstream configuration like: + +```json +{ + "id": 1, + "discovery_type": "dns", + "service_name": "srv.blah.service", + "type": "roundrobin" +} +``` + +is the same as: + +```json +{ + "id": 1, + "type": "roundrobin", + "nodes": [ + {"host": "1.1.1.1", "port": 1980, "weight": 60, "priority": -10}, + {"host": "1.1.1.2", "port": 1981, "weight": 10, "priority": -20}, + {"host": "1.1.1.3", "port": 1981, "weight": 10, "priority": -20} + ] +} +``` + +Note that two records of domain B split the weight evenly. +For SRV record, nodes with lower priority are chosen first, so the final priority is negative. + +As for 0 weight SRV record, the [RFC 2782](https://www.ietf.org/rfc/rfc2782.txt) says: + +> Domain administrators SHOULD use Weight 0 when there isn't any server +selection to do, to make the RR easier to read for humans (less +noisy). 
In the presence of records containing weights greater +than 0, records with weight 0 should have a very small chance of +being selected. + +We treat weight 0 record has a weight of 1 so the node "have a very small chance of +being selected", which is also the common way to treat this type of record. + +For SRV record which has port 0, we will fallback to use the upstream protocol's default port. +You can also specify the port in the "service_name" field directly, like "srv.blah.service:8848". diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/eureka.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/eureka.md new file mode 100644 index 0000000..3404a93 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/eureka.md @@ -0,0 +1,25 @@ +--- +title: eureka +--- + + + +Apache APISIX supports service discovery via Eureka. For the details, please start your +reading from [Supported discovery registries](../discovery.md#supported-discovery-registries). diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/kubernetes.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/kubernetes.md new file mode 100644 index 0000000..f2000a1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/kubernetes.md @@ -0,0 +1,406 @@ +--- +title: Kubernetes +keywords: + - Kubernetes + - Apache APISIX + - Service discovery + - Cluster + - API Gateway +description: This article introduce how to perform service discovery based on Kubernetes in Apache APISIX and summarize related issues. +--- + + + +## Summary + +The [_Kubernetes_](https://kubernetes.io/) service discovery [_List-Watch_](https://kubernetes.io/docs/reference/using-api/api-concepts/) real-time changes of [_Endpoints_](https://kubernetes.io/docs/concepts/services-networking/service/) resources, then store theirs value into `ngx.shared.DICT`. 
+ +Discovery also provides a node query interface in accordance with the [_APISIX Discovery Specification_](../discovery.md). + +## How To Use + +Kubernetes service discovery both support single-cluster and multi-cluster modes, applicable to the case where the service is distributed in single or multiple Kubernetes clusters. + +### Single-Cluster Mode Configuration + +A detailed configuration for single-cluster mode Kubernetes service discovery is as follows: + +```yaml +discovery: + kubernetes: + service: + # apiserver schema, options [http, https] + schema: https #default https + + # apiserver host, options [ipv4, ipv6, domain, environment variable] + host: ${KUBERNETES_SERVICE_HOST} #default ${KUBERNETES_SERVICE_HOST} + + # apiserver port, options [port number, environment variable] + port: ${KUBERNETES_SERVICE_PORT} #default ${KUBERNETES_SERVICE_PORT} + + client: + # serviceaccount token or token_file + token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + #token: |- + # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif + # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI + + default_weight: 50 # weight assigned to each discovered endpoint. 
default 50, minimum 0 + + # kubernetes discovery support namespace_selector + # you can use one of [equal, not_equal, match, not_match] filter namespace + namespace_selector: + # only save endpoints with namespace equal default + equal: default + + # only save endpoints with namespace not equal default + #not_equal: default + + # only save endpoints with namespace match one of [default, ^my-[a-z]+$] + #match: + #- default + #- ^my-[a-z]+$ + + # only save endpoints with namespace not match one of [default, ^my-[a-z]+$ ] + #not_match: + #- default + #- ^my-[a-z]+$ + + # kubernetes discovery support label_selector + # for the expression of label_selector, please refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + label_selector: |- + first="a",second="b" + + # reserved lua shared memory size,1m memory can store about 1000 pieces of endpoint + shared_size: 1m #default 1m + + # if watch_endpoint_slices setting true, watch apiserver with endpointslices instead of endpoints + watch_endpoint_slices: false #default false +``` + +If the Kubernetes service discovery runs inside a pod, you can use minimal configuration: + +```yaml +discovery: + kubernetes: { } +``` + +If the Kubernetes service discovery runs outside a pod, you need to create or select a specified [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/), then get its token value, and use following configuration: + +```yaml +discovery: + kubernetes: + service: + schema: https + host: # enter apiserver host value here + port: # enter apiserver port value here + client: + token: # enter serviceaccount token value here + #token_file: # enter file path here +``` + +### Single-Cluster Mode Query Interface + +The Kubernetes service discovery provides a query interface in accordance with the [_APISIX Discovery Specification_](../discovery.md). 
+ +**function:** + nodes(service_name) + +**description:** + nodes() function attempts to look up the ngx.shared.DICT for nodes corresponding to service_name, \ + service_name should match pattern: _[namespace]/[name]:[portName]_ + + + namespace: The namespace where the Kubernetes endpoints is located + + + name: The name of the Kubernetes endpoints + + + portName: The `ports.name` value in the Kubernetes endpoints, if there is no `ports.name`, use `targetPort`, `port` instead. If `ports.name` exists, then port number cannot be used. + +**return value:** + if the Kubernetes endpoints value is as follows: + + ```yaml + apiVersion: v1 + kind: Endpoints + metadata: + name: plat-dev + namespace: default + subsets: + - addresses: + - ip: "10.5.10.109" + - ip: "10.5.10.110" + ports: + - port: 3306 + name: port + ``` + + a nodes("default/plat-dev:port") call will get follow result: + + ``` + { + { + host="10.5.10.109", + port= 3306, + weight= 50, + }, + { + host="10.5.10.110", + port= 3306, + weight= 50, + }, + } + ``` + +### Multi-Cluster Mode Configuration + +A detailed configuration for multi-cluster mode Kubernetes service discovery is as follows: + +```yaml +discovery: + kubernetes: + - id: release # a custom name refer to the cluster, pattern ^[a-z0-9]{1,8} + service: + # apiserver schema, options [http, https] + schema: https #default https + + # apiserver host, options [ipv4, ipv6, domain, environment variable] + host: "1.cluster.com" + + # apiserver port, options [port number, environment variable] + port: "6443" + + client: + # serviceaccount token or token_file + token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + #token: |- + # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif + # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI + + default_weight: 50 # weight assigned to each discovered endpoint. 
default 50, minimum 0 + + # kubernetes discovery support namespace_selector + # you can use one of [equal, not_equal, match, not_match] filter namespace + namespace_selector: + # only save endpoints with namespace equal default + equal: default + + # only save endpoints with namespace not equal default + #not_equal: default + + # only save endpoints with namespace match one of [default, ^my-[a-z]+$] + #match: + #- default + #- ^my-[a-z]+$ + + # only save endpoints with namespace not match one of [default, ^my-[a-z]+$] + #not_match: + #- default + #- ^my-[a-z]+$ + + # kubernetes discovery support label_selector + # for the expression of label_selector, please refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + label_selector: |- + first="a",second="b" + + # reserved lua shared memory size,1m memory can store about 1000 pieces of endpoint + shared_size: 1m #default 1m + + # if watch_endpoint_slices setting true, watch apiserver with endpointslices instead of endpoints + watch_endpoint_slices: false #default false +``` + +Multi-Kubernetes service discovery does not fill default values for service and client fields, you need to fill them according to the cluster configuration. + +### Multi-Cluster Mode Query Interface + +The Kubernetes service discovery provides a query interface in accordance with the [_APISIX Discovery Specification_](../discovery.md). + +**function:** +nodes(service_name) + +**description:** +nodes() function attempts to look up the ngx.shared.DICT for nodes corresponding to service_name, \ +service_name should match pattern: _[id]/[namespace]/[name]:[portName]_ + ++ id: value defined in service discovery configuration + ++ namespace: The namespace where the Kubernetes endpoints is located + ++ name: The name of the Kubernetes endpoints + ++ portName: The `ports.name` value in the Kubernetes endpoints, if there is no `ports.name`, use `targetPort`, `port` instead. 
If `ports.name` exists, then port number cannot be used. + +**return value:** +if the Kubernetes endpoints value is as follows: + + ```yaml + apiVersion: v1 + kind: Endpoints + metadata: + name: plat-dev + namespace: default + subsets: + - addresses: + - ip: "10.5.10.109" + - ip: "10.5.10.110" + ports: + - port: 3306 + name: port + ``` + +a nodes("release/default/plat-dev:port") call will get follow result: + + ``` + { + { + host="10.5.10.109", + port= 3306, + weight= 50, + }, + { + host="10.5.10.110", + port= 3306, + weight= 50, + }, + } + ``` + +## Q&A + +**Q: Why only support configuration token to access _Kubernetes APIServer_?** + +A: Usually, we will use three ways to complete the authentication of _Kubernetes APIServer_: + ++ mTLS ++ Token ++ Basic authentication + +Because lua-resty-http does not currently support mTLS, and basic authentication is not recommended, so currently only the token authentication method is implemented. + +**Q: APISIX inherits Nginx's multiple process model, does it mean that each nginx worker process will [_List-Watch_](https://kubernetes.io/docs/reference/using-api/api-concepts/) kubernetes endpoints resources?** + +A: The Kubernetes service discovery only uses privileged processes to [_List-Watch_](https://kubernetes.io/docs/reference/using-api/api-concepts/) Kubernetes endpoints resources, then store theirs value into `ngx.shared.DICT`, worker processes get results by querying `ngx.shared.DICT`. 
+
+**Q: What permissions do [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) require?**
+
+A: The ServiceAccount requires cluster-level [ get, list, watch ] permissions on endpoints resources; the declarative definition is as follows:
+
+```yaml
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: apisix-test
+  namespace: default
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: apisix-test
+rules:
+- apiGroups: [ "" ]
+  resources: [ endpoints,endpointslices ]
+  verbs: [ get,list,watch ]
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: apisix-test
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: apisix-test
+subjects:
+  - kind: ServiceAccount
+    name: apisix-test
+    namespace: default
+```
+
+**Q: How to get the [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) token value?**
+
+A: Assuming your [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) is located in the namespace apisix and is named kubernetes-discovery, you can use the following steps to get its token value.
+
+  1. Get the secret name. You can execute the following command; the first column of the output is the secret name we want:
+
+  ```shell
+  kubectl -n apisix get secrets | grep kubernetes-discovery
+  ```
+
+  2. Get the token value. Assuming the secret resource name is kubernetes-discovery-token-c64cv, you can execute the following command; the output is the service account token value we want:
+
+  ```shell
+  kubectl -n apisix get secret kubernetes-discovery-token-c64cv -o jsonpath={.data.token} | base64 -d
+  ```
+
+## Debugging API
+
+It also offers a control API for debugging.
+ +### Memory Dump API + +To query/list the nodes discoverd by kubernetes discovery, you can query the /v1/discovery/kubernetes/dump control API endpoint like so: + +```shell +GET /v1/discovery/kubernetes/dump +``` + +Which will yield the following response: + +``` +{ + "endpoints": [ + { + "endpoints": [ + { + "value": "{\"https\":[{\"host\":\"172.18.164.170\",\"port\":6443,\"weight\":50},{\"host\":\"172.18.164.171\",\"port\":6443,\"weight\":50},{\"host\":\"172.18.164.172\",\"port\":6443,\"weight\":50}]}", + "name": "default/kubernetes" + }, + { + "value": "{\"metrics\":[{\"host\":\"172.18.164.170\",\"port\":2379,\"weight\":50},{\"host\":\"172.18.164.171\",\"port\":2379,\"weight\":50},{\"host\":\"172.18.164.172\",\"port\":2379,\"weight\":50}]}", + "name": "kube-system/etcd" + }, + { + "value": "{\"http-85\":[{\"host\":\"172.64.89.2\",\"port\":85,\"weight\":50}]}", + "name": "test-ws/testing" + } + ], + "id": "first" + } + ], + "config": [ + { + "default_weight": 50, + "id": "first", + "client": { + "token": "xxx" + }, + "service": { + "host": "172.18.164.170", + "port": "6443", + "schema": "https" + }, + "shared_size": "1m" + } + ] +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/nacos.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/nacos.md new file mode 100644 index 0000000..5ebbcee --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/discovery/nacos.md @@ -0,0 +1,280 @@ +--- +title: nacos +--- + + + +## Service discovery via Nacos + +The performance of this module needs to be improved: + +1. send the request parallelly. 
+ +### Configuration for Nacos + +Add following configuration in `conf/config.yaml` : + +```yaml +discovery: + nacos: + host: + - "http://${username}:${password}@${host1}:${port1}" + prefix: "/nacos/v1/" + fetch_interval: 30 # default 30 sec + # `weight` is the `default_weight` that will be attached to each discovered node that + # doesn't have a weight explicitly provided in nacos results + weight: 100 # default 100 + timeout: + connect: 2000 # default 2000 ms + send: 2000 # default 2000 ms + read: 5000 # default 5000 ms +``` + +And you can config it in short by default value: + +```yaml +discovery: + nacos: + host: + - "http://192.168.33.1:8848" +``` + +### Upstream setting + +#### L7 + +Here is an example of routing a request with an URI of "/nacos/*" to a service which named "http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS" and use nacos discovery client in the registry: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/nacos/*", + "upstream": { + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos" + } +}' +``` + +The formatted response as below: + +```json +{ + "node": { + "key": "\/apisix\/routes\/1", + "value": { + "id": "1", + "create_time": 1615796097, + "status": 1, + "update_time": 1615799165, + "upstream": { + "hash_on": "vars", + "pass_host": "pass", + "scheme": "http", + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos" + }, + "priority": 0, + "uri": "\/nacos\/*" + } + } +} +``` + +#### L4 + +Nacos service discovery also supports use in L4, the configuration method is similar to L7. 
+ +```shell +$ curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "remote_addr": "127.0.0.1", + "upstream": { + "scheme": "tcp", + "discovery_type": "nacos", + "service_name": "APISIX-NACOS", + "type": "roundrobin" + } +}' +``` + +### discovery_args + +| Name | Type | Requirement | Default | Valid | Description | +| ------------ | ------ | ----------- | ------- | ----- | ------------------------------------------------------------ | +| namespace_id | string | optional | public | | This parameter is used to specify the namespace of the corresponding service | +| group_name | string | optional | DEFAULT_GROUP | | This parameter is used to specify the group of the corresponding service | + +#### Specify the namespace + +Example of routing a request with an URI of "/nacosWithNamespaceId/*" to a service with name, namespaceId "http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns" and use nacos discovery client in the registry: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/nacosWithNamespaceId/*", + "upstream": { + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "namespace_id": "test_ns" + } + } +}' +``` + +The formatted response as below: + +```json +{ + "node": { + "key": "\/apisix\/routes\/2", + "value": { + "id": "2", + "create_time": 1615796097, + "status": 1, + "update_time": 1615799165, + "upstream": { + "hash_on": "vars", + "pass_host": "pass", + "scheme": "http", + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "namespace_id": "test_ns" + } + }, + "priority": 0, + "uri": "\/nacosWithNamespaceId\/*" + } + } +} +``` + +#### Specify the group + +Example of routing a request with an URI of "/nacosWithGroupName/*" to a service with name, groupName 
"http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&groupName=test_group" and use nacos discovery client in the registry: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/3 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/nacosWithGroupName/*", + "upstream": { + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "group_name": "test_group" + } + } +}' +``` + +The formatted response as below: + +```json +{ + "node": { + "key": "\/apisix\/routes\/3", + "value": { + "id": "3", + "create_time": 1615796097, + "status": 1, + "update_time": 1615799165, + "upstream": { + "hash_on": "vars", + "pass_host": "pass", + "scheme": "http", + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "group_name": "test_group" + } + }, + "priority": 0, + "uri": "\/nacosWithGroupName\/*" + } + } +} +``` + +#### Specify the namespace and group + +Example of routing a request with an URI of "/nacosWithNamespaceIdAndGroupName/*" to a service with name, namespaceId, groupName "http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns&groupName=test_group" and use nacos discovery client in the registry: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/4 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/nacosWithNamespaceIdAndGroupName/*", + "upstream": { + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group" + } + } +}' +``` + +The formatted response as below: + +```json +{ + "node": { + "key": "\/apisix\/routes\/4", + "value": { + "id": "4", + "create_time": 1615796097, + "status": 1, + "update_time": 1615799165, + "upstream": { + "hash_on": "vars", + "pass_host": "pass", + "scheme": "http", + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": 
"nacos", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group" + } + }, + "priority": 0, + "uri": "\/nacosWithNamespaceIdAndGroupName\/*" + } + } +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/examples/plugins-hmac-auth-generate-signature.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/examples/plugins-hmac-auth-generate-signature.md new file mode 100644 index 0000000..9eaba73 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/examples/plugins-hmac-auth-generate-signature.md @@ -0,0 +1,204 @@ +--- +title: HMAC Generate Signature Examples +--- + + + +## Python 3 + +```python +import base64 +import hashlib +import hmac + +secret = bytes('the shared secret key here', 'utf-8') +message = bytes('this is signature string', 'utf-8') + + +hash = hmac.new(secret, message, hashlib.sha256) + +# to lowercase hexits +hash.hexdigest() + +# to lowercase base64 +base64.b64encode(hash.digest()) +``` + +## Java + +```java +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import java.security.InvalidKeyException; +import java.security.NoSuchAlgorithmException; +import javax.xml.bind.DatatypeConverter; + +class Main { + public static void main(String[] args) { + try { + String secret = "the shared secret key here"; + String message = "this is signature string"; + + Mac hasher = Mac.getInstance("HmacSHA256"); + hasher.init(new SecretKeySpec(secret.getBytes(), "HmacSHA256")); + + byte[] hash = hasher.doFinal(message.getBytes()); + + // to lowercase hexits + DatatypeConverter.printHexBinary(hash); + + // to base64 + DatatypeConverter.printBase64Binary(hash); + } + catch (NoSuchAlgorithmException e) {} + catch (InvalidKeyException e) {} + } +} +``` + +## Go + +```go +package main + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/hex" +) + +func main() { + secret := []byte("the shared secret key here") + message := []byte("this is signature string") + + hash := 
hmac.New(sha256.New, secret) + hash.Write(message) + + // to lowercase hexits + hex.EncodeToString(hash.Sum(nil)) + + // to base64 + base64.StdEncoding.EncodeToString(hash.Sum(nil)) +} +``` + +## Ruby + +```ruby +require 'base64' +require 'openssl' + +secret = 'the shared secret key here' +message = 'this is signature string' + +# to lowercase hexits +OpenSSL::HMAC.hexdigest('sha256', secret, message) + +# to base64 +Base64.encode64(OpenSSL::HMAC.digest('sha256', secret, message)) +``` + +## NodeJs + +```js +var crypto = require('crypto'); + +var secret = 'the shared secret key here'; +var message = 'this is signature string'; + +var hash = crypto.createHmac('sha256', secret).update(message); + +// to lowercase hexits +hash.digest('hex'); + +// to base64 +hash.digest('base64'); +``` + +## JavaScript ES6 + +```js +const secret = 'the shared secret key here'; +const message = 'this is signature string'; + +const getUtf8Bytes = str => + new Uint8Array( + [...unescape(encodeURIComponent(str))].map(c => c.charCodeAt(0)) + ); + +const secretBytes = getUtf8Bytes(secret); +const messageBytes = getUtf8Bytes(message); + +const cryptoKey = await crypto.subtle.importKey( + 'raw', secretBytes, { name: 'HMAC', hash: 'SHA-256' }, + true, ['sign'] +); +const sig = await crypto.subtle.sign('HMAC', cryptoKey, messageBytes); + +// to lowercase hexits +[...new Uint8Array(sig)].map(b => b.toString(16).padStart(2, '0')).join(''); + +// to base64 +btoa(String.fromCharCode(...new Uint8Array(sig))); +``` + +## PHP + +```php + + +## What are external plugin and plugin runner + +APISIX supports writing plugins in Lua. This type of plugin will be executed +inside APISIX. Sometimes you want to develop plugins in other languages, so APISIX +provides sidecars that load your plugins and run them when the requests hit +APISIX. These sidecars are called plugin runners and your plugins are called +external plugins. 
+ +## How does it work + +![external-plugin](../../assets/images/external-plugin.png) + +When you configure a plugin runner in APISIX, APISIX will run the plugin runner +as a subprocess. The process will belong to the same user of the APISIX +process. When we restart or reload APISIX, the plugin runner will be restarted too. + +Once you have configured `ext-plugin-*` plugins for a given route, the requests +which hit the route will trigger RPC call from APISIX to the plugin runner via +unix socket. + +The plugin runner will handle the RPC call, create a fake request at its side, +run external plugins and return the result back to APISIX. + +The target external plugins and the execution order are configured in the `ext-plugin-*` +plugins. Like other plugins, they can be enabled and reconfigured on the fly. + +## How is it implemented + +If you are interested in the implementation of Plugin Runner, please refer to [The Implementation of Plugin Runner](./internal/plugin-runner.md). + +## Supported plugin runners + +- Java: https://github.com/apache/apisix-java-plugin-runner +- Go: https://github.com/apache/apisix-go-plugin-runner +- Python: https://github.com/apache/apisix-python-plugin-runner +- JavaScript: https://github.com/zenozeng/apisix-javascript-plugin-runner + +## Configuration for plugin runner in APISIX + +To run the plugin runner in the prod, add the section below to `config.yaml`: + +```yaml +ext-plugin: + cmd: ["blah"] # replace it to the real runner executable according to the runner you choice +``` + +Then APISIX will manage the runner as its subprocess. + +Note: APISIX can't manage the runner on the Mac in `v2.6`. + +During development, we want to run the runner separately so that we can restart it without +restarting APISIX first. + +By specifying the environment variable `APISIX_LISTEN_ADDRESS`, we can force the runner to +listen to a fixed address. 
+For instance: + +```bash +APISIX_LISTEN_ADDRESS=unix:/tmp/x.sock ./the_runner +``` + +will force the runner to listen to `/tmp/x.sock`. + +Then you need to configure APISIX to send RPC to the fixed address: + +```yaml +ext-plugin: + # cmd: ["blah"] # don't configure the executable! + path_for_test: "/tmp/x.sock" # without 'unix:' prefix +``` + +In the prod environment, `path_for_test` should not be used and the unix socket +path will be generated dynamically. + +## FAQ + +### When managing by APISIX, the runner can't access my environment variable + +Since `v2.7`, APISIX can pass environment variables to the runner. + +However, Nginx will hide all environment variables by default. So you need to +declare your variable first in the `conf/config.yaml`: + +```yaml +nginx_config: + envs: + - MY_ENV_VAR +``` + +### APISIX terminates my runner with SIGKILL but not SIGTERM! + +Since `v2.7`, APISIX will stop the runner with SIGTERM when it is running on +OpenResty 1.19+. + +However, APISIX needs to wait for the runner to quit so that we can ensure the resource +for the process group is freed. + +Therefore, we send SIGTERM first. And then after 1 second, if the runner is still +running, we will send SIGKILL. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/README.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/README.md new file mode 100644 index 0000000..d57fd47 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/README.md @@ -0,0 +1,71 @@ +--- +title: Get APISIX +description: This tutorial uses a script to quickly install Apache APISIX in your local environment and verify it through the Admin API. +--- + + + + + +> The Getting Started tutorials are contributed by [API7.ai](https://api7.ai/). + +Developed and donated by API7.ai, Apache APISIX is an open source, dynamic, scalable, and high-performance cloud native API gateway for all your APIs and microservices. 
It is a [top-level project](https://projects.apache.org/project.html?apisix) of the Apache Software Foundation. + +You can use APISIX API Gateway as a traffic entrance to process all business data. It offers features including dynamic routing, dynamic upstream, dynamic certificates, A/B testing, canary release, blue-green deployment, limit rate, defense against malicious attacks, metrics, monitoring alarms, service observability, service governance, and more. + +This tutorial uses a script to quickly install [Apache APISIX](https://api7.ai/apisix) in your local environment and verifies the installation through the Admin API. + +## Prerequisite(s) + +The quickstart script relies on several components: + +* [Docker](https://docs.docker.com/get-docker/) is used to install the containerized **etcd** and **APISIX**. +* [curl](https://curl.se/) is used to send requests to APISIX for validation. + +## Get APISIX + +:::caution + +To provide a better experience in this tutorial, the authorization of Admin API is switched off by default. Please turn on the authorization of Admin API in the production environment. + +::: +APISIX can be easily installed and started with the quickstart script: + +```shell +curl -sL https://run.api7.ai/apisix/quickstart | sh +``` + +The script should start two Docker containers, _apisix-quickstart_ and _etcd_. APISIX uses etcd to save and synchronize configurations. Both the etcd and the APISIX use [**host**](https://docs.docker.com/network/host/) Docker network mode. That is, the APISIX can be accessed from local. + +You will see the following message once APISIX is ready: + +```text +✔ APISIX is ready! +``` + +## Validate + +Once APISIX is running, you can use curl to interact with it. 
Send a simple HTTP request to validate if APISIX is working properly: + +```shell +curl "http://127.0.0.1:9080" --head | grep Server +``` + +If everything is ok, you will get the following response: + +```text +Server: APISIX/Version +``` + +`Version` refers to the version of APISIX that you have installed. For example, `APISIX/3.3.0`. + +You now have APISIX installed and running successfully!​ + +## Next Steps + +The following tutorial is based on the working APISIX, please keep everything running and move on to the next step. + +* [Configure Routes](configure-routes.md) +* [Load Balancing](load-balancing.md) +* [Rate Limiting](rate-limiting.md) +* [Key Authentication](key-authentication.md) diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/configure-routes.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/configure-routes.md new file mode 100644 index 0000000..19c1631 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/configure-routes.md @@ -0,0 +1,73 @@ +--- +title: Configure Routes +slug: /getting-started/configure-routes +--- + + + + + +> The Getting Started tutorials are contributed by [API7.ai](https://api7.ai/). + +Apache APISIX provides flexible gateway management capabilities based on _routes_, where routing paths and targets are defined for requests. + +This tutorial guides you on how to create a route and validate it. You will complete the following steps: + +1. Create a route with a sample _upstream_ that points to [httpbin.org](http://httpbin.org). +2. Use _cURL_ to send a test request to see how APISIX proxies and forwards the request. + +## What is a Route + +A route is a routing path to upstream targets. In [Apache APISIX](https://api7.ai/apisix), routes are responsible for matching client's requests based on defined rules, loading and executing the corresponding plugins, as well as forwarding requests to the specified upstream services. 
+ +In APISIX, a simple route can be set up with a path-matching URI and a corresponding upstream address. + +## What is an Upstream + +An upstream is a set of target nodes with the same work. It defines a virtual host abstraction that performs load balancing on a given set of service nodes according to the configured rules. + +## Prerequisite(s) + +1. Complete [Get APISIX](./README.md) to install APISIX. + +## Create a Route + +In this section, you will create a route that forwards client requests to [httpbin.org](http://httpbin.org), a public HTTP request and response service. + +The following command creates a route, which should forward all requests sent to `http://127.0.0.1:9080/ip` to [httpbin.org/ip](http://httpbin.org/ip): + +[//]: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "getting-started-ip", + "uri": "/ip", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +You will receive an `HTTP/1.1 201 Created` response if the route was created successfully. + +## Validate + +```shell +curl "http://127.0.0.1:9080/ip" +``` + +The expected response is similar to the following: + +```text +{ + "origin": "183.94.122.205" +} +``` + +## What's Next + +This tutorial creates a route with only one target node. In the next tutorial, you will learn how to configure load balancing with multiple target nodes. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/key-authentication.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/key-authentication.md new file mode 100644 index 0000000..4165536 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/key-authentication.md @@ -0,0 +1,184 @@ +--- +title: Key Authentication +slug: /getting-started/key-authentication +--- + + + + + +> The Getting Started tutorials are contributed by [API7.ai](https://api7.ai/). 
+ +An API gateway's primary role is to connect API consumers and providers. For security reasons, it should authenticate and authorize consumers before access to internal resources. + +![Key Authentication](https://static.apiseven.com/uploads/2023/02/08/8mRaK3v1_consumer.png) + +APISIX has a flexible plugin extension system and a number of existing plugins for user authentication and authorization. For example: + +- [Key Authentication](https://apisix.apache.org/docs/apisix/plugins/key-auth/) +- [Basic Authentication](https://apisix.apache.org/docs/apisix/plugins/basic-auth/) +- [JSON Web Token (JWT) Authentication](https://apisix.apache.org/docs/apisix/plugins/jwt-auth/) +- [Keycloak](https://apisix.apache.org/docs/apisix/plugins/authz-keycloak/) +- [Casdoor](https://apisix.apache.org/docs/apisix/plugins/authz-casdoor/) +- [Wolf RBAC](https://apisix.apache.org/docs/apisix/plugins/wolf-rbac/) +- [OpenID Connect](https://apisix.apache.org/docs/apisix/plugins/openid-connect/) +- [Central Authentication Service (CAS)](https://apisix.apache.org/docs/apisix/plugins/cas-auth/) +- [HMAC](https://apisix.apache.org/docs/apisix/plugins/hmac-auth/) +- [Casbin](https://apisix.apache.org/docs/apisix/plugins/authz-casbin/) +- [LDAP](https://apisix.apache.org/docs/apisix/plugins/ldap-auth/) +- [Open Policy Agent (OPA)](https://apisix.apache.org/docs/apisix/plugins/opa/) +- [Forward Authentication](https://apisix.apache.org/docs/apisix/plugins/forward-auth/) +- [Multiple Authentications](https://apisix.apache.org/docs/apisix/plugins/multi-auth/) + +In this tutorial, you will create a _consumer_ with _key authentication_, and learn how to enable and disable key authentication. + +## What is a Consumer + +A Consumer is an application or a developer who consumes the API. + +In APISIX, a Consumer requires a unique _username_ and an authentication _plugin_ from the list above to be created. 
+ +## What is Key Authentication + +Key authentication is a relatively simple but widely used authentication approach. The idea is as follows: + +1. Administrator adds an authentication key (API key) to the Route. +2. API consumers add the key to the query string or headers for authentication when sending requests. + +## Enable Key Authentication + +### Prerequisite(s) + +1. Complete [Get APISIX](./README.md) to install APISIX. +2. Complete [Configure Routes](./configure-routes.md#what-is-a-route). + +### Create a Consumer + +Let's create a consumer named `tom` and enable the `key-auth` plugin with an API key `secret-key`. All requests sent with the key `secret-key` should be authenticated as `tom`. + +:::caution + +Please use a complex key in the Production environment. + +::: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT -d ' +{ + "username": "tom", + "plugins": { + "key-auth": { + "key": "secret-key" + } + } +}' +``` + +You will receive an `HTTP/1.1 201 Created` response if the consumer was created successfully. + +### Enable Authentication + +Inheriting the route `getting-started-ip` from [Configure Routes](./configure-routes.md), we only need to use the `PATCH` method to add the `key-auth` plugin to the route: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "key-auth": {} + } +}' +``` + +You will receive an `HTTP/1.1 201 Created` response if the plugin was added successfully. + +### Validate + +Let's validate the authentication in the following scenarios: + +#### 1. Send a request without any key + +Send a request without the `apikey` header. + +```shell +curl -i "http://127.0.0.1:9080/ip" +``` + +Since you enabled the key authentication, you will receive an unauthorized response with `HTTP/1.1 401 Unauthorized`. 
+ +```text +HTTP/1.1 401 Unauthorized +Date: Wed, 08 Feb 2023 09:38:36 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.1.0 +``` + +#### 2. Send a request with a wrong key + +Send a request with a wrong key (e.g. `wrong-key`) in the `apikey` header. + +```shell +curl -i "http://127.0.0.1:9080/ip" -H 'apikey: wrong-key' +``` + +Since the key is incorrect, you will receive an unauthorized response with `HTTP/1.1 401 Unauthorized`. + +```text +HTTP/1.1 401 Unauthorized +Date: Wed, 08 Feb 2023 09:38:27 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.1.0 +``` + +#### 3. Send a request with the correct key + +Send a request with the correct key (`secret-key`) in the `apikey` header. + +```shell +curl -i "http://127.0.0.1:9080/ip" -H 'apikey: secret-key' +``` + +You will receive an `HTTP/1.1 200 OK` response. + +```text +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 44 +Connection: keep-alive +Date: Thu, 09 Feb 2023 03:27:57 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/3.1.0 +``` + +### Disable Authentication + +Disable the key authentication plugin by setting the `_meta.disable` parameter to `true`. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "key-auth": { + "_meta": { + "disable": true + } + } + } +}' +``` + +You can send a request without any key to validate: + +```shell +curl -i "http://127.0.0.1:9080/ip" +``` + +Because you have disabled the key authentication plugin, you will receive an `HTTP/1.1 200 OK` response. + +## What's Next + +You have learned how to configure key authentication for a route. In the next tutorial, you will learn how to configure rate limiting. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/load-balancing.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/load-balancing.md new file mode 100644 index 0000000..520564f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/load-balancing.md @@ -0,0 +1,99 @@ +--- +title: Load Balancing +slug: /getting-started/load-balancing +--- + + + + + +> The Getting Started tutorials are contributed by [API7.ai](https://api7.ai/). + +Load balancing manages traffic between clients and servers. It is a mechanism used to decide which server handles a specific request, allowing for improved performance, scalability, and reliability. Load balancing is a key consideration in designing systems that need to handle a large volume of traffic. + +Apache APISIX supports weighted round-robin load balancing, in which incoming traffic are distributed across a set of servers in a cyclical pattern, with each server taking a turn in a predefined order. + +In this tutorial, you will create a route with two upstream services and enable round-robin load balancing to distribute traffic between the two services. + +## Prerequisite(s) + +1. Complete [Get APISIX](./README.md) to install APISIX. +2. Understand APISIX [Route and Upstream](./configure-routes.md#what-is-a-route). + +## Enable Load Balancing + +Let's create a route with two upstream services. All requests sent to the `/headers` endpoint will be forwarded to [httpbin.org](https://httpbin.org/headers) and [mock.api7.ai](https://mock.api7.ai/headers), which should echo back the requester's headers. 
+ +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "getting-started-headers", + "uri": "/headers", + "upstream" : { + "type": "roundrobin", + "nodes": { + "httpbin.org:443": 1, + "mock.api7.ai:443": 1 + }, + "pass_host": "node", + "scheme": "https" + } +}' +``` + +You will receive an `HTTP/1.1 201 Created` response if the route was created successfully. + +:::info + +1. The `pass_host` field is set to `node` to pass the host header to the upstream. +2. The `scheme` field is set to `https` to enable TLS when sending requests to the upstream. + +::: + +## Validate + +The two services respond with different data. + +From `httpbin.org`: + +```json +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.58.0", + "X-Amzn-Trace-Id": "Root=1-63e34b15-19f666602f22591b525e1e80", + "X-Forwarded-Host": "localhost" + } +} +``` + +From `mock.api7.ai`: + +```json +{ + "headers": { + "accept": "*/*", + "host": "mock.api7.ai", + "user-agent": "curl/7.58.0", + "content-type": "application/json", + "x-application-owner": "API7.ai" + } +} +``` + +Let's generate 100 requests to test the load-balancing effect: + +```shell +hc=$(seq 100 | xargs -I {} curl "http://127.0.0.1:9080/headers" -sL | grep "httpbin" | wc -l); echo httpbin.org: $hc, mock.api7.ai: $((100 - $hc)) +``` + +The result shows the requests were distributed over the two services almost equally: + +```text +httpbin.org: 51, mock.api7.ai: 49 +``` + +## What's Next + +You have learned how to configure load balancing. In the next tutorial, you will learn how to configure key authentication. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/rate-limiting.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/rate-limiting.md new file mode 100644 index 0000000..0205095 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/getting-started/rate-limiting.md @@ -0,0 +1,104 @@ +--- +title: Rate Limiting +slug: /getting-started/rate-limiting +--- + + + + + +> The Getting Started tutorials are contributed by [API7.ai](https://api7.ai/). + +APISIX is a unified control point, managing the ingress and egress of APIs and microservices traffic. In addition to the legitimate client requests, these requests may also include unwanted traffic generated by web crawlers as well as cyber attacks, such as DDoS. + +APISIX offers rate limiting capabilities to protect APIs and microservices by limiting the number of requests sent to upstream services in a given period of time. The count of requests is done efficiently in memory with low latency and high performance. + +
+
+Routes Diagram +
+
+ +In this tutorial, you will enable the `limit-count` plugin to set a rate limiting constraint on the incoming traffic. + +## Prerequisite(s) + +1. Complete the [Get APISIX](./README.md) step to install APISIX first. +2. Complete the [Configure Routes](./configure-routes.md#what-is-a-route) step. + +## Enable Rate Limiting + +The following route `getting-started-ip` is inherited from [Configure Routes](./configure-routes.md). You only need to use the `PATCH` method to add the `limit-count` plugin to the route: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 10, + "rejected_code": 503 + } + } +}' +``` + +You will receive an `HTTP/1.1 201 Created` response if the plugin was added successfully. The above configuration limits the incoming requests to a maximum of 2 requests within 10 seconds. + +### Validate + +Let's generate 100 simultaneous requests to see the rate limiting plugin in effect. + +```shell +count=$(seq 100 | xargs -I {} curl "http://127.0.0.1:9080/ip" -I -sL | grep "503" | wc -l); echo \"200\": $((100 - $count)), \"503\": $count +``` + +The results are as expected: out of the 100 requests, 2 requests were sent successfully (status code `200`) while the others were rejected (status code `503`). 
+ +```text +"200": 2, "503": 98 +``` + +## Disable Rate Limiting + +Disable rate limiting by setting the `_meta.disable` parameter to `true`: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "limit-count": { + "_meta": { + "disable": true + } + } + } +}' +``` + +### Validate + +Let's generate 100 requests again to validate if it is disabled: + +```shell +count=$(seq 100 | xargs -i curl "http://127.0.0.1:9080/ip" -I -sL | grep "503" | wc -l); echo \"200\": $((100 - $count)), \"503\": $count +``` + +The results below show that all of the requests were sent successfully: + +```text +"200": 100, "503": 0 +``` + +## More + +[//]: +[//]: +[//]: +You can use the APISIX variables to configure fined matching rules of rate limiting, such as `$host` and `$uri`. In addition, APISIX also supports rate limiting at the cluster level using Redis. + +## What's Next + +Congratulations! You have learned how to configure rate limiting and completed the Getting Started tutorials. + +You can continue to explore other documentations to customize APISIX and meet your production needs. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/grpc-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/grpc-proxy.md new file mode 100644 index 0000000..dbe1a8c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/grpc-proxy.md @@ -0,0 +1,122 @@ +--- +title: gRPC Proxy +--- + + + +proxying gRPC traffic: +gRPC client -> APISIX -> gRPC/gRPCS server + +## Parameters + +* `scheme`: the `scheme` of the route's upstream must be `grpc` or `grpcs`. +* `uri`: format likes /service/method, Example:/helloworld.Greeter/SayHello + +### Example + +#### create proxying gRPC route + +Here's an example, to proxying gRPC service by specified route: + +* attention: the `scheme` of the route's upstream must be `grpc` or `grpcs`. 
+* attention: APISIX uses TLS-encrypted HTTP/2 to expose gRPC services, so you need to [configure an SSL certificate](certificate.md)
+* attention: APISIX also supports exposing gRPC services with plaintext HTTP/2, which does not rely on TLS and is usually used to proxy gRPC services in intranet environments
+* the gRPC server example: [grpc_server_example](https://github.com/api7/grpc_server_example)
+
+:::note
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "methods": ["POST", "GET"],
+    "uri": "/helloworld.Greeter/SayHello",
+    "upstream": {
+        "scheme": "grpc",
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:50051": 1
+        }
+    }
+}'
+```
+
+#### testing HTTP/2 with TLS encryption
+
+Invoking the route created before:
+
+```shell
+$ grpcurl -insecure -import-path /pathtoprotos -proto helloworld.proto -d '{"name":"apisix"}' 127.0.0.1:9443 helloworld.Greeter.SayHello
+{
+  "message": "Hello apisix"
+}
+```
+
+> grpcurl is a CLI tool, similar to curl, that acts as a gRPC client and lets you interact with a gRPC server. For installation, please check out the official [documentation](https://github.com/fullstorydev/grpcurl#installation).
+
+This means that the proxying is working.
+
+#### testing HTTP/2 with plaintext
+
+By default, APISIX only listens on `9443` for TLS-encrypted HTTP/2.
You can support HTTP/2 with plaintext via the `node_listen` section under `apisix` in `conf/config.yaml`: + +```yaml +apisix: + node_listen: + - port: 9080 + - port: 9081 + enable_http2: true +``` + +Invoking the route created before: + +```shell +$ grpcurl -plaintext -import-path /pathtoprotos -proto helloworld.proto -d '{"name":"apisix"}' 127.0.0.1:9081 helloworld.Greeter.SayHello +{ + "message": "Hello apisix" +} +``` + +This means that the proxying is working. + +### gRPCS + +If your gRPC service encrypts with TLS by itself (so called `gPRCS`, gPRC + TLS), you need to change the `scheme` to `grpcs`. The example above runs gRPCS service on port 50052, to proxy gRPC request, we need to use the configuration below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["POST", "GET"], + "uri": "/helloworld.Greeter/SayHello", + "upstream": { + "scheme": "grpcs", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50052": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/http3.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/http3.md new file mode 100644 index 0000000..f25a2e5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/http3.md @@ -0,0 +1,186 @@ +--- +title: HTTP/3 Protocol +--- + + + +[HTTP/3](https://en.wikipedia.org/wiki/HTTP/3) is the third major version of the Hypertext Transfer Protocol (HTTP). Unlike its predecessors which rely on TCP, HTTP/3 is based on [QUIC (Quick UDP Internet Connections) protocol](https://en.wikipedia.org/wiki/QUIC). It brings several benefits that collectively result in reduced latency and improved performance: + +* enabling seamless transition between different network connections, such as switching from Wi-Fi to mobile data. +* eliminating head-of-line blocking, so that a lost packet does not block all streams. 
+* negotiating TLS versions at the same time as the TLS handshakes, allowing for faster connections.
+* providing encryption by default, ensuring that all data transmitted over an HTTP/3 connection is protected and confidential.
+* providing zero round-trip time (0-RTT) when communicating with servers that clients have already established connections to.
+
+APISIX currently supports HTTP/3 connections between downstream clients and APISIX. HTTP/3 connections with upstream services are not yet supported, and contributions are welcome.
+
+:::caution
+
+This feature is currently experimental and not recommended for production use.
+
+:::
+
+This document will show you how to configure APISIX to enable HTTP/3 connections between clients and APISIX and document a few known issues.
+
+## Usage
+
+### Enable HTTP/3 in APISIX
+
+Enable HTTP/3 on port `9443` (or a different port) by adding the following configuration to APISIX's `config.yaml` configuration file:
+
+```yaml title="config.yaml"
+apisix:
+  ssl:
+    listen:
+      - port: 9443
+        enable_http3: true
+    ssl_protocols: TLSv1.3
+```
+
+:::info
+
+If you are deploying APISIX using Docker, make sure to allow UDP on the HTTP/3 port, such as `-p 9443:9443/udp`.
+
+:::
+
+Then reload APISIX for configuration changes to take effect:
+
+```shell
+apisix reload
+```
+
+### Generate Certificates and Keys
+
+HTTP/3 requires TLS. You can use purchased certificates or self-generate them, whichever is applicable.
+ +To self-generate, first generate the certificate authority (CA) key and certificate: + +```shell +openssl genrsa -out ca.key 2048 && \ + openssl req -new -sha256 -key ca.key -out ca.csr -subj "/CN=ROOTCA" && \ + openssl x509 -req -days 36500 -sha256 -extensions v3_ca -signkey ca.key -in ca.csr -out ca.crt +``` + +Next, generate the key and certificate with a common name for APISIX, and sign with the CA certificate: + +```shell +openssl genrsa -out server.key 2048 && \ + openssl req -new -sha256 -key server.key -out server.csr -subj "/CN=test.com" && \ + openssl x509 -req -days 36500 -sha256 -extensions v3_req \ + -CA ca.crt -CAkey ca.key -CAserial ca.srl -CAcreateserial \ + -in server.csr -out server.crt +``` + +### Configure HTTPS + +Optionally load the content stored in `server.crt` and `server.key` into shell variables: + +```shell +server_cert=$(cat server.crt) +server_key=$(cat server.key) +``` + +Create an SSL certificate object to save the server certificate and its key: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/ssls" -X PUT -d ' +{ + "id": "quickstart-tls-client-ssl", + "sni": "test.com", + "cert": "'"${server_cert}"'", + "key": "'"${server_key}"'" +}' +``` + +### Create a Route + +Create a sample route to `httpbin.org`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id":"httpbin-route", + "uri":"/get", + "upstream": { + "type":"roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +### Verify HTTP/3 Connections + +Install [static-curl](https://github.com/stunnel/static-curl) or any other curl executable that has HTTP/3 support. + +Send a request to the route: + +```shell +curl -kv --http3-only \ + -H "Host: test.com" \ + --resolve "test.com:9443:127.0.0.1" "https://test.com:9443/get" +``` + +You should receive an `HTTP/3 200` response similar to the following: + +```text +* Added test.com:9443:127.0.0.1 to DNS cache +* Hostname test.com was found in DNS cache +* Trying 127.0.0.1:9443... 
+* QUIC cipher selection: TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256 +* Skipped certificate verification +* Connected to test.com (127.0.0.1) port 9443 +* using HTTP/3 +* [HTTP/3] [0] OPENED stream for https://test.com:9443/get +* [HTTP/3] [0] [:method: GET] +* [HTTP/3] [0] [:scheme: https] +* [HTTP/3] [0] [:authority: test.com] +* [HTTP/3] [0] [:path: /get] +* [HTTP/3] [0] [user-agent: curl/8.7.1] +* [HTTP/3] [0] [accept: */*] +> GET /get HTTP/3 +> Host: test.com +> User-Agent: curl/8.7.1 +> Accept: */* +> +* Request completely sent off +< HTTP/3 200 +... +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Content-Length": "0", + "Host": "test.com", + "User-Agent": "curl/8.7.1", + "X-Amzn-Trace-Id": "Root=1-6656013a-27da6b6a34d98e3e79baaf5b", + "X-Forwarded-Host": "test.com" + }, + "origin": "172.19.0.1, 123.40.79.456", + "url": "http://test.com/get" +} +* Connection #0 to host test.com left intact +``` + +## Known Issues + +- For APISIX-3.9, test cases of Tongsuo will fail because the Tongsuo does not support QUIC TLS. +- APISIX-3.9 is based on NGINX-1.25.3 with vulnerabilities in HTTP/3 (CVE-2024-24989, CVE-2024-24990). diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/install-dependencies.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/install-dependencies.md new file mode 100644 index 0000000..049c1a6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/install-dependencies.md @@ -0,0 +1,52 @@ +--- +title: Install Dependencies +--- + + + +## Note + +- Since v2.0 Apache APISIX would not support the v2 protocol storage to etcd anymore, and the minimum etcd version supported is v3.4.0. What's more, etcd v3 uses gRPC as the messaging protocol, while Apache APISIX uses HTTP(S) to communicate with etcd cluster, so be sure the [etcd gRPC gateway](https://etcd.io/docs/v3.4.0/dev-guide/api_grpc_gateway/) is enabled. 
+
+- Now by default Apache APISIX uses the HTTP protocol to talk with the etcd cluster, which is insecure. Please configure a certificate and the corresponding private key for your etcd cluster, and use the "https" scheme explicitly in the etcd endpoints list in your Apache APISIX configuration, if you want to keep the data secure and intact. See the etcd section in `conf/config.yaml.example` for more details.
+
+- If it is OpenResty 1.19, APISIX will use OpenResty's built-in LuaJIT to run `bin/apisix`; otherwise it will use Lua 5.1. If you encounter `luajit: lj_asm_x86.h:2819: asm_loop_fixup: Assertion '((intptr_t)target & 15) == 0' failed`, this is a problem with older versions of OpenResty's built-in LuaJIT under certain compilation conditions.
+
+- On some platforms, installing LuaRocks via the package manager will cause Lua to be upgraded to Lua 5.3, so we recommend installing LuaRocks via source code. If you install OpenResty and its OpenSSL development library (openresty-openssl111-devel for rpm and openresty-openssl111-dev for deb) via the official repository, then [we provide a script for automatic installation](https://github.com/apache/apisix/blob/master/utils/linux-install-luarocks.sh). If you compile OpenResty yourself, you can refer to the above script and change the path in it. If you don't specify the OpenSSL library path when you compile, you don't need to configure the OpenSSL variables in LuaRocks, because the system's OpenSSL is used by default. If the OpenSSL library is specified at compile time, then you need to ensure that LuaRocks' OpenSSL configuration is consistent with OpenResty's.
+
+- OpenResty is a dependency of APISIX. If this is your first time deploying APISIX and you don't need to use OpenResty to deploy other services, you can stop and disable OpenResty after installation since it will not affect the normal operation of APISIX. Please operate carefully according to your service.
For example in Ubuntu: `systemctl stop openresty && systemctl disable openresty`. + +## Install + +Run the following command to install Apache APISIX's dependencies on a supported operating system. + +Supported OS versions: CentOS7, Fedora31 & 32, Ubuntu 16.04 & 18.04, Debian 9 & 10, Arch Linux. + +Note that in the case of Arch Linux, we use `openresty` from the AUR, thus requiring a AUR helper. For now `yay` and `pacaur` are supported. + +``` +curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-dependencies.sh -sL | bash - +``` + +If you have cloned the Apache APISIX project, execute in the Apache APISIX root directory: + +``` +bash utils/install-dependencies.sh +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/installation-guide.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/installation-guide.md new file mode 100644 index 0000000..44fbd3f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/installation-guide.md @@ -0,0 +1,340 @@ +--- +title: Installation +keywords: + - APISIX + - Installation +description: This document walks you through the different Apache APISIX installation methods. +--- + + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +This guide walks you through how you can install and run Apache APISIX in your environment. + +Refer to the [Getting Started](./getting-started/README.md) guide for a quick walk-through on running Apache APISIX. + +## Installing APISIX + +APISIX can be installed by the different methods listed below: + + + + +First clone the [apisix-docker](https://github.com/apache/apisix-docker) repository: + +```shell +git clone https://github.com/apache/apisix-docker.git +cd apisix-docker/example +``` + +Now, you can use `docker-compose` to start APISIX. 
+ + + + +```shell +docker-compose -p docker-apisix up -d +``` + + + + + +```shell +docker-compose -p docker-apisix -f docker-compose-arm64.yml up -d +``` + + + + + + + + +To install APISIX via Helm, run: + +```shell +helm repo add apisix https://charts.apiseven.com +helm repo update +helm install apisix apisix/apisix --create-namespace --namespace apisix +``` + +You can find other Helm charts on the [apisix-helm-chart](https://github.com/apache/apisix-helm-chart) repository. + + + + + +This installation method is suitable for CentOS 7 and Centos 8. If you choose this method to install APISIX, you need to install etcd first. For the specific installation method, please refer to [Installing etcd](#installing-etcd). + +### Installation via RPM repository + +If OpenResty is **not** installed, you can run the command below to install both OpenResty and APISIX repositories: + +```shell +sudo yum install -y https://repos.apiseven.com/packages/centos/apache-apisix-repo-1.0-1.noarch.rpm +``` + +If OpenResty is installed, the command below will install the APISIX repositories: + +```shell +sudo yum-config-manager --add-repo https://repos.apiseven.com/packages/centos/apache-apisix.repo +``` + +Then, to install APISIX, run: + +```shell +sudo yum install apisix +``` + +:::tip + +You can also install a specific version of APISIX by specifying it: + +```shell +sudo yum install apisix-3.8.0 +``` + +::: + +### Installation via RPM offline package + +First, download APISIX RPM offline package to an `apisix` folder: + +```shell +sudo mkdir -p apisix +sudo yum install -y https://repos.apiseven.com/packages/centos/apache-apisix-repo-1.0-1.noarch.rpm +sudo yum clean all && yum makecache +sudo yum install -y --downloadonly --downloaddir=./apisix apisix +``` + +Then copy the `apisix` folder to the target host and run: + +```shell +sudo yum install ./apisix/*.rpm +``` + +### Managing APISIX server + +Once APISIX is installed, you can initialize the configuration file and etcd by running: + 
+```shell +apisix init +``` + +To start APISIX server, run: + +```shell +apisix start +``` + +:::tip + +Run `apisix help` to get a list of all available operations. + +::: + + + + + +### Installation via DEB repository + +Currently the only DEB repository supported by APISIX is Debian 11 (Bullseye) and supports both amd64 and arm64 architectures. + +```shell +# amd64 +wget -O - http://repos.apiseven.com/pubkey.gpg | sudo apt-key add - +echo "deb http://repos.apiseven.com/packages/debian bullseye main" | sudo tee /etc/apt/sources.list.d/apisix.list + +# arm64 +wget -O - http://repos.apiseven.com/pubkey.gpg | sudo apt-key add - +echo "deb http://repos.apiseven.com/packages/arm64/debian bullseye main" | sudo tee /etc/apt/sources.list.d/apisix.list +``` + +Then, to install APISIX, run: + +```shell +sudo apt update +sudo apt install -y apisix=3.8.0-0 +``` + +### Managing APISIX server + +Once APISIX is installed, you can initialize the configuration file and etcd by running: + +```shell +sudo apisix init +``` + +To start APISIX server, run: + +```shell +sudo apisix start +``` + +:::tip + +Run `apisix help` to get a list of all available operations. + +::: + + + + + +If you want to build APISIX from source, please refer to [Building APISIX from source](./building-apisix.md). + + + + +## Installing etcd + +APISIX uses [etcd](https://github.com/etcd-io/etcd) to save and synchronize configuration. Before installing APISIX, you need to install etcd on your machine. + +It would be installed automatically if you choose the Docker or Helm install method while installing APISIX. 
If you choose a different method or you need to install it manually, follow the steps shown below:
+
+
+
+```shell
+ETCD_VERSION='3.5.4'
+wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz
+tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \
+  cd etcd-v${ETCD_VERSION}-linux-amd64 && \
+  sudo cp -a etcd etcdctl /usr/bin/
+nohup etcd >/tmp/etcd.log 2>&1 &
+```
+
+
+
+
+```shell
+brew install etcd
+brew services start etcd
+```
+
+
+
+## Next steps
+
+### Configuring APISIX
+
+You can configure your APISIX deployment in two ways:
+
+1. By directly changing your configuration file (`conf/config.yaml`).
+2. By using the `--config` or the `-c` flag to pass the path to your configuration file while starting APISIX.
+
+   ```shell
+   apisix start -c <path to config file>
+   ```
+
+APISIX will use the configurations added in this configuration file and will fall back to the default configuration if anything is not configured. The default configurations can be found in `apisix/cli/config.lua` and should not be modified.
+
+For example, to configure the default listening port to be `8000` without changing other configurations, your configuration file could look like this:
+
+```yaml title="conf/config.yaml"
+apisix:
+  node_listen: 8000
+```
+
+Now, if you decide you want to change the etcd address to `http://foo:2379`, you can add it to your configuration file. This will not change other configurations.
+
+```yaml title="conf/config.yaml"
+apisix:
+  node_listen: 8000
+
+deployment:
+  role: traditional
+  role_traditional:
+    config_provider: etcd
+  etcd:
+    host:
+      - "http://foo:2379"
+```
+
+:::warning
+
+The `conf/nginx.conf` file is automatically generated and should not be modified.
+
+:::
+
+### APISIX deployment modes
+
+APISIX has three different deployment modes for different use cases. To learn more and configure deployment modes, see the [documentation](./deployment-modes.md).
+ +### Updating Admin API key + +It is recommended to modify the Admin API key to ensure security. + +You can update your configuration file as shown below: + +```yaml title="conf/config.yaml" +deployment: + admin: + admin_key: + - name: "admin" + key: newsupersecurekey + role: admin +``` + +Now, to access the Admin API, you can use the new key: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes?api_key=newsupersecurekey -i +``` + +### Adding APISIX systemd unit file + +If you installed APISIX via RPM, the APISIX unit file will already be configured and you can start APISIX by: + +```shell +systemctl start apisix +systemctl stop apisix +``` + +If you installed APISIX through other methods, you can create `/usr/lib/systemd/system/apisix.service` and add the [configuration from the template](https://github.com/api7/apisix-build-tools/blob/master/usr/lib/systemd/system/apisix.service). + +See the [Getting Started](./getting-started/README.md) guide for a quick walk-through of using APISIX. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/internal/plugin-runner.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/internal/plugin-runner.md new file mode 100644 index 0000000..4117187 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/internal/plugin-runner.md @@ -0,0 +1,78 @@ +--- +title: The Implementation of Plugin Runner +--- + + + +## Prerequirement + +Each request which runs the extern plugin will trigger an RPC to Plugin Runner over a connection on Unix socket. The data of RPC are serialized with [Flatbuffers](https://github.com/google/flatbuffers). + +Therefore, the Plugin Runner needs to: + +1. handle a connection on Unix socket +2. support Flatbuffers +3. use the proto & generated code in https://github.com/api7/ext-plugin-proto/ + +## Listening to the Path + +APISIX will pass the path of Unix socket as an environment variable `APISIX_LISTEN_ADDRESS` to the Plugin Runner. 
So the runner needs to read the value and listen to that address during startup.
+
+## Register Plugins
+
+The Plugin Runner should be able to load plugins written in its particular language.
+
+## Handle RPC
+
+There are two kinds of RPC: PrepareConf & HTTPReqCall
+
+### Handle PrepareConf
+
+As people can configure the extern plugin on the side of APISIX, we need a way to sync the plugin configuration to the Plugin Runner.
+
+When there is a configuration that needs to sync to the Plugin Runner, we will send it via the PrepareConf RPC call. The Plugin Runner should be able to handle the call and store the configuration in a cache, then return a unique conf token that represents the configuration.
+
+In the previous design, an idempotent key was sent with the configuration. This field is deprecated and the Plugin Runner can safely ignore it.
+
+Requests that run plugins with a particular configuration will bear a particular conf token in the RPC call, and the Plugin Runner is expected to look up the actual configuration via the token.
+
+When the configuration is modified, APISIX will send a new PrepareConf to the Plugin Runner. Currently, there is no way to notify the Plugin Runner that a configuration has been removed. Therefore, we introduce another environment variable `APISIX_CONF_EXPIRE_TIME` as the conf cache expire time. The Plugin Runner should be able to cache the conf slightly longer than `APISIX_CONF_EXPIRE_TIME`, and APISIX will send another PrepareConf to refresh the cache if the configuration still exists after `APISIX_CONF_EXPIRE_TIME` seconds.
+
+### Handle HTTPReqCall
+
+Each request which runs the extern plugin will trigger the HTTPReqCall. The HTTPReqCall is almost a serialized version of the HTTP request, plus a conf token. The Plugin Runner is expected to tell APISIX what to update via the response of the HTTPReqCall RPC call.
+ +Sometimes the plugin in the Plugin Runner needs to know some information that is not part of the HTTPReqCall request, such as the request start time and the route ID in APISIX. Hence the Plugin Runner needs to reply to an `ExtraInfo` message as the response on the connection which sends the HTTPReqCall request. APISIX will read the `ExtraInfo` message and return the asked information. + +Currently, the information below is passed by `ExtraInfo`: + +* variable value +* request body + +The flow of HTTPReqCall procession is: + +``` +APISIX sends HTTPReqCall +Plugin Runner looks up the plugin configuration by the token in HTTPReqCall +(optional) loop: +    Plugin Runner asks for ExtraInfo +    APISIX replies the ExtraInfo +Plugin Runner replies HTTPReqCall +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/internal/testing-framework.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/internal/testing-framework.md new file mode 100644 index 0000000..7fcdf01 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/internal/testing-framework.md @@ -0,0 +1,376 @@ +--- +title: Introducing APISIX's testing framework +--- + + + +APISIX uses a testing framework based on test-nginx: https://github.com/openresty/test-nginx. +For details, you can check the [documentation](https://metacpan.org/pod/Test::Nginx) of this project. + +If you want to test the CLI behavior of APISIX (`./bin/apisix`), +you need to write a shell script in the t/cli directory to test it. You can refer to the existing test scripts for more details. + +If you want to test the others, you need to write test code based on the framework. + +Here, we briefly describe how to do simple testing based on this framework. + +## Test file + +you need to write test cases in the t/ directory, in a corresponding `.t` file. Note that a single test file should not exceed `800` lines, and if it is too long, it needs to be divided by a suffix. 
For example: + +``` +t/ +├── admin +│ ├── consumers.t +│ ├── consumers2.t +``` + +Both `consumers.t` and `consumers2.t` contain tests for consumers in the Admin API. + +Some of the test files start with this paragraph: + +``` +add_block_preprocessor(sub { + my ($block) = @_; + + if (! $block->request) { + $block->set_value("request", "GET /t"); + } + + if (! $block->no_error_log && ! $block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); +``` + +It means that all tests in this test file that do not define `request` are set to `GET /t`. The same is true for error_log. + +## Preparing the configuration + +When testing a behavior, we need to prepare the configuration. + +If the configuration is from etcd: +We can set up specific configurations through the Admin API. + +``` +=== TEST 7: refer to empty nodes upstream +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream_id": "1", + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(message) + } + } +--- request +GET /t +--- response_body +passed +``` + +Then trigger it in a later test: + +``` +=== TEST 8: hit empty nodes upstream +--- request +GET /index.html +--- error_code: 503 +--- error_log +no valid upstream node +``` + +## Preparing the upstream + +To test the code, we need to provide a mock upstream. + +For HTTP request, the upstream code is put in `t/lib/server.lua`. HTTP request with +a given `path` will trigger the method in the same name. For example, a call to `/server_port` +will call the `_M.server_port`. 
+ +For TCP request, a dummy upstream is used: + +``` +local sock = ngx.req.socket() +local data = sock:receive("1") +ngx.say("hello world") +``` + +If you want to custom the TCP upstream logic, you can use: + +``` +--- stream_upstream_code +local sock = ngx.req.socket() +local data = sock:receive("1") +ngx.sleep(0.2) +ngx.say("hello world") +``` + +## Send request + +We can initiate a request with `request` and set the request headers with `more_headers`. + +For example. + +``` +--- request +PUT /hello?xx=y&xx=z&&y=&&z +body part of the request +--- more_headers +X-Req: foo +X-Req: bar +X-Resp: cat +``` + +Lua code can be used to send multiple requests. + +One request after another: + +``` +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/server_port" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + } + } +``` + +Sending multiple requests concurrently: + +``` +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port?var=2&var2=" + + + local t = {} + local ports_count = {} + for i = 1, 180 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri..i, {method = "GET"}) + if not res then + ngx.log(ngx.ERR, err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + } + } +``` + +## Send TCP request + +We can use `stream_request` to send a TCP request, for example: + +``` +--- stream_request +hello +``` + +To send a TLS over TCP request, we can combine `stream_tls_request` with `stream_sni`: + +``` +--- stream_tls_request +mmm +--- stream_sni: xx.com +``` + +## Assertions + +The following assertions are commonly used. + +Check status (if not set, the framework will check if the request has 200 status code). + +``` +--- error_code: 405 +``` + +Check response headers. + +``` +--- response_headers +X-Resp: foo +X-Req: foo, bar +``` + +Check response body. + +``` +--- response_body +[{"count":12, "port": "1982"}] +``` + +Check the TCP response. + +When the request is sent via `stream_request`: + +``` +--- stream_response +receive stream response error: connection reset by peer +``` + +When the request is sent via `stream_tls_request`: + +``` +--- response_body +receive stream response error: connection reset by peer +``` + +Checking the error log (via grep error log with regular expression). + +``` +--- grep_error_log eval +qr/hash_on: header|chash_key: "custom-one"/ +--- grep_error_log_out +hash_on: header +chash_key: "custom-one" +hash_on: header +chash_key: "custom-one" +hash_on: header +chash_key: "custom-one" +hash_on: header +chash_key: "custom-one" +``` + +The default log level is `info`, but you can get the debug level log with `--- log_level: debug`. + +## Upstream + +The test framework listens to multiple ports when it is started. + +* 1980/1981/1982/5044: HTTP upstream port. 
We provide a mock upstream server for testing. See below for more information. +* 1983: HTTPS upstream port +* 1984: APISIX HTTP port. Can be used to verify HTTP related gateway logic, such as concurrent access to an API. +* 1985: APISIX TCP port. Can be used to verify TCP related gateway logic, such as concurrent access to an API. +* 1994: APISIX HTTPS port. Can be used to verify HTTPS related gateway logic, such as testing certificate matching logic. +* 1995: TCP upstream port +* 2005: APISIX TLS over TCP port. Can be used to verify TLS over TCP related gateway logic, such as concurrent access to an API. + +The methods in `t/lib/server.lua` are executed when accessing the upstream port. `_M.go` is the entry point for this file. +When the request accesses the upstream `/xxx`, the `_M.xxx` method is executed. For example, a request for `/hello` will execute `_M.hello`. +This allows us to write methods inside `t/lib/server.lua` to emulate specific upstream logic, such as sending special responses. + +Note that before adding new methods to `t/lib/server.lua`, make sure that you can reuse existing methods. + +## Run the test + +Assume your current work directory is the root of the apisix source code. + +1. Git clone the latest [test-nginx](https://github.com/openresty/test-nginx) to `../test-nginx`. +2. Run the test: `prove -I. -I../test-nginx/inc -I../test-nginx/lib -r t/path/to/file.t`. + +## Tips + +### Debugging test cases + +The Nginx configuration and logs generated by the test cases are located in the t/servroot directory. The Nginx configuration template for testing is located in t/APISIX.pm. + +### Running only some test cases + +Three notes can be used to control which parts of the tests are executed. + +FIRST & LAST: + +``` +=== TEST 1: vars rule with ! (set) +--- FIRST +--- config +... +--- response_body +passed + + + +=== TEST 2: vars rule with ! 
(hit) +--- request +GET /hello?name=jack&age=17 +--- LAST +--- error_code: 403 +--- response_body +Fault Injection! +``` + +ONLY: + +``` +=== TEST 1: list empty resources +--- ONLY +--- config +... +--- response_body +{"count":0,"node":{"dir":true,"key":"/apisix/upstreams","nodes":[]}} +``` + +### Executing Shell Commands + +It is possible to execute shell commands while writing tests in test-nginx for APISIX. We expose this feature via `exec` code block. The `stdout` of the executed process can be captured via `response_body` code block and `stderr` (if any) can be captured by filtering error.log through `grep_error_log`. Here is an example: + +``` +=== TEST 1: check exec stdout +--- exec +echo hello world +--- response_body +hello world + + +=== TEST 2: when exec returns an error +--- exec +echxo hello world +--- grep_error_log eval +qr/failed to execute the script [ -~]*/ +--- grep_error_log_out +failed to execute the script with status: 127, reason: exit, stderr: /bin/sh: 1: echxo: not found +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/mtls.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/mtls.md new file mode 100644 index 0000000..d6cfc01 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/mtls.md @@ -0,0 +1,210 @@ +--- +title: Mutual TLS Authentication +keywords: + - Apache APISIX + - Mutual TLS + - mTLS +description: This document describes how you can secure communication to and within APISIX with mTLS. +--- + + + +## Protect Admin API + +### Why use it + +Mutual TLS authentication provides a better way to prevent unauthorized access to APISIX. + +The clients will provide their certificates to the server and the server will check whether the cert is signed by the supplied CA and decide whether to serve the request. + +### How to configure + +1. Generate self-signed key pairs, including ca, server, client key pairs. + +2. 
Modify configuration items in `conf/config.yaml`:
+
+```yaml title="conf/config.yaml"
+  admin_listen:
+    ip: 127.0.0.1
+    port: 9180
+  https_admin: true
+
+  admin_api_mtls:
+    admin_ssl_ca_cert: "/data/certs/mtls_ca.crt" # Path of your self-signed ca cert.
+    admin_ssl_cert: "/data/certs/mtls_server.crt" # Path of your self-signed server side cert.
+    admin_ssl_cert_key: "/data/certs/mtls_server.key" # Path of your self-signed server side key.
+```
+
+3. Run command:
+
+```shell
+apisix init
+apisix reload
+```
+
+### How client calls
+
+Please replace the following certificate paths and domain name with your real ones.
+
+**Note: the same CA certificate as the server needs to be used.**
+
+:::note
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl --cacert /data/certs/mtls_ca.crt --key /data/certs/mtls_client.key --cert /data/certs/mtls_client.crt https://admin.apisix.dev:9180/apisix/admin/routes -H "X-API-KEY: $admin_key"
+```
+
+## etcd with mTLS
+
+### How to configure
+
+You need to configure `etcd.tls` for APISIX to work on an etcd cluster with mTLS enabled as shown below:
+
+```yaml title="conf/config.yaml"
+deployment:
+  role: traditional
+  role_traditional:
+    config_provider: etcd
+  etcd:
+    tls:
+      cert: /data/certs/etcd_client.pem # path of certificate used by the etcd client
+      key: /data/certs/etcd_client.key # path of key used by the etcd client
+```
+
+If APISIX does not trust the CA certificate that is used by the etcd server, we need to set up the CA certificate.
+
+```yaml title="conf/config.yaml"
+apisix:
+  ssl:
+    ssl_trusted_certificate: /path/to/certs/ca-certificates.crt # path of CA certificate used by the etcd server
+```
+
+## Protect Route
+
+### Why use it
+
+Using mTLS is a way to verify clients cryptographically. 
It is useful and important in cases where you want to have encrypted and secure traffic in both directions. + +* Note: the mTLS protection only happens in HTTPS. If your route can also be accessed via HTTP, you should add additional protection in HTTP or disable the access via HTTP.* + +### How to configure + +We provide a [tutorial](./tutorials/client-to-apisix-mtls.md) that explains in detail how to configure mTLS between the client and APISIX. + +When configuring `ssl`, use parameter `client.ca` and `client.depth` to configure the root CA that signing client certificates and the max length of certificate chain. Please refer to [Admin API](./admin-api.md#ssl) for details. + +Here is an example shell script to create SSL with mTLS (id is `1`, changes admin API url if needed): + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert": "'"$(cat t/certs/mtls_server.crt)"'", + "key": "'"$(cat t/certs/mtls_server.key)"'", + "snis": [ + "admin.apisix.dev" + ], + "client": { + "ca": "'"$(cat t/certs/mtls_ca.crt)"'", + "depth": 10 + } +}' +``` + +Send a request to verify: + +```bash +curl --resolve 'mtls.test.com::' "https://:/hello" -k --cert ./client.pem --key ./client.key + +* Added admin.apisix.dev:9443:127.0.0.1 to DNS cache +* Hostname admin.apisix.dev was found in DNS cache +* Trying 127.0.0.1:9443... 
+* Connected to admin.apisix.dev (127.0.0.1) port 9443 (#0) +* ALPN: offers h2 +* ALPN: offers http/1.1 +* CAfile: t/certs/mtls_ca.crt +* CApath: none +* [CONN-0-0][CF-SSL] (304) (OUT), TLS handshake, Client hello (1): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Server hello (2): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Unknown (8): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Request CERT (13): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Certificate (11): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, CERT verify (15): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Finished (20): +* [CONN-0-0][CF-SSL] (304) (OUT), TLS handshake, Certificate (11): +* [CONN-0-0][CF-SSL] (304) (OUT), TLS handshake, CERT verify (15): +* [CONN-0-0][CF-SSL] (304) (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / AEAD-AES256-GCM-SHA384 +* ALPN: server accepted h2 +* Server certificate: +* subject: C=cn; ST=GuangDong; L=ZhuHai; CN=admin.apisix.dev; OU=ops +* start date: Dec 1 10:17:24 2022 GMT +* expire date: Aug 18 10:17:24 2042 GMT +* subjectAltName: host "admin.apisix.dev" matched cert's "admin.apisix.dev" +* issuer: C=cn; ST=GuangDong; L=ZhuHai; CN=ca.apisix.dev; OU=ops +* SSL certificate verify ok. +* Using HTTP2, server supports multiplexing +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* h2h3 [:method: GET] +* h2h3 [:path: /hello] +* h2h3 [:scheme: https] +* h2h3 [:authority: admin.apisix.dev:9443] +* h2h3 [user-agent: curl/7.87.0] +* h2h3 [accept: */*] +* Using Stream ID: 1 (easy handle 0x13000bc00) +> GET /hello HTTP/2 +> Host: admin.apisix.dev:9443 +> user-agent: curl/7.87.0 +> accept: */* +``` + +Please make sure that the SNI fits the certificate domain. + +## mTLS Between APISIX and Upstream + +### Why use it + +Sometimes the upstream requires mTLS. In this situation, the APISIX acts as the client, it needs to provide client certificate to communicate with upstream. 
+ +### How to configure + +When configuring `upstreams`, we could use parameter `tls.client_cert` and `tls.client_key` to configure the client certificate APISIX used to communicate with upstreams. Please refer to [Admin API](./admin-api.md#upstream) for details. + +This feature requires APISIX to run on [APISIX-Runtime](./FAQ.md#how-do-i-build-the-apisix-runtime-environment). + +Here is a similar shell script to patch a existed upstream with mTLS (changes admin API url if needed): + +```shell +curl http://127.0.0.1:9180/apisix/admin/upstreams/1 \ +-H "X-API-KEY: $admin_key" -X PATCH -d ' +{ + "tls": { + "client_cert": "'"$(cat t/certs/mtls_client.crt)"'", + "client_key": "'"$(cat t/certs/mtls_client.key)"'" + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugin-develop.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugin-develop.md new file mode 100644 index 0000000..aa5e6c0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugin-develop.md @@ -0,0 +1,503 @@ +--- +title: Plugin Develop +--- + + + +This documentation is about developing plugin in Lua. For other languages, +see [external plugin](./external-plugin.md). + +## Where to put your plugins + +Use the `extra_lua_path` parameter in `conf/config.yaml` file to load your custom plugin code (or use `extra_lua_cpath` for compiled `.so` or `.dll` file). + +For example, you can create a directory `/path/to/example`: + +```yaml +apisix: + ... + extra_lua_path: "/path/to/example/?.lua" +``` + +The structure of the `example` directory should look like this: + +``` +├── example +│   └── apisix +│   ├── plugins +│   │   └── 3rd-party.lua +│   └── stream +│   └── plugins +│   └── 3rd-party.lua +``` + +:::note + +The directory (`/path/to/example`) must contain the `/apisix/plugins` subdirectory. + +::: + +## Enable the plugin + +To enable your custom plugin, add the plugin list to `conf/config.yaml` and append your plugin name. 
For instance:
+
+```yaml
+plugins: # See `conf/config.yaml.example` for an example
+  - ... # Add existing plugins
+  - your-plugin # Add your custom plugin name (name is the plugin name defined in the code)
+```
+
+:::warning
+
+In particular, most APISIX plugins are enabled by default when the plugins field configuration is not defined (The default enabled plugins can be found in [apisix/cli/config.lua](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua)).
+
+Once the plugins configuration is defined in `conf/config.yaml`, the new plugins list will replace the default configuration instead of merging. Therefore, when defining the `plugins` field, make sure to include the built-in plugins that are being used. To maintain consistency with the default behavior, you can include all the default enabled plugins defined in `apisix/cli/config.lua`.
+
+:::
+
+## Writing plugins
+
+The [`example-plugin`](https://github.com/apache/apisix/blob/master/apisix/plugins/example-plugin.lua) plugin in this repo provides an example.
+
+### Naming and priority
+
+Specify the plugin name (the name is the unique identifier of the plugin and cannot be duplicated) and priority in the code.
+
+```lua
+local plugin_name = "example-plugin"
+
+local _M = {
+    version = 0.1,
+    priority = 0,
+    name = plugin_name,
+    schema = schema,
+    metadata_schema = metadata_schema,
+}
+```
+
+Note: The priority of the new plugin cannot be the same as that of any existing one; you can use the `/v1/schema` method of [control API](./control-api.md#get-v1schema) to view the priority of all plugins. In addition, plugins with higher priority value will be executed first in a given phase (see the definition of `phase` in [choose-phase-to-run](#choose-phase-to-run)). For example, the priority of example-plugin is 0 and the priority of ip-restriction is 3000. Therefore, the ip-restriction plugin will be executed first, then the example-plugin plugin. 
It's recommended to use priority 1 ~ 99 for your plugin unless you want it to run before some builtin plugins. + +Note: the order of the plugins is not related to the order of execution. + +### Schema and check + +Write [JSON Schema](https://json-schema.org) descriptions and check functions. Similarly, take the example-plugin plugin as an example to see its +configuration data: + +```json +{ + "example-plugin": { + "i": 1, + "s": "s", + "t": [1] + } +} +``` + +Let's look at its schema description : + +```lua +local schema = { + type = "object", + properties = { + i = {type = "number", minimum = 0}, + s = {type = "string"}, + t = {type = "array", minItems = 1}, + ip = {type = "string"}, + port = {type = "integer"}, + }, + required = {"i"}, +} +``` + +The schema defines a non-negative number `i`, a string `s`, a non-empty array of `t`, and `ip` / `port`. Only `i` is required. + +At the same time, we need to implement the __check_schema(conf, schema_type)__ method to complete the specification verification. + +```lua +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end +``` + +:::note + +Note: the project has provided the public method "__core.schema.check__", which can be used directly to complete JSON +verification. + +::: + +The input parameter **schema_type** is used to distinguish between different schemas types. For example, many plugins need to use some [metadata](./terminology/plugin-metadata.md), so they define the plugin's `metadata_schema`. 
+ +```lua title="example-plugin.lua" +-- schema definition for metadata +local metadata_schema = { + type = "object", + properties = { + ikey = {type = "number", minimum = 0}, + skey = {type = "string"}, + }, + required = {"ikey", "skey"}, +} + +function _M.check_schema(conf, schema_type) + --- check schema for metadata + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + return core.schema.check(schema, conf) +end +``` + +Another example, the [key-auth](https://github.com/apache/apisix/blob/master/apisix/plugins/key-auth.lua) plugin needs to provide a `consumer_schema` to check the configuration of the `plugins` attribute of the `consumer` resource in order to be used with the [Consumer](./admin-api.md#consumer) resource. + +```lua title="key-auth.lua" + +local consumer_schema = { + type = "object", + properties = { + key = {type = "string"}, + }, + required = {"key"}, +} + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_CONSUMER then + return core.schema.check(consumer_schema, conf) + else + return core.schema.check(schema, conf) + end +end +``` + +### Choose phase to run + +Determine which [phase](./terminology/plugin.md#plugins-execution-lifecycle) to run, generally access or rewrite. If you don't know the [OpenResty lifecycle](https://github.com/openresty/lua-nginx-module/blob/master/README.markdown#directives), it's +recommended to learn about it in advance. For example `key-auth` is an authentication plugin, thus the authentication should be completed +before forwarding the request to any upstream service. Therefore, the plugin must be executed in the rewrite phases. +Similarly, if you want to modify or process the response body or headers you can do that in the `body_filter` or in the `header_filter` phases respectively. + +The following code snippet shows how to implement any logic relevant to the plugin in the OpenResty log phase. 
+ +```lua +function _M.log(conf, ctx) +-- Implement logic here +end +``` + +**Note : we can't invoke `ngx.exit`, `ngx.redirect` or `core.respond.exit` in rewrite phase and access phase. if need to exit, just return the status and body, the plugin engine will make the exit happen with the returned status and body. [example](https://github.com/apache/apisix/blob/35269581e21473e1a27b11cceca6f773cad0192a/apisix/plugins/limit-count.lua#L177)** + +### extra phase + +Besides OpenResty's phases, we also provide extra phases to satisfy specific purpose: + +* `delayed_body_filter` + +```lua +function _M.delayed_body_filter(conf, ctx) + -- delayed_body_filter is called after body_filter + -- it is used by the tracing plugins to end the span right after body_filter +end +``` + +### Implement the logic + +Write the logic of the plugin in the corresponding phase. There are two parameters `conf` and `ctx` in the phase method, take the `limit-conn` plugin configuration as an example. + +#### conf parameter + +The `conf` parameter is the relevant configuration information of the plugin, you can use `core.log.warn(core.json.encode(conf))` to output it to `error.log` for viewing, as shown below: + +```lua +function _M.access(conf, ctx) + core.log.warn(core.json.encode(conf)) + ...... +end +``` + +conf: + +```json +{ + "rejected_code": 503, + "burst": 0, + "default_conn_delay": 0.1, + "conn": 1, + "key": "remote_addr" +} +``` + +#### ctx parameter + +The `ctx` parameter caches data information related to the request. You can use `core.log.warn(core.json.encode(ctx, true))` to output it to `error.log` for viewing, as shown below : + +```lua +function _M.access(conf, ctx) + core.log.warn(core.json.encode(ctx, true)) + ...... 
+end +``` + +### Others + +If your plugin has a new code directory of its own, and you need to redistribute it with the APISIX source code, you will need to modify the `Makefile` to create directory, such as: + +``` +$(INSTALL) -d $(INST_LUADIR)/apisix/plugins/skywalking +$(INSTALL) apisix/plugins/skywalking/*.lua $(INST_LUADIR)/apisix/plugins/skywalking/ +``` + +There are other fields in the `_M` which affect the plugin's behavior. + +```lua +local _M = { + ... + type = 'auth', + run_policy = 'prefer_route', +} +``` + +`run_policy` field can be used to control the behavior of the plugin execution. +When this field set to `prefer_route`, and the plugin has been configured both +in the global and at the route level, only the route level one will take effect. + +`type` field is required to be set to `auth` if your plugin needs to work with consumer. + +## Load plugin and replace plugin + +Using `require "apisix.plugins.3rd-party"` will load your plugin, just like `require "apisix.plugins.jwt-auth"` will load the `jwt-auth` plugin. + +Sometimes you may want to override a method instead of a whole file. In this case, you can configure `lua_module_hook` in `conf/config.yaml` +to introduce your hook. + +Assume that your configuration is as follows: + +```yaml +apisix: + ... + extra_lua_path: "/path/to/example/?.lua" + lua_module_hook: "my_hook" +``` + +The `example/my_hook.lua` will be loaded when APISIX starts, and you can use this hook to replace a method in APISIX. +The example of [my_hook.lua](https://github.com/apache/apisix/blob/master/example/my_hook.lua) can be found under the `example` directory of this project. + +## Check external dependencies + +If you have dependencies on external libraries, check the dependent items. 
If your plugin needs to use shared memory, it +needs to declare via [customizing Nginx configuration](./customize-nginx-configuration.md), for example : + +```yaml +# put this in config.yaml: +nginx_config: + http_configuration_snippet: | + # for openid-connect plugin + lua_shared_dict discovery 1m; # cache for discovery metadata documents + lua_shared_dict jwks 1m; # cache for JWKs + lua_shared_dict introspection 10m; # cache for JWT verification results +``` + +The plugin itself provides the init method. It is convenient for plugins to perform some initialization after +the plugin is loaded. If you need to clean up the initialization, you can put it in the corresponding destroy method. + +Note : if the dependency of some plugin needs to be initialized when Nginx start, you may need to add logic to the initialization +method "http_init" in the file `apisix/init.lua`, and you may need to add some processing on generated part of Nginx +configuration file in `apisix/cli/ngx_tpl.lua` file. But it is easy to have an impact on the overall situation according to the +existing plugin mechanism, **we do not recommend this unless you have a complete grasp of the code**. + +## Encrypted storage fields + +Some plugins require parameters to be stored encrypted, such as the `password` parameter of the `basic-auth` plugin. This plugin needs to specify in the `schema` which parameters need to be stored encrypted. + +```lua +encrypt_fields = {"password"} +``` + +If it is a nested parameter, such as the `clickhouse.password` parameter of the `error-log-logger` plugin, it needs to be separated by `.`: + +```lua +encrypt_fields = {"clickhouse.password"} +``` + +Currently not supported yet: + +1. more than two levels of nesting +2. fields in arrays + +Parameters can be stored encrypted by specifying `encrypt_fields = {"password"}` in the `schema`. APISIX will provide the following functionality. 
+ +- When adding and updating resources, APISIX automatically encrypts the parameters declared in `encrypt_fields` and stores them in etcd +- When fetching resources and when running the plugin, APISIX automatically decrypts the parameters declared in `encrypt_fields` + +By default, APISIX has `data_encryption` enabled with [two default keys](https://github.com/apache/apisix/blob/85563f016c35834763376894e45908b2fb582d87/apisix/cli/config.lua#L75), you can modify them in `config.yaml`. + +```yaml +apisix: + data_encryption: + enable: true + keyring: + - ... +``` + +APISIX will try to decrypt the data with keys in the order of the keys in the keyring (only for parameters declared in `encrypt_fields`). If the decryption fails, the next key will be tried until the decryption succeeds. + +If none of the keys in `keyring` can decrypt the data, the original data is used. + +## Register public API + +A plugin can register API which exposes to the public. Take batch-requests plugin as an example, this plugin registers `POST /apisix/batch-requests` to allow developers to group multiple API requests into a single HTTP request/response cycle: + +```lua +function batch_requests() + -- ... +end + +function _M.api() + -- ... + return { + { + methods = {"POST"}, + uri = "/apisix/batch-requests", + handler = batch_requests, + } + } +end +``` + +Note that the public API will not be exposed by default, you will need to use the [public-api plugin](plugins/public-api.md) to expose it. + +## Register control API + +If you only want to expose the API to the localhost or intranet, you can expose it via [Control API](./control-api.md). 
+
+Take a look at the example-plugin plugin:
+
+```lua
+local function hello()
+    local args = ngx.req.get_uri_args()
+    if args["json"] then
+        return 200, {msg = "world"}
+    else
+        return 200, "world\n"
+    end
+end
+
+
+function _M.control_api()
+    return {
+        {
+            methods = {"GET"},
+            uris = {"/v1/plugin/example-plugin/hello"},
+            handler = hello,
+        }
+    }
+end
+```
+
+If you don't change the default control API configuration, the plugin will expose `GET /v1/plugin/example-plugin/hello` which can only be accessed via `127.0.0.1`. Test with the following command:
+
+```shell
+curl -i -X GET "http://127.0.0.1:9090/v1/plugin/example-plugin/hello"
+```
+
+[Read more about control API introduction](./control-api.md)
+
+## Register custom variables
+
+We can use variables in many places of APISIX. For example, customizing log format in http-logger, using it as the key of `limit-*` plugins. In some situations, the builtin variables are not enough. Therefore, APISIX allows developers to register their variables globally, and use them as normal builtin variables.
+
+For instance, let's register a variable called `a6_labels_zone` to fetch the value of the `zone` label in a route:
+
+```
+local core = require "apisix.core"
+
+core.ctx.register_var("a6_labels_zone", function(ctx)
+    local route = ctx.matched_route and ctx.matched_route.value
+    if route and route.labels then
+        return route.labels.zone
+    end
+    return nil
+end)
+```
+
+After that, any get operation to `$a6_labels_zone` will call the registered getter to fetch the value.
+
+Note that the custom variables can't be used in features that depend on the Nginx directive, like `access_log_format`.
+
+## Write test cases
+
+For functions, write and improve the test cases of various dimensions, do a comprehensive test for your plugin! The
+test cases of plugins are all in the "__t/plugin__" directory. You can go ahead to find out. APISIX uses
+[****test-nginx****](https://github.com/openresty/test-nginx) as the test framework. 
A test case (.t file) is usually +divided into prologue and data parts by \__data\__. Here we will briefly introduce the data part, that is, the part +of the real test case. For example, the key-auth plugin: + +```perl +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.key-auth") + local ok, err = plugin.check_schema({key = 'test-key'}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +[error] +``` + +A test case consists of three parts : + +- __Program code__ : configuration content of Nginx location +- __Input__ : http request information +- __Output check__ : status, header, body, error log check + +When we request __/t__, which config in the configuration file, the Nginx will call "__content_by_lua_block__" instruction to +complete the Lua script, and finally return. The assertion of the use case is response_body return "done", +"__no_error_log__" means to check the "__error.log__" of Nginx. There must be no ERROR level record. The log files for the unit test +are located in the following folder: 't/servroot/logs'. + +The above test case represents a simple scenario. Most scenarios will require multiple steps to validate. To do this, create multiple tests `=== TEST 1`, `=== TEST 2`, and so on. These tests will be executed sequentially, allowing you to break down scenarios into a sequence of atomic steps. + +Additionally, there are some convenience testing endpoints which can be found [here](https://github.com/apache/apisix/blob/master/t/lib/server.lua#L36). For example, see [proxy-rewrite](https://github.com/apache/apisix/blob/master/t/plugin/proxy-rewrite.t). In test 42, the upstream `uri` is made to redirect `/test?new_uri=hello` to `/hello` (which always returns `hello world`). 
In test 43, the response body is confirmed to equal `hello world`, meaning the proxy-rewrite configuration added with test 42 worked correctly. + +Refer the following [document](building-apisix.md) to setup the testing framework. + +### Attach the test-nginx execution process: + +According to the path we configured in the makefile and some configuration items at the front of each __.t__ file, the +framework will assemble into a complete nginx.conf file. "__t/servroot__" is the working directory of Nginx and start the +Nginx instance. according to the information provided by the test case, initiate the http request and check that the +return items of HTTP include HTTP status, HTTP response header, HTTP response body and so on. + +## Additional Resource(s) + +- Key Concepts - [Plugins](https://apisix.apache.org/docs/apisix/terminology/plugin/) +- [Apache APISIX Extensions Guide](https://apisix.apache.org/blog/2021/10/29/extension-guide/) +- [Create a Custom Plugin in Lua](https://docs.api7.ai/apisix/how-to-guide/custom-plugins/create-plugin-in-lua) +- [example-plugin code](https://github.com/apache/apisix/blob/master/apisix/plugins/example-plugin.lua) diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-aws-content-moderation.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-aws-content-moderation.md new file mode 100644 index 0000000..9684482 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-aws-content-moderation.md @@ -0,0 +1,247 @@ +--- +title: ai-aws-content-moderation +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ai-aws-content-moderation +description: This document contains information about the Apache APISIX ai-aws-content-moderation Plugin. +--- + + + +## Description + +The `ai-aws-content-moderation` plugin processes the request body to check for toxicity and rejects the request if it exceeds the configured threshold. 
+ +**_This plugin must be used in routes that proxy requests to LLMs only._** + +**_As of now, the plugin only supports the integration with [AWS Comprehend](https://aws.amazon.com/comprehend/) for content moderation. PRs for introducing support for other service providers are welcomed._** + +## Plugin Attributes + +| **Field** | **Required** | **Type** | **Description** | +| ---------------------------- | ------------ | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| comprehend.access_key_id | Yes | String | AWS access key ID | +| comprehend.secret_access_key | Yes | String | AWS secret access key | +| comprehend.region | Yes | String | AWS region | +| comprehend.endpoint | No | String | AWS Comprehend service endpoint. Must match the pattern `^https?://` | +| comprehend.ssl_verify | No | String | Enables SSL certificate verification. | +| moderation_categories | No | Object | Key-value pairs of moderation category and their score. In each pair, the key should be one of the `PROFANITY`, `HATE_SPEECH`, `INSULT`, `HARASSMENT_OR_ABUSE`, `SEXUAL`, or `VIOLENCE_OR_THREAT`; and the value should be between 0 and 1 (inclusive). | +| moderation_threshold | No | Number | The degree to which content is harmful, offensive, or inappropriate. A higher value indicates more toxic content allowed. Range: 0 - 1. 
Default: 0.5 | + +## Example usage + +First initialise these shell variables: + +```shell +ADMIN_API_KEY=edd1c9f034335f136f87ad84b625c8f1 +ACCESS_KEY_ID=aws-comprehend-access-key-id-here +SECRET_ACCESS_KEY=aws-comprehend-secret-access-key-here +OPENAI_KEY=open-ai-key-here +``` + +Create a route with the `ai-aws-content-moderation` and `ai-proxy` plugin like so: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/post", + "plugins": { + "ai-aws-content-moderation": { + "comprehend": { + "access_key_id": "'"$ACCESS_KEY_ID"'", + "secret_access_key": "'"$SECRET_ACCESS_KEY"'", + "region": "us-east-1" + }, + "moderation_categories": { + "PROFANITY": 0.5 + } + }, + "ai-proxy": { + "auth": { + "header": { + "api-key": "'"$OPENAI_KEY"'" + } + }, + "model": { + "provider": "openai", + "name": "gpt-4", + "options": { + "max_tokens": 512, + "temperature": 1.0 + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +The `ai-proxy` plugin is used here as it simplifies access to LLMs. However, you may configure the LLM in the upstream configuration as well. + +Now send a request: + +```shell +curl http://127.0.0.1:9080/post -i -XPOST -H 'Content-Type: application/json' -d '{ + "messages": [ + { + "role": "user", + "content": "" + } + ] +}' +``` + +Then the request will be blocked with error like this: + +```text +HTTP/1.1 400 Bad Request +Date: Thu, 03 Oct 2024 11:53:15 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.10.0 + +request body exceeds PROFANITY threshold +``` + +Send a request with compliant content in the request body: + +```shell +curl http://127.0.0.1:9080/post -i -XPOST -H 'Content-Type: application/json' -d '{ + "messages": [ + { + "role": "system", + "content": "You are a mathematician" + }, + { "role": "user", "content": "What is 1+1?" 
} + ] +}' +``` + +This request will be proxied normally to the configured LLM. + +```text +HTTP/1.1 200 OK +Date: Thu, 03 Oct 2024 11:53:00 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.10.0 + +{"choices":[{"finish_reason":"stop","index":0,"message":{"content":"1+1 equals 2.","role":"assistant"}}],"created":1727956380,"id":"chatcmpl-AEEg8Pe5BAW5Sw3C1gdwXnuyulIkY","model":"gpt-4o-2024-05-13","object":"chat.completion","system_fingerprint":"fp_67802d9a6d","usage":{"completion_tokens":7,"prompt_tokens":23,"total_tokens":30}} +``` + +You can also configure filters on other moderation categories like so: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/post", + "plugins": { + "ai-aws-content-moderation": { + "comprehend": { + "access_key_id": "'"$ACCESS_KEY_ID"'", + "secret_access_key": "'"$SECRET_ACCESS_KEY"'", + "region": "us-east-1" + }, + "moderation_categories": { + "PROFANITY": 0.5, + "HARASSMENT_OR_ABUSE": 0.7, + "SEXUAL": 0.2 + } + }, + "ai-proxy": { + "auth": { + "header": { + "api-key": "'"$OPENAI_KEY"'" + } + }, + "model": { + "provider": "openai", + "name": "gpt-4", + "options": { + "max_tokens": 512, + "temperature": 1.0 + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +If none of the `moderation_categories` are configured, request bodies will be moderated on the basis of overall toxicity. +The default `moderation_threshold` is 0.5, it can be configured like so. 
+ +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/post", + "plugins": { + "ai-aws-content-moderation": { + "provider": { + "comprehend": { + "access_key_id": "'"$ACCESS_KEY_ID"'", + "secret_access_key": "'"$SECRET_ACCESS_KEY"'", + "region": "us-east-1" + } + }, + "moderation_threshold": 0.7, + "llm_provider": "openai" + }, + "ai-proxy": { + "auth": { + "header": { + "api-key": "'"$OPENAI_KEY"'" + } + }, + "model": { + "provider": "openai", + "name": "gpt-4", + "options": { + "max_tokens": 512, + "temperature": 1.0 + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-prompt-decorator.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-prompt-decorator.md new file mode 100644 index 0000000..44ee59e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-prompt-decorator.md @@ -0,0 +1,109 @@ +--- +title: ai-prompt-decorator +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ai-prompt-decorator +description: This document contains information about the Apache APISIX ai-prompt-decorator Plugin. +--- + + + +## Description + +The `ai-prompt-decorator` plugin simplifies access to LLM providers, such as OpenAI and Anthropic, and their models by appending or prepending prompts into the request. + +## Plugin Attributes + +| **Field** | **Required** | **Type** | **Description** | +| ----------------- | --------------- | -------- | --------------------------------------------------- | +| `prepend` | Conditionally\* | Array | An array of prompt objects to be prepended | +| `prepend.role` | Yes | String | Role of the message (`system`, `user`, `assistant`) | +| `prepend.content` | Yes | String | Content of the message. 
Minimum length: 1 | +| `append` | Conditionally\* | Array | An array of prompt objects to be appended | +| `append.role` | Yes | String | Role of the message (`system`, `user`, `assistant`) | +| `append.content` | Yes | String | Content of the message. Minimum length: 1 | + +\* **Conditionally Required**: At least one of `prepend` or `append` must be provided. + +## Example usage + +Create a route with the `ai-prompt-decorator` plugin like so: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/v1/chat/completions", + "plugins": { + "ai-prompt-decorator": { + "prepend":[ + { + "role": "system", + "content": "I have exams tomorrow so explain conceptually and briefly" + } + ], + "append":[ + { + "role": "system", + "content": "End the response with an analogy." + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "api.openai.com:443": 1 + }, + "pass_host": "node", + "scheme": "https" + } + }' +``` + +Now send a request: + +```shell +curl http://127.0.0.1:9080/v1/chat/completions -i -XPOST -H 'Content-Type: application/json' -d '{ + "model": "gpt-4", + "messages": [{ "role": "user", "content": "What is TLS Handshake?" }] +}' -H "Authorization: Bearer " +``` + +Then the request body will be modified to something like this: + +```json +{ + "model": "gpt-4", + "messages": [ + { + "role": "system", + "content": "I have exams tomorrow so explain conceptually and briefly" + }, + { "role": "user", "content": "What is TLS Handshake?" }, + { + "role": "system", + "content": "End the response with an analogy." 
+ } + ] +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-prompt-guard.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-prompt-guard.md new file mode 100644 index 0000000..51ec3c5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-prompt-guard.md @@ -0,0 +1,89 @@ +--- +title: ai-prompt-guard +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ai-prompt-guard +description: This document contains information about the Apache APISIX ai-prompt-guard Plugin. +--- + + + +## Description + +The `ai-prompt-guard` plugin safeguards your AI endpoints by inspecting and validating incoming prompt messages. It checks the content of requests against user-defined allowed and denied patterns to ensure that only approved inputs are processed. Based on its configuration, the plugin can either examine just the latest message or the entire conversation history, and it can be set to check prompts from all roles or only from end users. + +When both **allow** and **deny** patterns are configured, the plugin first ensures that at least one allowed pattern is matched. If none match, the request is rejected with a _"Request doesn't match allow patterns"_ error. If an allowed pattern is found, it then checks for any occurrences of denied patterns—rejecting the request with a _"Request contains prohibited content"_ error if any are detected. + +## Plugin Attributes + +| **Field** | **Required** | **Type** | **Description** | +| ------------------------------ | ------------ | --------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| match_all_roles | No | boolean | If set to `true`, the plugin will check prompt messages from all roles. Otherwise, it only validates when its role is `"user"`. Default is `false`. 
| +| match_all_conversation_history | No | boolean | When enabled, all messages in the conversation history are concatenated and checked. If `false`, only the content of the last message is examined. Default is `false`. | +| allow_patterns | No | array | A list of regex patterns. When provided, the prompt must match **at least one** pattern to be considered valid. | +| deny_patterns | No | array | A list of regex patterns. If any of these patterns match the prompt content, the request is rejected. | + +## Example usage + +Create a route with the `ai-prompt-guard` plugin like so: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/v1/chat/completions", + "plugins": { + "ai-prompt-guard": { + "match_all_roles": true, + "allow_patterns": [ + "goodword" + ], + "deny_patterns": [ + "badword" + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "api.openai.com:443": 1 + }, + "pass_host": "node", + "scheme": "https" + } + }' +``` + +Now send a request: + +```shell +curl http://127.0.0.1:9080/v1/chat/completions -i -XPOST -H 'Content-Type: application/json' -d '{ + "model": "gpt-4", + "messages": [{ "role": "user", "content": "badword request" }] +}' -H "Authorization: Bearer " +``` + +The request will fail with 400 error and following response. + +```bash +{"message":"Request doesn't match allow patterns"} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-prompt-template.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-prompt-template.md new file mode 100644 index 0000000..9ca4e1f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-prompt-template.md @@ -0,0 +1,102 @@ +--- +title: ai-prompt-template +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ai-prompt-template +description: This document contains information about the Apache APISIX ai-prompt-template Plugin. 
+--- + + + +## Description + +The `ai-prompt-template` plugin simplifies access to LLM providers, such as OpenAI and Anthropic, and their models by predefining the request format +using a template, which only allows users to pass customized values into template variables. + +## Plugin Attributes + +| **Field** | **Required** | **Type** | **Description** | +| ------------------------------------- | ------------ | -------- | --------------------------------------------------------------------------------------------------------------------------- | +| `templates` | Yes | Array | An array of template objects | +| `templates.name` | Yes | String | Name of the template. | +| `templates.template.model` | Yes | String | Model of the AI Model, for example `gpt-4` or `gpt-3.5`. See your LLM provider API documentation for more available models. | +| `templates.template.messages.role` | Yes | String | Role of the message (`system`, `user`, `assistant`) | +| `templates.template.messages.content` | Yes | String | Content of the message. | + +## Example usage + +Create a route with the `ai-prompt-template` plugin like so: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/v1/chat/completions", + "upstream": { + "type": "roundrobin", + "nodes": { + "api.openai.com:443": 1 + }, + "scheme": "https", + "pass_host": "node" + }, + "plugins": { + "ai-prompt-template": { + "templates": [ + { + "name": "level of detail", + "template": { + "model": "gpt-4", + "messages": [ + { + "role": "user", + "content": "Explain about {{ topic }} in {{ level }}." 
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+ }'
+```
+
+Now send a request:
+
+```shell
+curl http://127.0.0.1:9080/v1/chat/completions -i -XPOST -H 'Content-Type: application/json' -d '{
+ "template_name": "level of detail",
+ "topic": "psychology",
+ "level": "brief"
+}' -H "Authorization: Bearer "
+```
+
+Then the request body will be modified to something like this:
+
+```json
+{
+ "model": "some model",
+ "messages": [
+ { "role": "user", "content": "Explain about psychology in brief." }
+ ]
+}
+```
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-proxy-multi.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-proxy-multi.md
new file mode 100644
index 0000000..368228c
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-proxy-multi.md
@@ -0,0 +1,1005 @@
+---
+title: ai-proxy-multi
+keywords:
+ - Apache APISIX
+ - API Gateway
+ - Plugin
+ - ai-proxy-multi
+ - AI
+ - LLM
+description: The ai-proxy-multi Plugin extends the capabilities of ai-proxy with load balancing, retries, fallbacks, and health checks, simplifying the integration with OpenAI, DeepSeek, AIMLAPI, and other OpenAI-compatible APIs.
+---
+
+
+
+
+
+
+
+## Description
+
+The `ai-proxy-multi` Plugin simplifies access to LLM and embedding models by transforming Plugin configurations into the designated request format for OpenAI, DeepSeek, AIMLAPI, and other OpenAI-compatible APIs. It extends the capabilities of [`ai-proxy`](./ai-proxy.md) with load balancing, retries, fallbacks, and health checks.
+
+In addition, the Plugin also supports logging LLM request information in the access log, such as token usage, model, time to the first response, and more.
+
+## Request Format
+
+| Name | Type | Required | Description |
+| ------------------ | ------ | -------- | --------------------------------------------------- |
+| `messages` | Array | True | An array of message objects.
| +| `messages.role` | String | True | Role of the message (`system`, `user`, `assistant`).| +| `messages.content` | String | True | Content of the message. | + +## Attributes + +| Name | Type | Required | Default | Valid Values | Description | +|------------------------------------|----------------|----------|-----------------------------------|--------------|-------------| +| fallback_strategy | string | False | instance_health_and_rate_limiting | instance_health_and_rate_limiting | Fallback strategy. When set, the Plugin will check whether the specified instance’s token has been exhausted when a request is forwarded. If so, forward the request to the next instance regardless of the instance priority. When not set, the Plugin will not forward the request to low priority instances when token of the high priority instance is exhausted. | +| balancer | object | False | | | Load balancing configurations. | +| balancer.algorithm | string | False | roundrobin | [roundrobin, chash] | Load balancing algorithm. When set to `roundrobin`, weighted round robin algorithm is used. When set to `chash`, consistent hashing algorithm is used. | +| balancer.hash_on | string | False | | [vars, headers, cookie, consumer, vars_combinations] | Used when `type` is `chash`. Support hashing on [NGINX variables](https://nginx.org/en/docs/varindex.html), headers, cookie, consumer, or a combination of [NGINX variables](https://nginx.org/en/docs/varindex.html). | +| balancer.key | string | False | | | Used when `type` is `chash`. When `hash_on` is set to `header` or `cookie`, `key` is required. When `hash_on` is set to `consumer`, `key` is not required as the consumer name will be used as the key automatically. | +| instances | array[object] | True | | | LLM instance configurations. | +| instances.name | string | True | | | Name of the LLM service instance. | +| instances.provider | string | True | | [openai, deepseek, aimlapi, openai-compatible] | LLM service provider. 
When set to `openai`, the Plugin will proxy the request to `api.openai.com`. When set to `deepseek`, the Plugin will proxy the request to `api.deepseek.com`. When set to `aimlapi`, the Plugin uses the OpenAI-compatible driver and proxies the request to `api.aimlapi.com` by default. When set to `openai-compatible`, the Plugin will proxy the request to the custom endpoint configured in `override`. | +| instances.priority | integer | False | 0 | | Priority of the LLM instance in load balancing. `priority` takes precedence over `weight`. | +| instances.weight | string | True | 0 | greater or equal to 0 | Weight of the LLM instance in load balancing. | +| instances.auth | object | True | | | Authentication configurations. | +| instances.auth.header | object | False | | | Authentication headers. At least one of the `header` and `query` should be configured. | +| instances.auth.query | object | False | | | Authentication query parameters. At least one of the `header` and `query` should be configured. | +| instances.options | object | False | | | Model configurations. In addition to `model`, you can configure additional parameters and they will be forwarded to the upstream LLM service in the request body. For instance, if you are working with OpenAI, DeepSeek, or AIMLAPI, you can configure additional parameters such as `max_tokens`, `temperature`, `top_p`, and `stream`. See your LLM provider's API documentation for more available options. | +| instances.options.model | string | False | | | Name of the LLM model, such as `gpt-4` or `gpt-3.5`. See your LLM provider's API documentation for more available models. | +| logging | object | False | | | Logging configurations. | +| logging.summaries | boolean | False | false | | If true, log request LLM model, duration, request, and response tokens. | +| logging.payloads | boolean | False | false | | If true, log request and response payload. | +| logging.override | object | False | | | Override setting. 
|
+| logging.override.endpoint | string | False | | | LLM provider endpoint to replace the default endpoint with. If not configured, the Plugin uses the default OpenAI endpoint `https://api.openai.com/v1/chat/completions`. |
+| checks | object | False | | | Health check configurations. Note that at the moment, OpenAI, DeepSeek, and AIMLAPI do not provide an official health check endpoint. Other LLM services that you can configure under `openai-compatible` provider may have available health check endpoints. |
+| checks.active | object | True | | | Active health check configurations. |
+| checks.active.type | string | False | http | [http, https, tcp] | Type of health check connection. |
+| checks.active.timeout | number | False | 1 | | Health check timeout in seconds. |
+| checks.active.concurrency | integer | False | 10 | | Number of upstream nodes to be checked at the same time. |
+| checks.active.host | string | False | | | HTTP host. |
+| checks.active.port | integer | False | | between 1 and 65535 inclusive | HTTP port. |
+| checks.active.http_path | string | False | / | | Path for HTTP probing requests. |
+| checks.active.https_verify_certificate | boolean | False | true | | If true, verify the node's TLS certificate. |
+| timeout | integer | False | 30000 | greater than or equal to 1 | Request timeout in milliseconds when requesting the LLM service. |
+| keepalive | boolean | False | true | | If true, keep the connection alive when requesting the LLM service. |
+| keepalive_timeout | integer | False | 60000 | greater than or equal to 1000 | Keepalive timeout in milliseconds for the connection to the LLM service. |
+| keepalive_pool | integer | False | 30 | | Keepalive pool size when connecting with the LLM service. |
+| ssl_verify | boolean | False | true | | If true, verify the LLM service's certificate. |
+
+## Examples
+
+The examples below demonstrate how you can configure `ai-proxy-multi` for different scenarios.
+ +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Load Balance between Instances + +The following example demonstrates how you can configure two models for load balancing, forwarding 80% of the traffic to one instance and 20% to the other. + +For demonstration and easier differentiation, you will be configuring one OpenAI instance and one DeepSeek instance as the upstream LLM services. + +Create a Route as such and update with your LLM providers, models, API keys, and endpoints if applicable: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ai-proxy-multi-route", + "uri": "/anything", + "methods": ["POST"], + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-instance", + "provider": "openai", + "weight": 8, + "auth": { + "header": { + "Authorization": "Bearer '"$OPENAI_API_KEY"'" + } + }, + "options": { + "model": "gpt-4" + } + }, + { + "name": "deepseek-instance", + "provider": "deepseek", + "weight": 2, + "auth": { + "header": { + "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'" + } + }, + "options": { + "model": "deepseek-chat" + } + } + ] + } + } + }' +``` + +Send 10 POST requests to the Route with a system prompt and a sample user question in the request body, to see the number of requests forwarded to OpenAI and DeepSeek: + +```shell +openai_count=0 +deepseek_count=0 + +for i in {1..10}; do + model=$(curl -s "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" 
}
+ ]
+ }' | jq -r '.model')
+
+ if [[ "$model" == *"gpt-4"* ]]; then
+ ((openai_count++))
+ elif [[ "$model" == "deepseek-chat" ]]; then
+ ((deepseek_count++))
+ fi
+done
+
+echo "OpenAI responses: $openai_count"
+echo "DeepSeek responses: $deepseek_count"
+```
+
+You should see a response similar to the following:
+
+```text
+OpenAI responses: 8
+DeepSeek responses: 2
+```
+
+### Configure Instance Priority and Rate Limiting
+
+The following example demonstrates how you can configure two models with different priorities and apply rate limiting on the instance with a higher priority. In the case where `fallback_strategy` is set to `instance_health_and_rate_limiting`, the Plugin should continue to forward requests to the low priority instance once the high priority instance's rate limiting quota is fully consumed.
+
+Create a Route as such and update with your LLM providers, models, API keys, and endpoints if applicable:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+ -H "X-API-KEY: ${admin_key}" \
+ -d '{
+ "id": "ai-proxy-multi-route",
+ "uri": "/anything",
+ "methods": ["POST"],
+ "plugins": {
+ "ai-proxy-multi": {
+ "fallback_strategy": "instance_health_and_rate_limiting",
+ "instances": [
+ {
+ "name": "openai-instance",
+ "provider": "openai",
+ "priority": 1,
+ "weight": 0,
+ "auth": {
+ "header": {
+ "Authorization": "Bearer '"$OPENAI_API_KEY"'"
+ }
+ },
+ "options": {
+ "model": "gpt-4"
+ }
+ },
+ {
+ "name": "deepseek-instance",
+ "provider": "deepseek",
+ "priority": 0,
+ "weight": 0,
+ "auth": {
+ "header": {
+ "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'"
+ }
+ },
+ "options": {
+ "model": "deepseek-chat"
+ }
+ }
+ ]
+ },
+ "ai-rate-limiting": {
+ "instances": [
+ {
+ "name": "openai-instance",
+ "limit": 10,
+ "time_window": 60
+ }
+ ],
+ "limit_strategy": "total_tokens"
+ }
+ }
+ }'
+```
+
+Send a POST request to the Route with a system prompt and a sample user question in the request body:
+
+```shell
+curl
"http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }' +``` + +You should receive a response similar to the following: + +```json +{ + ..., + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1+1 equals 2.", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 23, + "completion_tokens": 8, + "total_tokens": 31, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": null +} +``` + +Since the `total_tokens` value exceeds the configured quota of `10`, the next request within the 60-second window is expected to be forwarded to the other instance. + +Within the same 60-second window, send another POST request to the route: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "Explain Newton law" } + ] + }' +``` + +You should see a response similar to the following: + +```json +{ + ..., + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Certainly! Newton's laws of motion are three fundamental principles that describe the relationship between the motion of an object and the forces acting on it. They were formulated by Sir Isaac Newton in the late 17th century and are foundational to classical mechanics.\n\n---\n\n### **1. 
Newton's First Law (Law of Inertia):**\n- **Statement:** An object at rest will remain at rest, and an object in motion will continue moving at a constant velocity (in a straight line at a constant speed), unless acted upon by an external force.\n- **Key Idea:** This law introduces the concept of **inertia**, which is the tendency of an object to resist changes in its state of motion.\n- **Example:** If you slide a book across a table, it eventually stops because of the force of friction acting on it. Without friction, the book would keep moving indefinitely.\n\n---\n\n### **2. Newton's Second Law (Law of Acceleration):**\n- **Statement:** The acceleration of an object is directly proportional to the net force acting on it and inversely proportional to its mass. Mathematically, this is expressed as:\n \\[\n F = ma\n \\]\n where:\n - \\( F \\) = net force applied (in Newtons),\n -" + }, + ... + } + ], + ... +} +``` + +### Load Balance and Rate Limit by Consumers + +The following example demonstrates how you can configure two models for load balancing and apply rate limiting by consumer. 
+
+Create a Consumer `johndoe` and a rate limiting quota of 10 tokens in a 60-second window on `openai-instance` instance:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
+ -H "X-API-KEY: ${admin_key}" \
+ -d '{
+ "username": "johndoe",
+ "plugins": {
+ "ai-rate-limiting": {
+ "instances": [
+ {
+ "name": "openai-instance",
+ "limit": 10,
+ "time_window": 60
+ }
+ ],
+ "rejected_code": 429,
+ "limit_strategy": "total_tokens"
+ }
+ }
+ }'
+```
+
+Configure `key-auth` credential for `johndoe`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \
+ -H "X-API-KEY: ${admin_key}" \
+ -d '{
+ "id": "cred-john-key-auth",
+ "plugins": {
+ "key-auth": {
+ "key": "john-key"
+ }
+ }
+ }'
+```
+
+Create another Consumer `janedoe` and a rate limiting quota of 10 tokens in a 60-second window on `deepseek-instance` instance:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
+ -H "X-API-KEY: ${admin_key}" \
+ -d '{
+ "username": "janedoe",
+ "plugins": {
+ "ai-rate-limiting": {
+ "instances": [
+ {
+ "name": "deepseek-instance",
+ "limit": 10,
+ "time_window": 60
+ }
+ ],
+ "rejected_code": 429,
+ "limit_strategy": "total_tokens"
+ }
+ }
+ }'
+```
+
+Configure `key-auth` credential for `janedoe`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers/janedoe/credentials" -X PUT \
+ -H "X-API-KEY: ${admin_key}" \
+ -d '{
+ "id": "cred-jane-key-auth",
+ "plugins": {
+ "key-auth": {
+ "key": "jane-key"
+ }
+ }
+ }'
+```
+
+Create a Route as such and update with your LLM providers, models, API keys, and endpoints if applicable:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+ -H "X-API-KEY: ${admin_key}" \
+ -d '{
+ "id": "ai-proxy-multi-route",
+ "uri": "/anything",
+ "methods": ["POST"],
+ "plugins": {
+ "key-auth": {},
+ "ai-proxy-multi": {
+ "fallback_strategy": "instance_health_and_rate_limiting",
+ "instances": [
+ {
+ "name": "openai-instance",
+ "provider":
"openai", + "weight": 0, + "auth": { + "header": { + "Authorization": "Bearer '"$OPENAI_API_KEY"'" + } + }, + "options": { + "model": "gpt-4" + } + }, + { + "name": "deepseek-instance", + "provider": "deepseek", + "weight": 0, + "auth": { + "header": { + "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'" + } + }, + "options": { + "model": "deepseek-chat" + } + } + ] + } + } + }' +``` + +Send a POST request to the Route without any consumer key: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }' +``` + +You should receive an `HTTP/1.1 401 Unauthorized` response. + +Send a POST request to the Route with `johndoe`'s key: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -H 'apikey: john-key' \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }' +``` + +You should receive a response similar to the following: + +```json +{ + ..., + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1+1 equals 2.", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 23, + "completion_tokens": 8, + "total_tokens": 31, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": null +} +``` + +Since the `total_tokens` value exceeds the configured quota of the `openai` instance for `johndoe`, the next request within the 60-second window from `johndoe` is expected to be forwarded to the `deepseek` instance. 
+ +Within the same 60-second window, send another POST request to the Route with `johndoe`'s key: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -H 'apikey: john-key' \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "Explain Newtons laws to me" } + ] + }' +``` + +You should see a response similar to the following: + +```json +{ + ..., + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Certainly! Newton's laws of motion are three fundamental principles that describe the relationship between the motion of an object and the forces acting on it. They were formulated by Sir Isaac Newton in the late 17th century and are foundational to classical mechanics.\n\n---\n\n### **1. Newton's First Law (Law of Inertia):**\n- **Statement:** An object at rest will remain at rest, and an object in motion will continue moving at a constant velocity (in a straight line at a constant speed), unless acted upon by an external force.\n- **Key Idea:** This law introduces the concept of **inertia**, which is the tendency of an object to resist changes in its state of motion.\n- **Example:** If you slide a book across a table, it eventually stops because of the force of friction acting on it. Without friction, the book would keep moving indefinitely.\n\n---\n\n### **2. Newton's Second Law (Law of Acceleration):**\n- **Statement:** The acceleration of an object is directly proportional to the net force acting on it and inversely proportional to its mass. Mathematically, this is expressed as:\n \\[\n F = ma\n \\]\n where:\n - \\( F \\) = net force applied (in Newtons),\n -" + }, + ... + } + ], + ... 
+} +``` + +Send a POST request to the Route with `janedoe`'s key: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -H 'apikey: jane-key' \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }' +``` + +You should receive a response similar to the following: + +```json +{ + ..., + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "The sum of 1 and 1 is 2. This is a basic arithmetic operation where you combine two units to get a total of two units." + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 14, + "completion_tokens": 31, + "total_tokens": 45, + "prompt_tokens_details": { + "cached_tokens": 0 + }, + "prompt_cache_hit_tokens": 0, + "prompt_cache_miss_tokens": 14 + }, + "system_fingerprint": "fp_3a5770e1b4_prod0225" +} +``` + +Since the `total_tokens` value exceeds the configured quota of the `deepseek` instance for `janedoe`, the next request within the 60-second window from `janedoe` is expected to be forwarded to the `openai` instance. 
+ +Within the same 60-second window, send another POST request to the Route with `janedoe`'s key: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -H 'apikey: jane-key' \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "Explain Newtons laws to me" } + ] + }' +``` + +You should see a response similar to the following: + +```json +{ + ..., + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Sure, here are Newton's three laws of motion:\n\n1) Newton's First Law, also known as the Law of Inertia, states that an object at rest will stay at rest, and an object in motion will stay in motion, unless acted on by an external force. In simple words, this law suggests that an object will keep doing whatever it is doing until something causes it to do otherwise. \n\n2) Newton's Second Law states that the force acting on an object is equal to the mass of that object times its acceleration (F=ma). This means that force is directly proportional to mass and acceleration. The heavier the object and the faster it accelerates, the greater the force.\n\n3) Newton's Third Law, also known as the law of action and reaction, states that for every action, there is an equal and opposite reaction. Essentially, any force exerted onto a body will create a force of equal magnitude but in the opposite direction on the object that exerted the first force.\n\nRemember, these laws become less accurate when considering speeds near the speed of light (where Einstein's theory of relativity becomes more appropriate) or objects very small or very large. However, for everyday situations, they provide a good model of how things move.", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + ... 
+} +``` + +This shows `ai-proxy-multi` load balance the traffic with respect to the rate limiting rules in `ai-rate-limiting` by consumers. + +### Restrict Maximum Number of Completion Tokens + +The following example demonstrates how you can restrict the number of `completion_tokens` used when generating the chat completion. + +For demonstration and easier differentiation, you will be configuring one OpenAI instance and one DeepSeek instance as the upstream LLM services. + +Create a Route as such and update with your LLM providers, models, API keys, and endpoints if applicable: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ai-proxy-multi-route", + "uri": "/anything", + "methods": ["POST"], + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-instance", + "provider": "openai", + "weight": 0, + "auth": { + "header": { + "Authorization": "Bearer '"$OPENAI_API_KEY"'" + } + }, + "options": { + "model": "gpt-4", + "max_tokens": 50 + } + }, + { + "name": "deepseek-instance", + "provider": "deepseek", + "weight": 0, + "auth": { + "header": { + "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'" + } + }, + "options": { + "model": "deepseek-chat", + "max_tokens": 100 + } + } + ] + } + } + }' +``` + +Send a POST request to the Route with a system prompt and a sample user question in the request body: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "Explain Newtons law" } + ] + }' +``` + +If the request is proxied to OpenAI, you should see a response similar to the following, where the content is truncated per 50 `max_tokens` threshold: + +```json +{ + ..., + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Newton's Laws of Motion are three physical laws that 
form the bedrock for classical mechanics. They describe the relationship between a body and the forces acting upon it, and the body's motion in response to those forces. \n\n1. Newton's First Law", + "refusal": null + }, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 20, + "completion_tokens": 50, + "total_tokens": 70, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": null +} +``` + +If the request is proxied to DeepSeek, you should see a response similar to the following, where the content is truncated per 100 `max_tokens` threshold: + +```json +{ + ..., + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Newton's Laws of Motion are three fundamental principles that form the foundation of classical mechanics. They describe the relationship between a body and the forces acting upon it, and the body's motion in response to those forces. Here's a brief explanation of each law:\n\n1. **Newton's First Law (Law of Inertia):**\n - **Statement:** An object will remain at rest or in uniform motion in a straight line unless acted upon by an external force.\n - **Explanation:** This law" + }, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 10, + "completion_tokens": 100, + "total_tokens": 110, + "prompt_tokens_details": { + "cached_tokens": 0 + }, + "prompt_cache_hit_tokens": 0, + "prompt_cache_miss_tokens": 10 + }, + "system_fingerprint": "fp_3a5770e1b4_prod0225" +} +``` + +### Proxy to Embedding Models + +The following example demonstrates how you can configure the `ai-proxy-multi` Plugin to proxy requests and load balance between embedding models. 
+
+Create a Route as such and update with your LLM providers, embedding models, API keys, and endpoints:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "ai-proxy-multi-route",
+    "uri": "/embeddings",
+    "methods": ["POST"],
+    "plugins": {
+      "ai-proxy-multi": {
+        "instances": [
+          {
+            "name": "openai-instance",
+            "provider": "openai",
+            "weight": 0,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$OPENAI_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "text-embedding-3-small"
+            },
+            "override": {
+              "endpoint": "https://api.openai.com/v1/embeddings"
+            }
+          },
+          {
+            "name": "az-openai-instance",
+            "provider": "openai-compatible",
+            "weight": 0,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$AZ_OPENAI_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "text-embedding-3-small"
+            },
+            "override": {
+              "endpoint": "https://ai-plugin-developer.openai.azure.com/openai/deployments/text-embedding-3-small/embeddings?api-version=2023-05-15"
+            }
+          }
+        ]
+      }
+    }
+  }'
+```
+
+Send a POST request to the Route with an input string:
+
+```shell
+curl "http://127.0.0.1:9080/embeddings" -X POST \
+  -H "Content-Type: application/json" \
+  -d '{
+    "input": "hello world"
+  }'
+```
+
+You should receive a response similar to the following:
+
+```json
+{
+  "object": "list",
+  "data": [
+    {
+      "object": "embedding",
+      "index": 0,
+      "embedding": [
+        -0.0067144386,
+        -0.039197803,
+        0.034177095,
+        0.028763203,
+        -0.024785956,
+        -0.04201061,
+        ...
+      ],
+    }
+  ],
+  "model": "text-embedding-3-small",
+  "usage": {
+    "prompt_tokens": 2,
+    "total_tokens": 2
+  }
+}
+```
+
+### Enable Active Health Checks
+
+The following example demonstrates how you can configure the `ai-proxy-multi` Plugin to proxy requests and load balance between models, and enable active health check to improve service availability. You can enable health check on one or multiple instances.
+
+Create a Route as such and update the LLM providers, embedding models, API keys, and health check related configurations:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "ai-proxy-multi-route",
+    "uri": "/anything",
+    "methods": ["POST"],
+    "plugins": {
+      "ai-proxy-multi": {
+        "instances": [
+          {
+            "name": "llm-instance-1",
+            "provider": "openai-compatible",
+            "weight": 0,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$YOUR_LLM_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "'"$YOUR_LLM_MODEL"'"
+            }
+          },
+          {
+            "name": "llm-instance-2",
+            "provider": "openai-compatible",
+            "weight": 0,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$YOUR_LLM_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "'"$YOUR_LLM_MODEL"'"
+            },
+            "checks": {
+              "active": {
+                "type": "https",
+                "host": "yourhost.com",
+                "http_path": "/your/probe/path",
+                "healthy": {
+                  "interval": 2,
+                  "successes": 1
+                },
+                "unhealthy": {
+                  "interval": 1,
+                  "http_failures": 3
+                }
+              }
+            }
+          }
+        ]
+      }
+    }
+  }'
+```
+
+For verification, the behaviours should be consistent with the verification in [active health checks](../tutorials/health-check.md).
+
+### Include LLM Information in Access Log
+
+The following example demonstrates how you can log LLM request related information in the gateway's access log to improve analytics and audit. The following variables are available:
+
+* `request_type`: Type of request, where the value could be `traditional_http`, `ai_chat`, or `ai_stream`.
+* `llm_time_to_first_token`: Duration from request sending to the first token received from the LLM service, in milliseconds.
+* `llm_model`: LLM model.
+* `llm_prompt_tokens`: Number of tokens in the prompt.
+* `llm_completion_tokens`: Number of tokens in the chat completion returned by the LLM service.
+
+:::note
+
+The usage in this example will become available in APISIX 3.13.0.
+ +::: + +Update the access log format in your configuration file to include additional LLM related variables: + +```yaml title="conf/config.yaml" +nginx_config: + http: + access_log_format: "$remote_addr - $remote_user [$time_local] $http_host \"$request_line\" $status $body_bytes_sent $request_time \"$http_referer\" \"$http_user_agent\" $upstream_addr $upstream_status $upstream_response_time \"$upstream_scheme://$upstream_host$upstream_uri\" \"$apisix_request_id\" \"$request_type\" \"$llm_time_to_first_token\" \"$llm_model\" \"$llm_prompt_tokens\" \"$llm_completion_tokens\"" +``` + +Reload APISIX for configuration changes to take effect. + +Next, create a Route with the `ai-proxy-multi` Plugin and send a request. For instance, if the request is forwarded to OpenAI and you receive the following response: + +```json +{ + ..., + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1+1 equals 2.", + "refusal": null, + "annotations": [] + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 23, + "completion_tokens": 8, + "total_tokens": 31, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + ... + }, + "service_tier": "default", + "system_fingerprint": null +} +``` + +In the gateway's access log, you should see a log entry similar to the following: + +```text +192.168.215.1 - - [21/Mar/2025:04:28:03 +0000] api.openai.com "POST /anything HTTP/1.1" 200 804 2.858 "-" "curl/8.6.0" - - - "http://api.openai.com" "5c5e0b95f8d303cb81e4dc456a4b12d9" "ai_chat" "2858" "gpt-4" "23" "8" +``` + +The access log entry shows the request type is `ai_chat`, time to first token is `2858` milliseconds, LLM model is `gpt-4`, prompt token usage is `23`, and completion token usage is `8`. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-proxy.md new file mode 100644 index 0000000..7620266 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-proxy.md @@ -0,0 +1,453 @@ +--- +title: ai-proxy +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ai-proxy + - AI + - LLM +description: The ai-proxy Plugin simplifies access to LLM and embedding models providers by converting Plugin configurations into the required request format for OpenAI, DeepSeek, AIMLAPI, and other OpenAI-compatible APIs. +--- + + + + + + + +## Description + +The `ai-proxy` Plugin simplifies access to LLM and embedding models by transforming Plugin configurations into the designated request format. It supports the integration with OpenAI, DeepSeek, AIMLAPI, and other OpenAI-compatible APIs. + +In addition, the Plugin also supports logging LLM request information in the access log, such as token usage, model, time to the first response, and more. + +## Request Format + +| Name | Type | Required | Description | +| ------------------ | ------ | -------- | --------------------------------------------------- | +| `messages` | Array | True | An array of message objects. | +| `messages.role` | String | True | Role of the message (`system`, `user`, `assistant`).| +| `messages.content` | String | True | Content of the message. | + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|--------------------|--------|----------|---------|------------------------------------------|-------------| +| provider | string | True | | [openai, deepseek, aimlapi, openai-compatible] | LLM service provider. When set to `openai`, the Plugin will proxy the request to `https://api.openai.com/chat/completions`. When set to `deepseek`, the Plugin will proxy the request to `https://api.deepseek.com/chat/completions`. 
When set to `aimlapi`, the Plugin uses the OpenAI-compatible driver and proxies the request to `https://api.aimlapi.com/v1/chat/completions` by default. When set to `openai-compatible`, the Plugin will proxy the request to the custom endpoint configured in `override`. | +| auth | object | True | | | Authentication configurations. | +| auth.header | object | False | | | Authentication headers. At least one of `header` or `query` must be configured. | +| auth.query | object | False | | | Authentication query parameters. At least one of `header` or `query` must be configured. | +| options | object | False | | | Model configurations. In addition to `model`, you can configure additional parameters and they will be forwarded to the upstream LLM service in the request body. For instance, if you are working with OpenAI, you can configure additional parameters such as `temperature`, `top_p`, and `stream`. See your LLM provider's API documentation for more available options. | +| options.model | string | False | | | Name of the LLM model, such as `gpt-4` or `gpt-3.5`. Refer to the LLM provider's API documentation for available models. | +| override | object | False | | | Override setting. | +| override.endpoint | string | False | | | Custom LLM provider endpoint, required when `provider` is `openai-compatible`. | +| logging | object | False | | | Logging configurations. | +| logging.summaries | boolean | False | false | | If true, logs request LLM model, duration, request, and response tokens. | +| logging.payloads | boolean | False | false | | If true, logs request and response payload. | +| timeout | integer | False | 30000 | ≥ 1 | Request timeout in milliseconds when requesting the LLM service. | +| keepalive | boolean | False | true | | If true, keeps the connection alive when requesting the LLM service. | +| keepalive_timeout | integer | False | 60000 | ≥ 1000 | Keepalive timeout in milliseconds when connecting to the LLM service. 
| +| keepalive_pool | integer | False | 30 | | Keepalive pool size for the LLM service connection. | +| ssl_verify | boolean | False | true | | If true, verifies the LLM service's certificate. | + +## Examples + +The examples below demonstrate how you can configure `ai-proxy` for different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Proxy to OpenAI + +The following example demonstrates how you can configure the API key, model, and other parameters in the `ai-proxy` Plugin and configure the Plugin on a Route to proxy user prompts to OpenAI. + +Obtain the OpenAI [API key](https://openai.com/blog/openai-api) and save it to an environment variable: + +```shell +export OPENAI_API_KEY= +``` + +Create a Route and configure the `ai-proxy` Plugin as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ai-proxy-route", + "uri": "/anything", + "methods": ["POST"], + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer '"$OPENAI_API_KEY"'" + } + }, + "options":{ + "model": "gpt-4" + } + } + } + }' +``` + +Send a POST request to the Route with a system prompt and a sample user question in the request body: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -H "Host: api.openai.com" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" 
}
+    ]
+  }'
+```
+
+You should receive a response similar to the following:
+
+```json
+{
+  ...,
+  "model": "gpt-4-0613",
+  "choices": [
+    {
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "content": "1+1 equals 2.",
+        "refusal": null
+      },
+      "logprobs": null,
+      "finish_reason": "stop"
+    }
+  ],
+  ...
+}
+```
+
+### Proxy to DeepSeek
+
+The following example demonstrates how you can configure the `ai-proxy` Plugin to proxy requests to DeepSeek.
+
+Obtain the DeepSeek API key and save it to an environment variable:
+
+```shell
+export DEEPSEEK_API_KEY=
+```
+
+Create a Route and configure the `ai-proxy` Plugin as such:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "ai-proxy-route",
+    "uri": "/anything",
+    "methods": ["POST"],
+    "plugins": {
+      "ai-proxy": {
+        "provider": "deepseek",
+        "auth": {
+          "header": {
+            "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'"
+          }
+        },
+        "options": {
+          "model": "deepseek-chat"
+        }
+      }
+    }
+  }'
+```
+
+Send a POST request to the Route with a sample question in the request body:
+
+```shell
+curl "http://127.0.0.1:9080/anything" -X POST \
+  -H "Content-Type: application/json" \
+  -d '{
+    "messages": [
+      {
+        "role": "system",
+        "content": "You are an AI assistant that helps people find information."
+      },
+      {
+        "role": "user",
+        "content": "Write me a 50-word introduction for Apache APISIX."
+      }
+    ]
+  }'
+```
+
+You should receive a response similar to the following:
+
+```json
+{
+  ...
+  "choices": [
+    {
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "content": "Apache APISIX is a dynamic, real-time, high-performance API gateway and cloud-native platform. It provides rich traffic management features like load balancing, dynamic upstream, canary release, circuit breaking, authentication, observability, and more. 
Designed for microservices and serverless architectures, APISIX ensures scalability, security, and seamless integration with modern DevOps workflows." + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + ... +} +``` + +### Proxy to Azure OpenAI + +The following example demonstrates how you can configure the `ai-proxy` Plugin to proxy requests to other LLM services, such as Azure OpenAI. + +Obtain the Azure OpenAI API key and save it to an environment variable: + +```shell +export AZ_OPENAI_API_KEY= +``` + +Create a Route and configure the `ai-proxy` Plugin as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ai-proxy-route", + "uri": "/anything", + "methods": ["POST"], + "plugins": { + "ai-proxy": { + "provider": "openai-compatible", + "auth": { + "header": { + "api-key": "'"$AZ_OPENAI_API_KEY"'" + } + }, + "options":{ + "model": "gpt-4" + }, + "override": { + "endpoint": "https://api7-auzre-openai.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2024-02-15-preview" + } + } + } + }' +``` + +Send a POST request to the Route with a sample question in the request body: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { + "role": "system", + "content": "You are an AI assistant that helps people find information." + }, + { + "role": "user", + "content": "Write me a 50-word introduction for Apache APISIX." + } + ], + "max_tokens": 800, + "temperature": 0.7, + "frequency_penalty": 0, + "presence_penalty": 0, + "top_p": 0.95, + "stop": null + }' +``` + +You should receive a response similar to the following: + +```json +{ + "choices": [ + { + ..., + "message": { + "content": "Apache APISIX is a modern, cloud-native API gateway built to handle high-performance and low-latency use cases. 
It offers a wide range of features, including load balancing, rate limiting, authentication, and dynamic routing, making it an ideal choice for microservices and cloud-native architectures.", + "role": "assistant" + } + } + ], + ... +} +``` + +### Proxy to Embedding Models + +The following example demonstrates how you can configure the `ai-proxy` Plugin to proxy requests to embedding models. This example will use the OpenAI embedding model endpoint. + +Obtain the OpenAI [API key](https://openai.com/blog/openai-api) and save it to an environment variable: + +```shell +export OPENAI_API_KEY= +``` + +Create a Route and configure the `ai-proxy` Plugin as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ai-proxy-route", + "uri": "/embeddings", + "methods": ["POST"], + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer '"$OPENAI_API_KEY"'" + } + }, + "options":{ + "model": "text-embedding-3-small", + "encoding_format": "float" + }, + "override": { + "endpoint": "https://api.openai.com/v1/embeddings" + } + } + } + }' +``` + +Send a POST request to the Route with an input string: + +```shell +curl "http://127.0.0.1:9080/embeddings" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "input": "hello world" + }' +``` + +You should receive a response similar to the following: + +```json +{ + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + -0.0067144386, + -0.039197803, + 0.034177095, + 0.028763203, + -0.024785956, + -0.04201061, + ... + ], + } + ], + "model": "text-embedding-3-small", + "usage": { + "prompt_tokens": 2, + "total_tokens": 2 + } +} +``` + +### Include LLM Information in Access Log + +The following example demonstrates how you can log LLM request related information in the gateway's access log to improve analytics and audit. 
The following variables are available: + +* `request_type`: Type of request, where the value could be `traditional_http`, `ai_chat`, or `ai_stream`. +* `llm_time_to_first_token`: Duration from request sending to the first token received from the LLM service, in milliseconds. +* `llm_model`: LLM model. +* `llm_prompt_tokens`: Number of tokens in the prompt. +* `llm_completion_tokens`: Number of chat completion tokens in the prompt. + +:::note + +The usage will become available in APISIX 3.13.0. + +::: + +Update the access log format in your configuration file to include additional LLM related variables: + +```yaml title="conf/config.yaml" +nginx_config: + http: + access_log_format: "$remote_addr $remote_user [$time_local] $http_host \"$request_line\" $status $body_bytes_sent $request_time \"$http_referer\" \"$http_user_agent\" $upstream_addr $upstream_status $upstream_response_time \"$upstream_scheme://$upstream_host$upstream_uri\" \"$apisix_request_id\" \"$request_type\" \"$llm_time_to_first_token\" \"$llm_model\" \"$llm_prompt_tokens\" \"$llm_completion_tokens\"" +``` + +Reload APISIX for configuration changes to take effect. + +Now if you create a Route and send a request following the [Proxy to OpenAI example](#proxy-to-openai), you should receive a response similar to the following: + +```json +{ + ..., + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1+1 equals 2.", + "refusal": null, + "annotations": [] + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 23, + "completion_tokens": 8, + "total_tokens": 31, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + ... 
+ }, + "service_tier": "default", + "system_fingerprint": null +} +``` + +In the gateway's access log, you should see a log entry similar to the following: + +```text +192.168.215.1 - [21/Mar/2025:04:28:03 +0000] api.openai.com "POST /anything HTTP/1.1" 200 804 2.858 "-" "curl/8.6.0" - "http://api.openai.com" "5c5e0b95f8d303cb81e4dc456a4b12d9" "ai_chat" "2858" "gpt-4" "23" "8" +``` + +The access log entry shows the request type is `ai_chat`, time to first token is `2858` milliseconds, LLM model is `gpt-4`, prompt token usage is `23`, and completion token usage is `8`. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-rag.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-rag.md new file mode 100644 index 0000000..844b307 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-rag.md @@ -0,0 +1,235 @@ +--- +title: ai-rag +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ai-rag + - AI + - LLM +description: The ai-rag Plugin enhances LLM outputs with Retrieval-Augmented Generation (RAG), efficiently retrieving relevant documents to improve accuracy and contextual relevance in responses. +--- + + + + + + + +## Description + +The `ai-rag` Plugin provides Retrieval-Augmented Generation (RAG) capabilities with LLMs. It facilitates the efficient retrieval of relevant documents or information from external data sources, which are used to enhance the LLM responses, thereby improving the accuracy and contextual relevance of the generated outputs. + +The Plugin supports using [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) and [Azure AI Search](https://azure.microsoft.com/en-us/products/ai-services/ai-search) services for generating embeddings and performing vector search. 
+ +**_As of now only [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) and [Azure AI Search](https://azure.microsoft.com/en-us/products/ai-services/ai-search) services are supported for generating embeddings and performing vector search respectively. PRs for introducing support for other service providers are welcomed._** + +## Attributes + +| Name | Required | Type | Description | +| ----------------------------------------------- | ------------ | -------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| embeddings_provider | True | object | Configurations of the embedding models provider. | +| embeddings_provider.azure_openai | True | object | Configurations of [Azure OpenAI](https://azure.microsoft.com/en-us/products/ai-services/openai-service) as the embedding models provider. | +| embeddings_provider.azure_openai.endpoint | True | string | Azure OpenAI embedding model endpoint. | +| embeddings_provider.azure_openai.api_key | True | string | Azure OpenAI API key. | +| vector_search_provider | True | object | Configuration for the vector search provider. | +| vector_search_provider.azure_ai_search | True | object | Configuration for Azure AI Search. | +| vector_search_provider.azure_ai_search.endpoint | True | string | Azure AI Search endpoint. | +| vector_search_provider.azure_ai_search.api_key | True | string | Azure AI Search API key. | + +## Request Body Format + +The following fields must be present in the request body. + +| Field | Type | Description | +| -------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------- | +| ai_rag | object | Request body RAG specifications. | +| ai_rag.embeddings | object | Request parameters required to generate embeddings. Contents will depend on the API specification of the configured provider. 
| +| ai_rag.vector_search | object | Request parameters required to perform vector search. Contents will depend on the API specification of the configured provider. | + +- Parameters of `ai_rag.embeddings` + + - Azure OpenAI + + | Name | Required | Type | Description | + | --------------- | ------------ | -------- | -------------------------------------------------------------------------------------------------------------------------- | + | input | True | string | Input text used to compute embeddings, encoded as a string. | + | user | False | string | A unique identifier representing your end-user, which can help in monitoring and detecting abuse. | + | encoding_format | False | string | The format to return the embeddings in. Can be either `float` or `base64`. Defaults to `float`. | + | dimensions | False | integer | The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. | + +For other parameters please refer to the [Azure OpenAI embeddings documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings). + +- Parameters of `ai_rag.vector_search` + + - Azure AI Search + + | Field | Required | Type | Description | + | --------- | ------------ | -------- | ---------------------------- | + | fields | True | String | Fields for the vector search. | + + For other parameters please refer the [Azure AI Search documentation](https://learn.microsoft.com/en-us/rest/api/searchservice/documents/search-post). 
+ +Example request body: + +```json +{ + "ai_rag": { + "vector_search": { "fields": "contentVector" }, + "embeddings": { + "input": "which service is good for devops", + "dimensions": 1024 + } + } +} +``` + +## Example + +To follow along the example, create an [Azure account](https://portal.azure.com) and complete the following steps: + +* In [Azure AI Foundry](https://oai.azure.com/portal), deploy a generative chat model, such as `gpt-4o`, and an embedding model, such as `text-embedding-3-large`. Obtain the API key and model endpoints. +* Follow [Azure's example](https://github.com/Azure/azure-search-vector-samples/blob/main/demo-python/code/basic-vector-workflow/azure-search-vector-python-sample.ipynb) to prepare for a vector search in [Azure AI Search](https://azure.microsoft.com/en-us/products/ai-services/ai-search) using Python. The example will create a search index called `vectest` with the desired schema and upload the [sample data](https://github.com/Azure/azure-search-vector-samples/blob/main/data/text-sample.json) which contains 108 descriptions of various Azure services, for embeddings `titleVector` and `contentVector` to be generated based on `title` and `content`. Complete all the setups before performing vector searches in Python. +* In [Azure AI Search](https://azure.microsoft.com/en-us/products/ai-services/ai-search), [obtain the Azure vector search API key and the search service endpoint](https://learn.microsoft.com/en-us/azure/search/search-get-started-vector?tabs=api-key#retrieve-resource-information). 
+
+Save the API keys and endpoints to environment variables:
+
+```shell
+# replace with your values
+
+AZ_OPENAI_DOMAIN=https://ai-plugin-developer.openai.azure.com
+AZ_OPENAI_API_KEY=9m7VYroxITMDEqKKEnpOknn1rV7QNQT7DrIBApcwMLYJQQJ99ALACYeBjFXJ3w3AAABACOGXGcd
+AZ_CHAT_ENDPOINT=${AZ_OPENAI_DOMAIN}/openai/deployments/gpt-4o/chat/completions?api-version=2024-02-15-preview
+AZ_EMBEDDING_MODEL=text-embedding-3-large
+AZ_EMBEDDINGS_ENDPOINT=${AZ_OPENAI_DOMAIN}/openai/deployments/${AZ_EMBEDDING_MODEL}/embeddings?api-version=2023-05-15
+
+AZ_AI_SEARCH_SVC_DOMAIN=https://ai-plugin-developer.search.windows.net
+AZ_AI_SEARCH_KEY=IFZBp3fKVdq7loEVe9LdwMvVdZrad9A4lPH90AzSeC06SlR
+AZ_AI_SEARCH_INDEX=vectest
+AZ_AI_SEARCH_ENDPOINT=${AZ_AI_SEARCH_SVC_DOMAIN}/indexes/${AZ_AI_SEARCH_INDEX}/docs/search?api-version=2024-07-01
+```
+
+:::note
+
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+### Integrate with Azure for RAG-Enhanced Responses
+
+The following example demonstrates how you can use the [`ai-proxy`](./ai-proxy.md) Plugin to proxy requests to Azure OpenAI LLM and use the `ai-rag` Plugin to generate embeddings and perform vector search to enhance LLM responses.
+ +Create a Route as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "ai-rag-route", + "uri": "/rag", + "plugins": { + "ai-rag": { + "embeddings_provider": { + "azure_openai": { + "endpoint": "'"$AZ_EMBEDDINGS_ENDPOINT"'", + "api_key": "'"$AZ_OPENAI_API_KEY"'" + } + }, + "vector_search_provider": { + "azure_ai_search": { + "endpoint": "'"$AZ_AI_SEARCH_ENDPOINT"'", + "api_key": "'"$AZ_AI_SEARCH_KEY"'" + } + } + }, + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "api-key": "'"$AZ_OPENAI_API_KEY"'" + } + }, + "model": "gpt-4o", + "override": { + "endpoint": "'"$AZ_CHAT_ENDPOINT"'" + } + } + } +}' +``` + +Send a POST request to the Route with the vector fields name, embedding model dimensions, and an input prompt in the request body: + +```shell +curl "http://127.0.0.1:9080/rag" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "ai_rag":{ + "vector_search":{ + "fields":"contentVector" + }, + "embeddings":{ + "input":"Which Azure services are good for DevOps?", + "dimensions":1024 + } + } + }' +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "choices": [ + { + "content_filter_results": { + ... 
+ }, + "finish_reason": "length", + "index": 0, + "logprobs": null, + "message": { + "content": "Here is a list of Azure services categorized along with a brief description of each based on the provided JSON data:\n\n### Developer Tools\n- **Azure DevOps**: A suite of services that help you plan, build, and deploy applications, including Azure Boards, Azure Repos, Azure Pipelines, Azure Test Plans, and Azure Artifacts.\n- **Azure DevTest Labs**: A fully managed service to create, manage, and share development and test environments in Azure, supporting custom templates, cost management, and integration with Azure DevOps.\n\n### Containers\n- **Azure Kubernetes Service (AKS)**: A managed container orchestration service based on Kubernetes, simplifying deployment and management of containerized applications with features like automatic upgrades and scaling.\n- **Azure Container Instances**: A serverless container runtime to run and scale containerized applications without managing the underlying infrastructure.\n- **Azure Container Registry**: A fully managed Docker registry service to store and manage container images and artifacts.\n\n### Web\n- **Azure App Service**: A fully managed platform for building, deploying, and scaling web apps, mobile app backends, and RESTful APIs with support for multiple programming languages.\n- **Azure SignalR Service**: A fully managed real-time messaging service to build and scale real-time web applications.\n- **Azure Static Web Apps**: A serverless hosting service for modern web applications using static front-end technologies and serverless APIs.\n\n### Compute\n- **Azure Virtual Machines**: Infrastructure-as-a-Service (IaaS) offering for deploying and managing virtual machines in the cloud.\n- **Azure Functions**: A serverless compute service to run event-driven code without managing infrastructure.\n- **Azure Batch**: A job scheduling service to run large-scale parallel and high-performance computing (HPC) applications.\n- 
**Azure Service Fabric**: A platform to build, deploy, and manage scalable and reliable microservices and container-based applications.\n- **Azure Quantum**: A quantum computing service to build and run quantum applications.\n- **Azure Stack Edge**: A managed edge computing appliance to run Azure services and AI workloads on-premises or at the edge.\n\n### Security\n- **Azure Bastion**: A fully managed service providing secure and scalable remote access to virtual machines.\n- **Azure Security Center**: A unified security management service to protect workloads across Azure and on-premises infrastructure.\n- **Azure DDoS Protection**: A cloud-based service to protect applications and resources from distributed denial-of-service (DDoS) attacks.\n\n### Databases\n", + "role": "assistant" + } + } + ], + "created": 1740625850, + "id": "chatcmpl-B54gQdumpfioMPIybFnirr6rq9ZZS", + "model": "gpt-4o-2024-05-13", + "object": "chat.completion", + "prompt_filter_results": [ + { + "prompt_index": 0, + "content_filter_results": { + ... + } + } + ], + "system_fingerprint": "fp_65792305e4", + "usage": { + ... + } +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-rate-limiting.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-rate-limiting.md new file mode 100644 index 0000000..84bad61 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-rate-limiting.md @@ -0,0 +1,873 @@ +--- +title: ai-rate-limiting +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ai-rate-limiting + - AI + - LLM +description: The ai-rate-limiting Plugin enforces token-based rate limiting for LLM service requests, preventing overuse, optimizing API consumption, and ensuring efficient resource allocation. +--- + + + + + + + +## Description + +The `ai-rate-limiting` Plugin enforces token-based rate limiting for requests sent to LLM services. 
It helps manage API usage by controlling the number of tokens consumed within a specified time frame, ensuring fair resource allocation and preventing excessive load on the service. It is often used with [`ai-proxy`](./ai-proxy.md) or [`ai-proxy-multi`](./ai-proxy-multi.md) plugin. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|------------------------------|----------------|----------|----------|---------------------------------------------------------|-------------| +| limit | integer | False | | >0 | The maximum number of tokens allowed within a given time interval. At least one of `limit` and `instances.limit` should be configured. | +| time_window | integer | False | | >0 | The time interval corresponding to the rate limiting `limit` in seconds. At least one of `time_window` and `instances.time_window` should be configured. | +| show_limit_quota_header | boolean | False | true | | If true, includes `X-AI-RateLimit-Limit-*`, `X-AI-RateLimit-Remaining-*`, and `X-AI-RateLimit-Reset-*` headers in the response, where `*` is the instance name. | +| limit_strategy | string | False | total_tokens | [total_tokens, prompt_tokens, completion_tokens] | Type of token to apply rate limiting. `total_tokens` is the sum of `prompt_tokens` and `completion_tokens`. | +| instances | array[object] | False | | | LLM instance rate limiting configurations. | +| instances.name | string | True | | | Name of the LLM service instance. | +| instances.limit | integer | True | | >0 | The maximum number of tokens allowed within a given time interval for an instance. | +| instances.time_window | integer | True | | >0 | The time interval corresponding to the rate limiting `limit` in seconds for an instance. | +| rejected_code | integer | False | 503 | [200, 599] | The HTTP status code returned when a request exceeding the quota is rejected. 
| +| rejected_msg | string | False | | | The response body returned when a request exceeding the quota is rejected. | + +## Examples + +The examples below demonstrate how you can configure `ai-rate-limiting` for different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Apply Rate Limiting with `ai-proxy` + +The following example demonstrates how you can use `ai-proxy` to proxy LLM traffic and use `ai-rate-limiting` to configure token-based rate limiting on the instance. + +Create a Route as such and update with your LLM providers, models, API keys, and endpoints, if applicable: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ai-rate-limiting-route", + "uri": "/anything", + "methods": ["POST"], + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer '"$OPENAI_API_KEY"'" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + } + }, + "ai-rate-limiting": { + "limit": 300, + "time_window": 30, + "limit_strategy": "prompt_tokens" + } + } + }' +``` + +Send a POST request to the Route with a system prompt and a sample user question in the request body: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }' +``` + +You should receive a response similar to the following: + +```json +{ + ... + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1 + 1 equals 2. 
This is a fundamental arithmetic operation where adding one unit to another results in a total of two units."
+      },
+      "logprobs": null,
+      "finish_reason": "stop"
+    }
+  ],
+  ...
+}
+```
+
+If the rate limiting quota of 300 prompt tokens has been consumed in a 30-second window, all additional requests will be rejected.
+
+### Rate Limit One Instance Among Multiple
+
+The following example demonstrates how you can use `ai-proxy-multi` to configure two models for load balancing, forwarding 80% of the traffic to one instance and 20% to the other. Additionally, use `ai-rate-limiting` to configure token-based rate limiting on the instance that receives 80% of the traffic, such that when the configured quota is fully consumed, the additional traffic will be forwarded to the other instance.
+
+Create a Route which applies rate limiting quota of 100 total tokens in a 30-second window on the `deepseek-instance-1` instance, and update with your LLM providers, models, API keys, and endpoints, if applicable:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "ai-rate-limiting-route",
+    "uri": "/anything",
+    "methods": ["POST"],
+    "plugins": {
+      "ai-proxy-multi": {
+        "instances": [
+          {
+            "name": "deepseek-instance-1",
+            "provider": "deepseek",
+            "weight": 8,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "deepseek-chat"
+            }
+          },
+          {
+            "name": "deepseek-instance-2",
+            "provider": "deepseek",
+            "weight": 2,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "deepseek-chat"
+            }
+          }
+        ]
+      },
+      "ai-rate-limiting": {
+        "instances": [
+          {
+            "name": "deepseek-instance-1",
+            "limit_strategy": "total_tokens",
+            "limit": 100,
+            "time_window": 30
+          }
+        ]
+      }
+    }
+  }'
+```
+
+Send a POST request to the Route with a system prompt and a sample user question in the request body:
+
+```shell
+curl 
"http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }' +``` + +You should receive a response similar to the following: + +```json +{ + ... + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1 + 1 equals 2. This is a fundamental arithmetic operation where adding one unit to another results in a total of two units." + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + ... +} +``` + +If `deepseek-instance-1` instance rate limiting quota of 100 tokens has been consumed in a 30-second window, the additional requests will all be forwarded to `deepseek-instance-2`, which is not rate limited. + +### Apply the Same Quota to All Instances + +The following example demonstrates how you can apply the same rate limiting quota to all LLM upstream instances in `ai-rate-limiting`. + +For demonstration and easier differentiation, you will be configuring one OpenAI instance and one DeepSeek instance as the upstream LLM services. 
+
+Create a Route which applies a rate limiting quota of 100 total tokens for all instances within a 60-second window, and update with your LLM providers, models, API keys, and endpoints, if applicable:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "ai-rate-limiting-route",
+    "uri": "/anything",
+    "methods": ["POST"],
+    "plugins": {
+      "ai-proxy-multi": {
+        "instances": [
+          {
+            "name": "openai-instance",
+            "provider": "openai",
+            "weight": 0,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$OPENAI_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "gpt-4"
+            }
+          },
+          {
+            "name": "deepseek-instance",
+            "provider": "deepseek",
+            "weight": 0,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "deepseek-chat"
+            }
+          }
+        ]
+      },
+      "ai-rate-limiting": {
+        "limit": 100,
+        "time_window": 60,
+        "rejected_code": 429,
+        "limit_strategy": "total_tokens"
+      }
+    }
+  }'
+```
+
+Send a POST request to the Route with a system prompt and a sample user question in the request body:
+
+```shell
+curl -i "http://127.0.0.1:9080/anything" -X POST \
+  -H "Content-Type: application/json" \
+  -d '{
+    "messages": [
+      { "role": "system", "content": "You are a mathematician" },
+      { "role": "user", "content": "Explain Newtons laws" }
+    ]
+  }'
+```
+
+You should receive a response from either LLM instance, similar to the following:
+
+```json
+{
+  ...,
+  "model": "gpt-4-0613",
+  "choices": [
+    {
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "content": "Sure! Sir Isaac Newton formulated three laws of motion that describe the motion of objects. These laws are widely used in physics and engineering for studying and understanding how things move. Here they are:\n\n1. 
Newton's First Law - Law of Inertia: An object at rest tends to stay at rest and an object in motion tends to stay in motion with the same speed and in the same direction unless acted upon by an unbalanced force. This is also known as the principle of inertia.\n\n2. Newton's Second Law of Motion - Force and Acceleration: The acceleration of an object is directly proportional to the net force acting on it and inversely proportional to its mass. This is usually formulated as F=ma where F is the force applied, m is the mass of the object and a is the acceleration produced.\n\n3. Newton's Third Law - Action and Reaction: For every action, there is an equal and opposite reaction. This means that any force exerted on a body will create a force of equal magnitude but in the opposite direction on the object that exerted the first force.\n\nIn simple terms: \n1. If you slide a book on a table and let go, it will stop because of the friction (or force) between it and the table.\n2.", + "refusal": null + }, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 23, + "completion_tokens": 256, + "total_tokens": 279, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": null +} +``` + +Since the `total_tokens` value exceeds the configured quota of `100`, the next request within the 60-second window is expected to be forwarded to the other instance. 
+ +Within the same 60-second window, send another POST request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "Explain Newtons laws" } + ] + }' +``` + +You should receive a response from the other LLM instance, similar to the following: + +```json +{ + ... + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Sure! Newton's laws of motion are three fundamental principles that describe the relationship between the motion of an object and the forces acting on it. They were formulated by Sir Isaac Newton in the late 17th century and are foundational to classical mechanics. Here's an explanation of each law:\n\n---\n\n### **1. Newton's First Law (Law of Inertia)**\n- **Statement**: An object will remain at rest or in uniform motion in a straight line unless acted upon by an external force.\n- **What it means**: This law introduces the concept of **inertia**, which is the tendency of an object to resist changes in its state of motion. If no net force acts on an object, its velocity (speed and direction) will not change.\n- **Example**: A book lying on a table will stay at rest unless you push it. Similarly, a hockey puck sliding on ice will keep moving at a constant speed unless friction or another force slows it down.\n\n---\n\n### **2. Newton's Second Law (Law of Acceleration)**\n- **Statement**: The acceleration of an object is directly proportional to the net force acting on it and inversely proportional to its mass. 
Mathematically, this is expressed as:\n \\[\n F = ma\n \\]\n" + }, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 13, + "completion_tokens": 256, + "total_tokens": 269, + "prompt_tokens_details": { + "cached_tokens": 0 + }, + "prompt_cache_hit_tokens": 0, + "prompt_cache_miss_tokens": 13 + }, + "system_fingerprint": "fp_3a5770e1b4_prod0225" +} +``` + +Since the `total_tokens` value exceeds the configured quota of `100`, the next request within the 60-second window is expected to be rejected. + +Within the same 60-second window, send a third POST request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "Explain Newtons laws" } + ] + }' +``` + +You should receive an `HTTP 429 Too Many Requests` response and observe the following headers: + +```text +X-AI-RateLimit-Limit-openai-instance: 100 +X-AI-RateLimit-Remaining-openai-instance: 0 +X-AI-RateLimit-Reset-openai-instance: 0 +X-AI-RateLimit-Limit-deepseek-instance: 100 +X-AI-RateLimit-Remaining-deepseek-instance: 0 +X-AI-RateLimit-Reset-deepseek-instance: 0 +``` + +### Configure Instance Priority and Rate Limiting + +The following example demonstrates how you can configure two models with different priorities and apply rate limiting on the instance with a higher priority. In the case where `fallback_strategy` is set to `instance_health_and_rate_limiting`, the Plugin should continue to forward requests to the low priority instance once the high priority instance's rate limiting quota is fully consumed. + +Create a Route as such to set rate limiting and a higher priority on `openai-instance` instance and set the `fallback_strategy` to `instance_health_and_rate_limiting`. 
Update with your LLM providers, models, API keys, and endpoints, if applicable:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "ai-rate-limiting-route",
+    "uri": "/anything",
+    "methods": ["POST"],
+    "plugins": {
+      "ai-proxy-multi": {
+        "fallback_strategy": "instance_health_and_rate_limiting",
+        "instances": [
+          {
+            "name": "openai-instance",
+            "provider": "openai",
+            "priority": 1,
+            "weight": 0,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$OPENAI_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "gpt-4"
+            }
+          },
+          {
+            "name": "deepseek-instance",
+            "provider": "deepseek",
+            "priority": 0,
+            "weight": 0,
+            "auth": {
+              "header": {
+                "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'"
+              }
+            },
+            "options": {
+              "model": "deepseek-chat"
+            }
+          }
+        ]
+      },
+      "ai-rate-limiting": {
+        "instances": [
+          {
+            "name": "openai-instance",
+            "limit": 10,
+            "time_window": 60
+          }
+        ],
+        "limit_strategy": "total_tokens"
+      }
+    }
+  }'
+```
+
+Send a POST request to the Route with a system prompt and a sample user question in the request body:
+
+```shell
+curl "http://127.0.0.1:9080/anything" -X POST \
+  -H "Content-Type: application/json" \
+  -d '{
+    "messages": [
+      { "role": "system", "content": "You are a mathematician" },
+      { "role": "user", "content": "What is 1+1?" 
} + ] + }' +``` + +You should receive a response similar to the following: + +```json +{ + ..., + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1+1 equals 2.", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 23, + "completion_tokens": 8, + "total_tokens": 31, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": null +} +``` + +Since the `total_tokens` value exceeds the configured quota of `10`, the next request within the 60-second window is expected to be forwarded to the other instance. + +Within the same 60-second window, send another POST request to the Route: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "Explain Newton law" } + ] + }' +``` + +You should see a response similar to the following: + +```json +{ + ..., + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Certainly! Newton's laws of motion are three fundamental principles that describe the relationship between the motion of an object and the forces acting on it. They were formulated by Sir Isaac Newton in the late 17th century and are foundational to classical mechanics.\n\n---\n\n### **1. 
Newton's First Law (Law of Inertia):**\n- **Statement:** An object at rest will remain at rest, and an object in motion will continue moving at a constant velocity (in a straight line at a constant speed), unless acted upon by an external force.\n- **Key Idea:** This law introduces the concept of **inertia**, which is the tendency of an object to resist changes in its state of motion.\n- **Example:** If you slide a book across a table, it eventually stops because of the force of friction acting on it. Without friction, the book would keep moving indefinitely.\n\n---\n\n### **2. Newton's Second Law (Law of Acceleration):**\n- **Statement:** The acceleration of an object is directly proportional to the net force acting on it and inversely proportional to its mass. Mathematically, this is expressed as:\n \\[\n F = ma\n \\]\n where:\n - \\( F \\) = net force applied (in Newtons),\n -" + }, + ... + } + ], + ... +} +``` + +### Load Balance and Rate Limit by Consumers + +The following example demonstrates how you can configure two models for load balancing and apply rate limiting by Consumer. 
+
+Create a Consumer `johndoe` and a rate limiting quota of 10 tokens in a 60-second window on `openai-instance` instance:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "username": "johndoe",
+    "plugins": {
+      "ai-rate-limiting": {
+        "instances": [
+          {
+            "name": "openai-instance",
+            "limit": 10,
+            "time_window": 60
+          }
+        ],
+        "rejected_code": 429,
+        "limit_strategy": "total_tokens"
+      }
+    }
+  }'
+```
+
+Configure `key-auth` credential for `johndoe`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "cred-john-key-auth",
+    "plugins": {
+      "key-auth": {
+        "key": "john-key"
+      }
+    }
+  }'
+```
+
+Create another Consumer `janedoe` and a rate limiting quota of 10 tokens in a 60-second window on `deepseek-instance` instance:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "username": "janedoe",
+    "plugins": {
+      "ai-rate-limiting": {
+        "instances": [
+          {
+            "name": "deepseek-instance",
+            "limit": 10,
+            "time_window": 60
+          }
+        ],
+        "rejected_code": 429,
+        "limit_strategy": "total_tokens"
+      }
+    }
+  }'
+```
+
+Configure `key-auth` credential for `janedoe`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers/janedoe/credentials" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "cred-jane-key-auth",
+    "plugins": {
+      "key-auth": {
+        "key": "jane-key"
+      }
+    }
+  }'
+```
+
+Create a Route as such and update with your LLM providers, models, API keys, and endpoints, if applicable:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "ai-rate-limiting-route",
+    "uri": "/anything",
+    "methods": ["POST"],
+    "plugins": {
+      "key-auth": {},
+      "ai-proxy-multi": {
+        "fallback_strategy": "instance_health_and_rate_limiting",
+        "instances": [
+          {
+            "name": "openai-instance",
+            
"provider": "openai", + "weight": 0, + "auth": { + "header": { + "Authorization": "Bearer '"$OPENAI_API_KEY"'" + } + }, + "options": { + "model": "gpt-4" + } + }, + { + "name": "deepseek-instance", + "provider": "deepseek", + "weight": 0, + "auth": { + "header": { + "Authorization": "Bearer '"$DEEPSEEK_API_KEY"'" + } + }, + "options": { + "model": "deepseek-chat" + } + } + ] + } + } + }' +``` + +Send a POST request to the Route without any Consumer key: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }' +``` + +You should receive an `HTTP/1.1 401 Unauthorized` response. + +Send a POST request to the Route with `johndoe`'s key: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -H 'apikey: john-key' \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }' +``` + +You should receive a response similar to the following: + +```json +{ + ..., + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "1+1 equals 2.", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 23, + "completion_tokens": 8, + "total_tokens": 31, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": null +} +``` + +Since the `total_tokens` value exceeds the configured quota of the `openai` instance for `johndoe`, the next request within the 60-second window from `johndoe` is expected to be forwarded to the `deepseek` instance. 
+ +Within the same 60-second window, send another POST request to the Route with `johndoe`'s key: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -H 'apikey: john-key' \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "Explain Newtons laws to me" } + ] + }' +``` + +You should see a response similar to the following: + +```json +{ + ..., + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Certainly! Newton's laws of motion are three fundamental principles that describe the relationship between the motion of an object and the forces acting on it. They were formulated by Sir Isaac Newton in the late 17th century and are foundational to classical mechanics.\n\n---\n\n### **1. Newton's First Law (Law of Inertia):**\n- **Statement:** An object at rest will remain at rest, and an object in motion will continue moving at a constant velocity (in a straight line at a constant speed), unless acted upon by an external force.\n- **Key Idea:** This law introduces the concept of **inertia**, which is the tendency of an object to resist changes in its state of motion.\n- **Example:** If you slide a book across a table, it eventually stops because of the force of friction acting on it. Without friction, the book would keep moving indefinitely.\n\n---\n\n### **2. Newton's Second Law (Law of Acceleration):**\n- **Statement:** The acceleration of an object is directly proportional to the net force acting on it and inversely proportional to its mass. Mathematically, this is expressed as:\n \\[\n F = ma\n \\]\n where:\n - \\( F \\) = net force applied (in Newtons),\n -" + }, + ... + } + ], + ... 
+} +``` + +Send a POST request to the Route with `janedoe`'s key: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -H 'apikey: jane-key' \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }' +``` + +You should receive a response similar to the following: + +```json +{ + ..., + "model": "deepseek-chat", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "The sum of 1 and 1 is 2. This is a basic arithmetic operation where you combine two units to get a total of two units." + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 14, + "completion_tokens": 31, + "total_tokens": 45, + "prompt_tokens_details": { + "cached_tokens": 0 + }, + "prompt_cache_hit_tokens": 0, + "prompt_cache_miss_tokens": 14 + }, + "system_fingerprint": "fp_3a5770e1b4_prod0225" +} +``` + +Since the `total_tokens` value exceeds the configured quota of the `deepseek` instance for `janedoe`, the next request within the 60-second window from `janedoe` is expected to be forwarded to the `openai` instance. 
+ +Within the same 60-second window, send another POST request to the Route with `janedoe`'s key: + +```shell +curl "http://127.0.0.1:9080/anything" -X POST \ + -H "Content-Type: application/json" \ + -H 'apikey: jane-key' \ + -d '{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "Explain Newtons laws to me" } + ] + }' +``` + +You should see a response similar to the following: + +```json +{ + ..., + "model": "gpt-4-0613", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Sure, here are Newton's three laws of motion:\n\n1) Newton's First Law, also known as the Law of Inertia, states that an object at rest will stay at rest, and an object in motion will stay in motion, unless acted on by an external force. In simple words, this law suggests that an object will keep doing whatever it is doing until something causes it to do otherwise. \n\n2) Newton's Second Law states that the force acting on an object is equal to the mass of that object times its acceleration (F=ma). This means that force is directly proportional to mass and acceleration. The heavier the object and the faster it accelerates, the greater the force.\n\n3) Newton's Third Law, also known as the law of action and reaction, states that for every action, there is an equal and opposite reaction. Essentially, any force exerted onto a body will create a force of equal magnitude but in the opposite direction on the object that exerted the first force.\n\nRemember, these laws become less accurate when considering speeds near the speed of light (where Einstein's theory of relativity becomes more appropriate) or objects very small or very large. However, for everyday situations, they provide a good model of how things move.", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + ... 
+} +``` + +This shows `ai-proxy-multi` load balance the traffic with respect to the rate limiting rules in `ai-rate-limiting` by Consumers. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-request-rewrite.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-request-rewrite.md new file mode 100644 index 0000000..584391d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ai-request-rewrite.md @@ -0,0 +1,177 @@ +--- +title: ai-request-rewrite +keywords: + - Apache APISIX + - AI Gateway + - Plugin + - ai-request-rewrite +description: The ai-request-rewrite plugin intercepts client requests before they are forwarded to the upstream service. It sends a predefined prompt, along with the original request body, to a specified LLM service. The LLM processes the input and returns a modified request body, which is then used for the upstream request. This allows dynamic transformation of API requests based on AI-generated content. +--- + + + +## Description + +The `ai-request-rewrite` plugin intercepts client requests before they are forwarded to the upstream service. It sends a predefined prompt, along with the original request body, to a specified LLM service. The LLM processes the input and returns a modified request body, which is then used for the upstream request. This allows dynamic transformation of API requests based on AI-generated content. + +## Plugin Attributes + +| **Field** | **Required** | **Type** | **Description** | +| ------------------------- | ------------ | -------- | ------------------------------------------------------------------------------------ | +| prompt | Yes | String | The prompt send to LLM service. | +| provider | Yes | String | Name of the LLM service. Available options: openai, deekseek, aimlapi and openai-compatible. When `aimlapi` is selected, the plugin uses the OpenAI-compatible driver with a default endpoint of `https://api.aimlapi.com/v1/chat/completions`. 
| +| auth                      | Yes          | Object   | Authentication configuration                                                         | +| auth.header               | No           | Object   | Authentication headers. Key must match pattern `^[a-zA-Z0-9._-]+$`.                  | +| auth.query                | No           | Object   | Authentication query parameters. Key must match pattern `^[a-zA-Z0-9._-]+$`.         | +| options                   | No           | Object   | Key/value settings for the model                                                     | +| options.model             | No           | String   | Model to execute. Examples: "gpt-3.5-turbo" for openai, "deepseek-chat" for deepseek, or "qwen-turbo" for openai-compatible or aimlapi services | +| override.endpoint         | No           | String   | Override the default endpoint when using OpenAI-compatible services (e.g., self-hosted models or third-party LLM services). When the provider is 'openai-compatible', the endpoint field is required. | +| timeout                   | No           | Integer  | Total timeout in milliseconds for requests to LLM service, including connect, send, and read timeouts. Range: 1 - 60000. Default: 30000| +| keepalive                 | No           | Boolean  | Enable keepalive for requests to LLM service. Default: true | +| keepalive_timeout         | No           | Integer  | Keepalive timeout in milliseconds for requests to LLM service. Minimum: 1000. Default: 60000 | +| keepalive_pool            | No           | Integer  | Keepalive pool size for requests to LLM service. Minimum: 1. Default: 30 | +| ssl_verify                | No           | Boolean  | SSL verification for requests to LLM service. Default: true | + +## How it works + +![image](https://github.com/user-attachments/assets/c7288e4f-00fc-46ca-b69e-d3d74d7085ca) + +## Examples + +The examples below demonstrate how you can configure `ai-request-rewrite` for different scenarios.
+ +:::note + +You can fetch the admin_key from config.yaml and save to an environment variable with the following command: + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') + +::: + +### Redact sensitive information + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "Given a JSON request body, identify and mask any sensitive information such as credit card numbers, social security numbers, and personal identification numbers (e.g., passport or driver'\''s license numbers). Replace detected sensitive values with a masked format (e.g., \"*** **** **** 1234\") for credit card numbers. Ensure the JSON structure remains unchanged.", + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer " + } + }, + "options": { + "model": "gpt-4" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Now send a request: + +```shell +curl "http://127.0.0.1:9080/anything" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "John Doe", + "email": "john.doe@example.com", + "credit_card": "4111 1111 1111 1111", + "ssn": "123-45-6789", + "address": "123 Main St" + }' +``` + +The request body send to the LLM Service is as follows: + +```json +{ + "messages": [ + { + "role": "system", + "content": "Given a JSON request body, identify and mask any sensitive information such as credit card numbers, social security numbers, and personal identification numbers (e.g., passport or driver's license numbers). Replace detected sensitive values with a masked format (e.g., '*** **** **** 1234') for credit card numbers). Ensure the JSON structure remains unchanged." 
+  }, + { + "role": "user", + "content": "{\n\"name\":\"John Doe\",\n\"email\":\"john.doe@example.com\",\n\"credit_card\":\"4111 1111 1111 1111\",\n\"ssn\":\"123-45-6789\",\n\"address\":\"123 Main St\"\n}" + } + ] +} + +``` + +The LLM processes the input and returns a modified request body, which replaces detected sensitive values with a masked format and is then used for the upstream request: + +```json +{ + "name": "John Doe", + "email": "john.doe@example.com", + "credit_card": "**** **** **** 1111", + "ssn": "***-**-6789", + "address": "123 Main St" +} +``` + +### Send request to an OpenAI compatible LLM + +Create a route with the `ai-request-rewrite` plugin with `provider` set to `openai-compatible` and the endpoint of the model set to `override.endpoint` like so: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "Given a JSON request body, identify and mask any sensitive information such as credit card numbers, social security numbers, and personal identification numbers (e.g., passport or driver'\''s license numbers). Replace detected sensitive values with a masked format (e.g., '*** **** **** 1234') for credit card numbers).
Ensure the JSON structure remains unchanged.", + "provider": "openai-compatible", + "auth": { + "header": { + "Authorization": "Bearer " + } + }, + "options": { + "model": "qwen-plus", + "max_tokens": 1024, + "temperature": 1 + }, + "override": { + "endpoint": "https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/api-breaker.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/api-breaker.md new file mode 100644 index 0000000..c60070d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/api-breaker.md @@ -0,0 +1,136 @@ +--- +title: api-breaker +keywords: + - Apache APISIX + - API Gateway + - API Breaker +description: This document describes the information about the Apache APISIX api-breaker Plugin, you can use it to protect Upstream services. +--- + + + +## Description + +The `api-breaker` Plugin implements circuit breaker functionality to protect Upstream services. + +:::note + +Whenever the Upstream service responds with a status code from the configured `unhealthy.http_statuses` list for the configured `unhealthy.failures` number of times, the Upstream service will be considered unhealthy. + +The request is then retried in 2, 4, 8, 16 ... seconds until the `max_breaker_sec`. + +In an unhealthy state, if the Upstream service responds with a status code from the configured list `healthy.http_statuses` for `healthy.successes` times, the service is considered healthy again. 
+ +::: + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-------------------------|----------------|----------|---------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| break_response_code | integer | True | | [200, ..., 599] | HTTP error code to return when Upstream is unhealthy. | +| break_response_body | string | False | | | Body of the response message to return when Upstream is unhealthy. | +| break_response_headers | array[object] | False | | [{"key":"header_name","value":"can contain Nginx $var"}] | Headers of the response message to return when Upstream is unhealthy. Can only be configured when the `break_response_body` attribute is configured. The values can contain APISIX variables. For example, we can use `{"key":"X-Client-Addr","value":"$remote_addr:$remote_port"}`. | +| max_breaker_sec | integer | False | 300 | >=3 | Maximum time in seconds for circuit breaking. | +| unhealthy.http_statuses | array[integer] | False | [500] | [500, ..., 599] | Status codes of Upstream to be considered unhealthy. | +| unhealthy.failures | integer | False | 3 | >=1 | Number of failures within a certain period of time for the Upstream service to be considered unhealthy. | +| healthy.http_statuses | array[integer] | False | [200] | [200, ..., 499] | Status codes of Upstream to be considered healthy. | +| healthy.successes | integer | False | 3 | >=1 | Number of consecutive healthy requests for the Upstream service to be considered healthy. 
| + +## Enable Plugin + +The example below shows how you can configure the Plugin on a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "api-breaker": { + "break_response_code": 502, + "unhealthy": { + "http_statuses": [500, 503], + "failures": 3 + }, + "healthy": { + "http_statuses": [200], + "successes": 1 + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +In this configuration, a response code of `500` or `503` three times within a certain period of time triggers the unhealthy status of the Upstream service. A response code of `200` restores its healthy status. + +## Example usage + +Once you have configured the Plugin as shown above, you can test it out by sending a request. + +```shell +curl -i -X POST "http://127.0.0.1:9080/hello" +``` + +If the Upstream service responds with an unhealthy response code, you will receive the configured response code (`break_response_code`). + +```shell +HTTP/1.1 502 Bad Gateway +... + +502 Bad Gateway + +

<center><h1>502 Bad Gateway</h1></center>

+
<hr><center>openresty</center>
+ + +``` + +## Delete Plugin + +To remove the `api-breaker` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/attach-consumer-label.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/attach-consumer-label.md new file mode 100644 index 0000000..2e977a4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/attach-consumer-label.md @@ -0,0 +1,180 @@ +--- +title: attach-consumer-label +keywords: + - Apache APISIX + - API Gateway + - API Consumer +description: This article describes the Apache APISIX attach-consumer-label plugin, which you can use to pass custom consumer labels to upstream services. +--- + + + +## Description + +The `attach-consumer-label` plugin attaches custom consumer-related labels, in addition to `X-Consumer-Username` and `X-Credential-Indentifier`, to authenticated requests, for upstream services to differentiate between consumers and implement additional logics. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|----------|--------|----------|---------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| headers | object | True | | | Key-value pairs of consumer labels to be attached to request headers, where key is the request header name, such as `X-Consumer-Role`, and the value is a reference to the custom label key, such as `$role`. Note that the value should always start with a dollar sign (`$`). If a referenced consumer value is not configured on the consumer, the corresponding header will not be attached to the request. | + +## Enable Plugin + +The following example demonstrates how you can attach custom labels to request headers before authenticated requests are forwarded to upstream services. If the request is rejected, you should not see any consumer labels attached to request headers. If a certain label value is not configured on the consumer but referenced in the `attach-consumer-label` plugin, the corresponding header will also not be attached. 
+ +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +Create a consumer `john` with custom labels: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "username": "john", + # highlight-start + "labels": { + // Annotate 1 + "department": "devops", + // Annotate 2 + "company": "api7" + } + # highlight-end + }' +``` + +❶ Label the `department` information for the consumer. + +❷ Label the `company` information for the consumer. + +Configure the `key-auth` credential for the consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +Create a route enabling the `key-auth` and `attach-consumer-label` plugins: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "attach-consumer-label-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + # highlight-start + "attach-consumer-label": { + "headers": { + // Annotate 1 + "X-Consumer-Department": "$department", + // Annotate 2 + "X-Consumer-Company": "$company", + // Annotate 3 + "X-Consumer-Role": "$role" + } + } + # highlight-end + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +❶ Attach the `department` consumer label value in the `X-Consumer-Department` request header. + +❷ Attach the `company` consumer label value in the `X-Consumer-Company` request header. + +❸ Attach the `role` consumer label value in the `X-Consumer-Role` request header. 
As the `role` label is not configured on the consumer, it is expected that the header will not appear in the request forwarded to the upstream service. + +:::tip + +The consumer label references must be prefixed by a dollar sign (`$`). + +::: + +To verify, send a request to the route with the valid credential: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: john-key' +``` + +You should see an `HTTP/1.1 200 OK` response similar to the following: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Apikey": "john-key", + "Host": "127.0.0.1", + # highlight-start + "X-Consumer-Username": "john", + "X-Credential-Indentifier": "cred-john-key-auth", + "X-Consumer-Company": "api7", + "X-Consumer-Department": "devops", + # highlight-end + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66e5107c-5bb3e24f2de5baf733aec1cc", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 205.198.122.37", + "url": "http://127.0.0.1/get" +} +``` + +## Delete plugin + +To remove the Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/attach-consumer-label-route" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/authz-casbin.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/authz-casbin.md new file mode 100644 index 0000000..8f49fba --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/authz-casbin.md @@ -0,0 +1,263 @@ +--- +title: authz-casbin +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Authz Casbin + - authz-casbin +description: This document contains information about the Apache APISIX authz-casbin Plugin. 
+--- + + + +## Description + +The `authz-casbin` Plugin is an authorization Plugin based on [Lua Casbin](https://github.com/casbin/lua-casbin/). This Plugin supports powerful authorization scenarios based on various [access control models](https://casbin.org/docs/en/supported-models). + +## Attributes + +| Name | Type | Required | Description | +|-------------|--------|----------|----------------------------------------------------------------------------------------| +| model_path | string | True | Path of the Casbin model configuration file. | +| policy_path | string | True | Path of the Casbin policy file. | +| model | string | True | Casbin model configuration in text format. | +| policy | string | True | Casbin policy in text format. | +| username | string | True | Header in the request that will be used in the request to pass the username (subject). | + +:::note + +You must either specify the `model_path`, `policy_path`, and the `username` attributes or specify the `model`, `policy` and the `username` attributes in the Plugin configuration for it to be valid. + +If you wish to use a global Casbin configuration, you can first specify `model` and `policy` attributes in the Plugin metadata and only the `username` attribute in the Plugin configuration. All Routes will use the Plugin configuration this way. + +::: + +## Metadata + +| Name | Type | Required | Description | +|--------|--------|----------|--------------------------------------------| +| model | string | True | Casbin model configuration in text format. | +| policy | string | True | Casbin policy in text format. | + +## Enable Plugin + +You can enable the Plugin on a Route by either using the model/policy file paths or using the model/policy text in Plugin configuration/metadata. 
+ +### By using model/policy file paths + +The example below shows setting up Casbin authentication from your model/policy configuration file: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "authz-casbin": { + "model_path": "/path/to/model.conf", + "policy_path": "/path/to/policy.csv", + "username": "user" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" +}' +``` + +### By using model/policy text in Plugin configuration + +The example below shows setting up Casbin authentication from your model/policy text in your Plugin configuration: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "authz-casbin": { + "model": "[request_definition] + r = sub, obj, act + + [policy_definition] + p = sub, obj, act + + [role_definition] + g = _, _ + + [policy_effect] + e = some(where (p.eft == allow)) + + [matchers] + m = (g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act)", + + "policy": "p, *, /, GET + p, admin, *, * + g, alice, admin", + + "username": "user" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" +}' +``` + +### By using model/policy text in Plugin metadata + +First, you need to send a `PUT` request to the Admin API to add the `model` and `policy` text to the Plugin metadata. + +All Routes configured this way will use a single Casbin enforcer with the configured Plugin metadata. You can also update the model/policy in this way and the Plugin will automatically update to the new configuration. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/authz-casbin -H "X-API-KEY: $admin_key" -i -X PUT -d ' +{ +"model": "[request_definition] +r = sub, obj, act + +[policy_definition] +p = sub, obj, act + +[role_definition] +g = _, _ + +[policy_effect] +e = some(where (p.eft == allow)) + +[matchers] +m = (g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act)", + +"policy": "p, *, /, GET +p, admin, *, * +g, alice, admin" +}' +``` + +Once you have updated the Plugin metadata, you can add the Plugin to a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "authz-casbin": { + "username": "user" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" +}' +``` + +:::note + +The Plugin Route configuration has a higher precedence than the Plugin metadata configuration. If the model/policy configuration is present in the Plugin Route configuration, it is used instead of the metadata configuration. + +::: + +## Example usage + +We define the example model as: + +```conf +[request_definition] +r = sub, obj, act + +[policy_definition] +p = sub, obj, act + +[role_definition] +g = _, _ + +[policy_effect] +e = some(where (p.eft == allow)) + +[matchers] +m = (g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act) +``` + +And the example policy as: + +```conf +p, *, /, GET +p, admin, *, * +g, alice, admin +``` + +See [examples](https://github.com/casbin/lua-casbin/tree/master/examples) for more policy and model configurations. + +The above configuration will let anyone access the homepage (`/`) using a `GET` request while only users with admin permissions can access other pages and use other request methods. 
+ +So if we make a get request to the homepage: + +```shell +curl -i http://127.0.0.1:9080/ -X GET +``` + +But if an unauthorized user tries to access any other page, they will get a 403 error: + +```shell +curl -i http://127.0.0.1:9080/res -H 'user: bob' -X GET +HTTP/1.1 403 Forbidden +``` + +And only users with admin privileges can access the endpoints: + +```shell +curl -i http://127.0.0.1:9080/res -H 'user: alice' -X GET +``` + +## Delete Plugin + +To remove the `authz-casbin` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/*", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/authz-casdoor.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/authz-casdoor.md new file mode 100644 index 0000000..5f91c5f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/authz-casdoor.md @@ -0,0 +1,118 @@ +--- +title: authz-casdoor +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Authz Casdoor + - authz-casdoor +description: This document contains information about the Apache APISIX authz-casdoor Plugin. +--- + + + +## Description + +The `authz-casdoor` Plugin can be used to add centralized authentication with [Casdoor](https://casdoor.org/). + +## Attributes + +| Name | Type | Required | Description | +|---------------|--------|----------|----------------------------------------------| +| endpoint_addr | string | True | URL of Casdoor. | +| client_id | string | True | Client ID in Casdoor. | +| client_secret | string | True | Client secret in Casdoor. | +| callback_url | string | True | Callback URL used to receive state and code. 
| + +NOTE: `encrypt_fields = {"client_secret"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). + +:::info IMPORTANT + +`endpoint_addr` and `callback_url` should not end with '/'. + +::: + +:::info IMPORTANT + +The `callback_url` must belong to the URI of your Route. See the code snippet below for an example configuration. + +::: + +## Enable Plugin + +You can enable the Plugin on a specific Route as shown below: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/anything/*", + "plugins": { + "authz-casdoor": { + "endpoint_addr":"http://localhost:8000", + "callback_url":"http://localhost:9080/anything/callback", + "client_id":"7ceb9b7fda4a9061ec1c", + "client_secret":"3416238e1edf915eac08b8fe345b2b95cdba7e04" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +## Example usage + +Once you have enabled the Plugin, a new user visiting this Route would first be processed by the `authz-casdoor` Plugin. They would be redirected to the login page of Casdoor. + +After successfully logging in, Casdoor will redirect this user to the `callback_url` with GET parameters `code` and `state` specified. The Plugin will also request for an access token and confirm whether the user is really logged in. This process is only done once and subsequent requests are left uninterrupted. + +Once this is done, the user is redirected to the original URL they wanted to visit. + +## Delete Plugin + +To remove the `authz-casdoor` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/anything/*", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/authz-keycloak.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/authz-keycloak.md new file mode 100644 index 0000000..bb968e6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/authz-keycloak.md @@ -0,0 +1,241 @@ +--- +title: authz-keycloak +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Authz Keycloak + - authz-keycloak +description: This document contains information about the Apache APISIX authz-keycloak Plugin. +--- + + + +## Description + +The `authz-keycloak` Plugin can be used to add authentication with [Keycloak Identity Server](https://www.keycloak.org/). + +:::tip + +Although this Plugin was developed to work with Keycloak, it should work with any OAuth/OIDC and UMA compliant identity providers as well. + +::: + +Refer to [Authorization Services Guide](https://www.keycloak.org/docs/latest/authorization_services/) for more information on Keycloak. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|----------------------------------------------|---------------|----------|-----------------------------------------------|--------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| discovery | string | False | | https://host.domain/realms/foo/.well-known/uma2-configuration | URL to [discovery document](https://www.keycloak.org/docs/latest/authorization_services/index.html) of Keycloak Authorization Services. | +| token_endpoint | string | False | | https://host.domain/realms/foo/protocol/openid-connect/token | An OAuth2-compliant token endpoint that supports the `urn:ietf:params:oauth:grant-type:uma-ticket` grant type. If provided, overrides the value from discovery. | +| resource_registration_endpoint | string | False | | https://host.domain/realms/foo/authz/protection/resource_set | A UMA-compliant resource registration endpoint. If provided, overrides the value from discovery. | +| client_id | string | True | | | The identifier of the resource server to which the client is seeking access. | +| client_secret | string | False | | | The client secret, if required. You can use APISIX secret to store and reference this value. APISIX currently supports storing secrets in two ways. [Environment Variables and HashiCorp Vault](../terminology/secret.md) | +| grant_type | string | False | "urn:ietf:params:oauth:grant-type:uma-ticket" | ["urn:ietf:params:oauth:grant-type:uma-ticket"] | | +| policy_enforcement_mode | string | False | "ENFORCING" | ["ENFORCING", "PERMISSIVE"] | | +| permissions | array[string] | False | | | An array of strings, each representing a set of one or more resources and scopes the client is seeking access. 
| +| lazy_load_paths | boolean | False | false | | When set to true, dynamically resolves the request URI to resource(s) using the resource registration endpoint instead of the static permission. | +| http_method_as_scope | boolean | False | false | | When set to true, maps the HTTP request type to scope of the same name and adds to all requested permissions. | +| timeout | integer | False | 3000 | [1000, ...] | Timeout in ms for the HTTP connection with the Identity Server. | +| access_token_expires_in | integer | False | 300 | [1, ...] | Expiration time(s) of the access token. | +| access_token_expires_leeway | integer | False | 0 | [0, ...] | Expiration leeway(s) for access_token renewal. When set, the token will be renewed access_token_expires_leeway seconds before expiration. This avoids errors in cases where the access_token just expires when reaching the OAuth Resource Server. | +| refresh_token_expires_in | integer | False | 3600 | [1, ...] | The expiration time(s) of the refresh token. | +| refresh_token_expires_leeway | integer | False | 0 | [0, ...] | Expiration leeway(s) for refresh_token renewal. When set, the token will be renewed refresh_token_expires_leeway seconds before expiration. This avoids errors in cases where the refresh_token just expires when reaching the OAuth Resource Server. | +| ssl_verify | boolean | False | true | | When set to true, verifies if TLS certificate matches hostname. | +| cache_ttl_seconds | integer | False | 86400 (equivalent to 24h) | positive integer >= 1 | Maximum time in seconds up to which the Plugin caches discovery documents and tokens used by the Plugin to authenticate to Keycloak. | +| keepalive | boolean | False | true | | When set to true, enables HTTP keep-alive to keep connections open after use. Set to `true` if you are expecting a lot of requests to Keycloak. | +| keepalive_timeout | integer | False | 60000 | positive integer >= 1000 | Idle time after which the established HTTP connections will be closed. 
| +| keepalive_pool | integer | False | 5 | positive integer >= 1 | Maximum number of connections in the connection pool. | +| access_denied_redirect_uri | string | False | | [1, 2048] | URI to redirect the user to instead of returning an error message like `"error_description":"not_authorized"`. | +| password_grant_token_generation_incoming_uri | string | False | | /api/token | Set this to generate token using the password grant type. The Plugin will compare incoming request URI to this value. | + +NOTE: `encrypt_fields = {"client_secret"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). + +### Discovery and endpoints + +It is recommended to use the `discovery` attribute as the `authz-keycloak` Plugin can discover the Keycloak API endpoints from it. + +If set, the `token_endpoint` and `resource_registration_endpoint` will override the values obtained from the discovery document. + +### Client ID and secret + +The Plugin needs the `client_id` attribute for identification and to specify the context in which to evaluate permissions when interacting with Keycloak. + +If the `lazy_load_paths` attribute is set to true, then the Plugin additionally needs to obtain an access token for itself from Keycloak. In such cases, if the client access to Keycloak is confidential, you need to configure the `client_secret` attribute. + +### Policy enforcement mode + +The `policy_enforcement_mode` attribute specifies how policies are enforced when processing authorization requests sent to the server. + +#### `ENFORCING` mode + +Requests are denied by default even when there is no policy associated with a resource. + +The `policy_enforcement_mode` is set to `ENFORCING` by default. + +#### `PERMISSIVE` mode + +Requests are allowed when there is no policy associated with a given resource. 
+ +### Permissions + +When handling incoming requests, the Plugin can determine the permissions to check with Keycloak statically or dynamically from the properties of the request. + +If the `lazy_load_paths` attribute is set to `false`, the permissions are taken from the `permissions` attribute. Each entry in `permissions` needs to be formatted as expected by the token endpoint's `permission` parameter. See [Obtaining Permissions](https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_obtaining_permissions). + +:::note + +A valid permission can be a single resource or a resource paired with on or more scopes. + +::: + +If the `lazy_load_paths` attribute is set to `true`, the request URI is resolved to one or more resources configured in Keycloak using the resource registration endpoint. The resolved resources are used as the permissions to check. + +:::note + +This requires the Plugin to obtain a separate access token for itself from the token endpoint. So, make sure to set the `Service Accounts Enabled` option in the client settings in Keycloak. + +Also make sure that the issued access token contains the `resource_access` claim with the `uma_protection` role to ensure that the Plugin is able to query resources through the Protection API. + +::: + +### Automatically mapping HTTP method to scope + +The `http_method_as_scope` is often used together with `lazy_load_paths` but can also be used with a static permission list. + +If the `http_method_as_scope` attribute is set to `true`, the Plugin maps the request's HTTP method to the scope with the same name. The scope is then added to every permission to check. + +If the `lazy_load_paths` attribute is set to false, the Plugin adds the mapped scope to any of the static permissions configured in the `permissions` attribute—even if they contain on or more scopes already. 
+ +### Generating a token using `password` grant + +To generate a token using `password` grant, you can set the value of the `password_grant_token_generation_incoming_uri` attribute. + +If the incoming URI matches the configured attribute and the request method is POST, a token is generated using the `token_endpoint`. + +You also need to add `application/x-www-form-urlencoded` as `Content-Type` header and `username` and `password` as parameters. + +The example below shows a request if the `password_grant_token_generation_incoming_uri` is `/api/token`: + +```shell +curl --location --request POST 'http://127.0.0.1:9080/api/token' \ +--header 'Accept: application/json, text/plain, */*' \ +--header 'Content-Type: application/x-www-form-urlencoded' \ +--data-urlencode 'username=' \ +--data-urlencode 'password=' +``` + +## Enable Plugin + +The example below shows how you can enable the `authz-keycloak` Plugin on a specific Route. `${realm}` represents the realm name in Keycloak. + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/5 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/get", + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8090/realms/${realm}/protocol/openid-connect/token", + "permissions": ["resource name#scope name"], + "client_id": "Client ID" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +}' +``` + +## Example usage + +Once you have enabled the Plugin on a Route you can use it. 
+ +First, you have to get the JWT token from Keycloak: + +```shell +curl "http:///realms//protocol/openid-connect/token" \ + -d "client_id=" \ + -d "client_secret=" \ + -d "username=" \ + -d "password=" \ + -d "grant_type=password" +``` + +You should see a response similar to the following: + +```text +{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJoT3ludlBPY2d6Y3VWWnYtTU42bXZKMUczb0dOX2d6MFo3WFl6S2FSa1NBIn0.eyJleHAiOjE3MDMyOTAyNjAsImlhdCI6MTcwMzI4OTk2MCwianRpIjoiMjJhOGFmMzItNDM5Mi00Yzg3LThkM2UtZDkyNDVmZmNiYTNmIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMS44Mzo4MDgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwiYXVkIjoiYWNjb3VudCIsInN1YiI6IjAyZWZlY2VlLTBmYTgtNDg1OS1iYmIwLTgyMGZmZDdjMWRmYSIsInR5cCI6IkJlYXJlciIsImF6cCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsInNlc3Npb25fc3RhdGUiOiI1YzIzZjVkZC1hN2ZhLTRlMmItOWQxNC02MmI1YzYyNmU1NDYiLCJhY3IiOiIxIiwicmVhbG1fYWNjZXNzIjp7InJvbGVzIjpbImRlZmF1bHQtcm9sZXMtcXVpY2tzdGFydC1yZWFsbSIsIm9mZmxpbmVfYWNjZXNzIiwidW1hX2F1dGhvcml6YXRpb24iXX0sInJlc291cmNlX2FjY2VzcyI6eyJhY2NvdW50Ijp7InJvbGVzIjpbIm1hbmFnZS1hY2NvdW50IiwibWFuYWdlLWFjY291bnQtbGlua3MiLCJ2aWV3LXByb2ZpbGUiXX19LCJzY29wZSI6ImVtYWlsIHByb2ZpbGUiLCJzaWQiOiI1YzIzZjVkZC1hN2ZhLTRlMmItOWQxNC02MmI1YzYyNmU1NDYiLCJlbWFpbF92ZXJpZmllZCI6ZmFsc2UsInByZWZlcnJlZF91c2VybmFtZSI6InF1aWNrc3RhcnQtdXNlciJ9.WNZQiLRleqCxw-JS-MHkqXnX_BPA9i6fyVHqF8l-L-2QxcqTAwbIp7AYKX-z90CG6EdRXOizAEkQytB32eVWXaRkLeTYCI7wIrT8XSVTJle4F88ohuBOjDfRR61yFh5k8FXXdAyRzcR7tIeE2YUFkRqw1gCT_VEsUuXPqm2wTKOmZ8fRBf4T-rP4-ZJwPkHAWc_nG21TmLOBCSulzYqoC6Lc-OvX5AHde9cfRuXx-r2HhSYs4cXtvX-ijA715MY634CQdedheoGca5yzPsJWrAlBbCruN2rdb4u5bDxKU62pJoJpmAsR7d5qYpYVA6AsANDxHLk2-W5F7I_IxqR0YQ","expires_in":300,"refresh_expires_in":1800,"refresh_token":"eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJjN2IwYmY4NC1kYjk0LTQ5YzctYWIyZC01NmU3ZDc1MmRkNDkifQ.eyJleHAiOjE3MDMyOTE3NjAsImlhdCI6MTcwMzI4OTk2MCwianRpIjoiYzcyZjAzMzctYmZhNS00MWEzLTlhYjEtZmJlNGY0NmZjMDgxIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMS44Mzo4MDgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwiYXVkIjoiaHR0cDovLzE5Mi4xNjg
uMS44Mzo4MDgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwic3ViIjoiMDJlZmVjZWUtMGZhOC00ODU5LWJiYjAtODIwZmZkN2MxZGZhIiwidHlwIjoiUmVmcmVzaCIsImF6cCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsInNlc3Npb25fc3RhdGUiOiI1YzIzZjVkZC1hN2ZhLTRlMmItOWQxNC02MmI1YzYyNmU1NDYiLCJzY29wZSI6ImVtYWlsIHByb2ZpbGUiLCJzaWQiOiI1YzIzZjVkZC1hN2ZhLTRlMmItOWQxNC02MmI1YzYyNmU1NDYifQ.7AH7ppbVOlkYc9CoJ7kLSlDUkmFuNga28Amugn2t724","token_type":"Bearer","not-before-policy":0,"session_state":"5c23f5dd-a7fa-4e2b-9d14-62b5c626e546","scope":"email profile"} +``` + +Now you can make requests with the access token: + +```shell +curl http://127.0.0.1:9080/get -H 'Authorization: Bearer ${ACCESS_TOKEN}' +``` + +To learn more about how you can integrate authorization policies into your API workflows you can checkout the unit test [authz-keycloak.t](https://github.com/apache/apisix/blob/master/t/plugin/authz-keycloak.t). + +Run the following Docker image and go to `http://localhost:8090` to view the associated policies for the unit tests. + +```bash +docker run -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=123456 -p 8090:8080 sshniro/keycloak-apisix +``` + +The image below shows how the policies are configured in the Keycloak server: + +![Keycloak policy design](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/plugin/authz-keycloak.png) + +## Delete Plugin + +To remove the `authz-keycloak` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/5 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/get", + "plugins": { + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +}' +``` + +## Plugin roadmap + +- Currently, the `authz-keycloak` Plugin requires you to define the resource name and the required scopes to enforce policies for a Route. 
Keycloak's official adapters (Java, JavaScript) provide path matching by querying Keycloak paths dynamically and lazily loading the paths to identify resources.
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|----------------------|---------|----------|---------|--------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| function_uri | string | True | | | AWS API Gateway endpoint which triggers the lambda serverless function. | +| authorization | object | False | | | Authorization credentials to access the cloud function. | +| authorization.apikey | string | False | | | Generated API Key to authorize requests to the AWS Gateway endpoint. | +| authorization.iam | object | False | | | Used for AWS IAM role based authorization performed via AWS v4 request signing. See [IAM authorization schema](#iam-authorization-schema). | +| authorization.iam.accesskey | string | True | | Generated access key ID from AWS IAM console. | +| authorization.iam.secretkey | string | True | | Generated access key secret from AWS IAM console. | +| authorization.iam.aws_region | string | False | "us-east-1" | AWS region where the request is being sent. | +| authorization.iam.service | string | False | "execute-api" | The service that is receiving the request. For Amazon API gateway APIs, it should be set to `execute-api`. For Lambda function, it should be set to `lambda`. | +| timeout | integer | False | 3000 | [100,...] | Proxy request timeout in milliseconds. | +| ssl_verify | boolean | False | true | true/false | When set to `true` performs SSL verification. | +| keepalive | boolean | False | true | true/false | When set to `true` keeps the connection alive for reuse. | +| keepalive_pool | integer | False | 5 | [1,...] | Maximum number of requests that can be sent on this connection before closing it. | +| keepalive_timeout | integer | False | 60000 | [1000,...] | Time is ms for connection to remain idle without closing. 
| + +## Enable Plugin + +The example below shows how you can configure the Plugin on a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "aws-lambda": { + "function_uri": "https://x9w6z07gb9.execute-api.us-east-1.amazonaws.com/default/test-apisix", + "authorization": { + "apikey": "" + }, + "ssl_verify":false + } + }, + "uri": "/aws" +}' +``` + +Now, any requests (HTTP/1.1, HTTPS, HTTP2) to the endpoint `/aws` will invoke the configured AWS Functions URI and the response will be proxied back to the client. + +In the example below, AWS Lambda takes in name from the query and returns a message "Hello $name": + +```shell +curl -i -XGET localhost:9080/aws\?name=APISIX +``` + +```shell +HTTP/1.1 200 OK +Content-Type: application/json +Connection: keep-alive +Date: Sat, 27 Nov 2021 13:08:27 GMT +x-amz-apigw-id: JdwXuEVxIAMFtKw= +x-amzn-RequestId: 471289ab-d3b7-4819-9e1a-cb59cac611e0 +Content-Length: 16 +X-Amzn-Trace-Id: Root=1-61a22dca-600c552d1c05fec747fd6db0;Sampled=0 +Server: APISIX/2.10.2 + +"Hello, APISIX!" +``` + +Another example of a request where the client communicates with APISIX via HTTP/2 is shown below. Before proceeding, make sure you have configured `enable_http2: true` in your configuration file `config.yaml` for port `9081` and reloaded APISIX. See [`config.yaml.example`](https://github.com/apache/apisix/blob/master/conf/config.yaml.example) for the example configuration. 
+ +```shell +curl -i -XGET --http2 --http2-prior-knowledge localhost:9081/aws\?name=APISIX +``` + +```shell +HTTP/2 200 +content-type: application/json +content-length: 16 +x-amz-apigw-id: JdwulHHrIAMFoFg= +date: Sat, 27 Nov 2021 13:10:53 GMT +x-amzn-trace-id: Root=1-61a22e5d-342eb64077dc9877644860dd;Sampled=0 +x-amzn-requestid: a2c2b799-ecc6-44ec-b586-38c0e3b11fe4 +server: APISIX/2.10.2 + +"Hello, APISIX!" +``` + +Similarly, the function can be triggered via AWS API Gateway by using AWS IAM permissions for authorization. The Plugin includes authentication signatures in HTTP calls via AWS v4 request signing. The example below shows this method: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "aws-lambda": { + "function_uri": "https://ajycz5e0v9.execute-api.us-east-1.amazonaws.com/default/test-apisix", + "authorization": { + "iam": { + "accesskey": "", + "secretkey": "" + } + }, + "ssl_verify": false + } + }, + "uri": "/aws" +}' +``` + +:::note + +This approach assumes that you have already an IAM user with programmatic access enabled with the required permissions (`AmazonAPIGatewayInvokeFullAccess`) to access the endpoint. + +::: + +### Configuring path forwarding + +The `aws-lambda` Plugin also supports URL path forwarding while proxying requests to the AWS upstream. Extensions to the base request path gets appended to the `function_uri` specified in the Plugin configuration. + +:::info IMPORTANT + +The `uri` configured on a Route must end with `*` for this feature to work properly. APISIX Routes are matched strictly and the `*` implies that any subpath to this URI would be matched to the same Route. 
+ +::: + +The example below configures this feature: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "aws-lambda": { + "function_uri": "https://x9w6z07gb9.execute-api.us-east-1.amazonaws.com", + "authorization": { + "apikey": "" + }, + "ssl_verify":false + } + }, + "uri": "/aws/*" +}' +``` + +Now, any requests to the path `aws/default/test-apisix` will invoke the AWS Lambda Function and the added path is forwarded: + +```shell +curl -i -XGET http://127.0.0.1:9080/aws/default/test-apisix\?name\=APISIX +``` + +```shell +HTTP/1.1 200 OK +Content-Type: application/json +Connection: keep-alive +Date: Wed, 01 Dec 2021 14:23:27 GMT +X-Amzn-Trace-Id: Root=1-61a7855f-0addc03e0cf54ddc683de505;Sampled=0 +x-amzn-RequestId: f5f4e197-9cdd-49f9-9b41-48f0d269885b +Content-Length: 16 +x-amz-apigw-id: JrHG8GC4IAMFaGA= +Server: APISIX/2.11.0 + +"Hello, APISIX!" +``` + +## Delete Plugin + +To remove the `aws-lambda` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/aws", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/azure-functions.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/azure-functions.md new file mode 100644 index 0000000..07663eb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/azure-functions.md @@ -0,0 +1,199 @@ +--- +title: azure-functions +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Azure Functions + - azure-functions +description: This document contains information about the Apache APISIX azure-functions Plugin. 
+--- + + +## Description + +The `azure-functions` Plugin is used to integrate APISIX with [Azure Serverless Function](https://azure.microsoft.com/en-in/services/functions/) as a dynamic upstream to proxy all requests for a particular URI to the Microsoft Azure Cloud. + +When enabled, the Plugin terminates the ongoing request to the configured URI and initiates a new request to Azure Functions on behalf of the client with configured authorization details, request headers, body and parameters (all three passed from the original request). It returns back the response with headers, status code and the body to the client that initiated the request with APISIX. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|------------------------|---------|----------|---------|--------------|---------------------------------------------------------------------------------------------------------------------------------------| +| function_uri | string | True | | | Azure FunctionS endpoint which triggers the serverless function. For example, `http://test-apisix.azurewebsites.net/api/HttpTrigger`. | +| authorization | object | False | | | Authorization credentials to access Azure Functions. | +| authorization.apikey | string | False | | | Generated API key to authorize requests. | +| authorization.clientid | string | False | | | Azure AD client ID to authorize requests. | +| timeout | integer | False | 3000 | [100,...] | Proxy request timeout in milliseconds. | +| ssl_verify | boolean | False | true | true/false | When set to `true` performs SSL verification. | +| keepalive | boolean | False | true | true/false | When set to `true` keeps the connection alive for reuse. | +| keepalive_pool | integer | False | 5 | [1,...] | Maximum number of requests that can be sent on this connection before closing it. | +| keepalive_timeout | integer | False | 60000 | [1000,...] | Time is ms for connection to remain idle without closing. 
| + +## Metadata + +| Name | Type | Required | Default | Description | +|-----------------|--------|----------|---------|----------------------------------------------------------------------| +| master_apikey | string | False | "" | API Key secret that could be used to access the Azure Functions URI. | +| master_clientid | string | False | "" | Azure AD client ID that could be used to authorize the function URI. | + +Metadata can be used in the `azure-functions` Plugin for an authorization fallback. If there are no authorization details in the Plugin's attributes, the `master_apikey` and `master_clientid` configured in the metadata is used. + +The relative order priority is as follows: + +1. Plugin looks for `x-functions-key` or `x-functions-clientid` key inside the header from the request to APISIX. +2. If not found, the Plugin checks the configured attributes for authorization details. If present, it adds the respective header to the request sent to the Azure Functions. +3. If authorization details are not configured in the Plugin's attributes, APISIX fetches the metadata and uses the master keys. 
+ +To add a new master API key, you can make a request to `/apisix/admin/plugin_metadata` with the required metadata as shown below: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/azure-functions -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "master_apikey" : "" +}' +``` + +## Enable Plugin + +You can configure the Plugin on a specific Route as shown below assuming that you already have your Azure Functions up and running: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "azure-functions": { + "function_uri": "http://test-apisix.azurewebsites.net/api/HttpTrigger", + "authorization": { + "apikey": "" + } + } + }, + "uri": "/azure" +}' +``` + +Now, any requests (HTTP/1.1, HTTPS, HTTP2) to the endpoint `/azure` will invoke the configured Azure Functions URI and the response will be proxied back to the client. + +In the example below, the Azure Function takes in name from the query and returns a message "Hello $name": + +```shell +curl -i -XGET http://localhost:9080/azure\?name=APISIX +``` + +```shell +HTTP/1.1 200 OK +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Request-Context: appId=cid-v1:38aae829-293b-43c2-82c6-fa94aec0a071 +Date: Wed, 17 Nov 2021 14:46:55 GMT +Server: APISIX/2.10.2 + +Hello, APISIX +``` + +Another example of a request where the client communicates with APISIX via HTTP/2 is shown below. Before proceeding, make sure you have configured `enable_http2: true` in your configuration file `config.yaml` for port `9081` and reloaded APISIX. See [`config.yaml.example`](https://github.com/apache/apisix/blob/master/conf/config.yaml.example) for the example configuration. 
+ +```shell +curl -i -XGET --http2 --http2-prior-knowledge http://localhost:9081/azure\?name=APISIX +``` + +```shell +HTTP/2 200 +content-type: text/plain; charset=utf-8 +request-context: appId=cid-v1:38aae829-293b-43c2-82c6-fa94aec0a071 +date: Wed, 17 Nov 2021 14:54:07 GMT +server: APISIX/2.10.2 + +Hello, APISIX +``` + +### Configuring path forwarding + +The `azure-functions` Plugins also supports URL path forwarding while proxying requests to the Azure Functions upstream. Extensions to the base request path gets appended to the `function_uri` specified in the Plugin configuration. + +:::info IMPORTANT + +The `uri` configured on a Route must end with `*` for this feature to work properly. APISIX Routes are matched strictly and the `*` implies that any subpath to this URI would be matched to the same Route. + +::: + +The example below configures this feature: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "azure-functions": { + "function_uri": "http://app-bisakh.azurewebsites.net/api", + "authorization": { + "apikey": "" + } + } + }, + "uri": "/azure/*" +}' +``` + +Now, any requests to the path `azure/HttpTrigger1` will invoke the Azure Function and the added path is forwarded: + +```shell +curl -i -XGET http://127.0.0.1:9080/azure/HttpTrigger1\?name\=APISIX\ +``` + +```shell +HTTP/1.1 200 OK +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Date: Wed, 01 Dec 2021 14:19:53 GMT +Request-Context: appId=cid-v1:4d4b6221-07f1-4e1a-9ea0-b86a5d533a94 +Server: APISIX/2.11.0 + +Hello, APISIX +``` + +## Delete Plugin + +To remove the `azure-functions` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/azure", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/basic-auth.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/basic-auth.md new file mode 100644 index 0000000..37e3859 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/basic-auth.md @@ -0,0 +1,514 @@ +--- +title: basic-auth +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Basic Auth + - basic-auth +description: The basic-auth Plugin adds basic access authentication for Consumers to authenticate themselves before being able to access Upstream resources. +--- + + + + + + + +## Description + +The `basic-auth` Plugin adds [basic access authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) for [Consumers](../terminology/consumer.md) to authenticate themselves before being able to access Upstream resources. + +When a Consumer is successfully authenticated, APISIX adds additional headers, such as `X-Consumer-Username`, `X-Credential-Indentifier`, and other Consumer custom headers if configured, to the request, before proxying it to the Upstream service. The Upstream service will be able to differentiate between consumers and implement additional logics as needed. If any of these values is not available, the corresponding header will not be added. + +## Attributes + +For Consumer/Credentials: + +| Name | Type | Required | Description | +|----------|--------|----------|------------------------------------------------------------------------------------------------------------------------| +| username | string | True | Unique basic auth username for a consumer. | +| password | string | True | Basic auth password for the consumer. 
| + +NOTE: `encrypt_fields = {"password"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). + +For Route: + +| Name | Type | Required | Default | Description | +|------------------|---------|----------|---------|------------------------------------------------------------------------| +| hide_credentials | boolean | False | false | If true, do not pass the authorization request header to Upstream services. | +| anonymous_consumer | boolean | False | false | Anonymous Consumer name. If configured, allow anonymous users to bypass the authentication. | + +## Examples + +The examples below demonstrate how you can work with the `basic-auth` Plugin for different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Implement Basic Authentication on Route + +The following example demonstrates how to implement basic authentication on a Route. 
+ +Create a Consumer `johndoe`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "johndoe" + }' +``` + +Create `basic-auth` Credential for the consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-basic-auth", + "plugins": { + "basic-auth": { + "username": "johndoe", + "password": "john-key" + } + } + }' +``` + +Create a Route with `basic-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "basic-auth-route", + "uri": "/anything", + "plugins": { + "basic-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +#### Verify with a Valid Key + +Send a request to with the valid key: + +```shell +curl -i "http://127.0.0.1:9080/anything" -u johndoe:john-key +``` + +You should see an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Apikey": "john-key", + "Authorization": "Basic am9obmRvZTpqb2huLWtleQ==", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66e5107c-5bb3e24f2de5baf733aec1cc", + "X-Consumer-Username": "john", + "X-Credential-Indentifier": "cred-john-basic-auth", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 205.198.122.37", + "url": "http://127.0.0.1/get" +} +``` + +#### Verify with an Invalid Key + +Send a request with an invalid key: + +```shell +curl -i "http://127.0.0.1:9080/anything" -u johndoe:invalid-key +``` + +You should see an `HTTP/1.1 401 Unauthorized` response with the following: + +```text +{"message":"Invalid user authorization"} +``` + +#### Verify without a Key + +Send a request to without a key: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should see an `HTTP/1.1 401 Unauthorized` response 
with the following: + +```text +{"message":"Missing authorization in request"} +``` + +### Hide Authentication Information From Upstream + +The following example demonstrates how to prevent the key from being sent to the Upstream services by configuring `hide_credentials`. In APISIX, the authentication key is forwarded to the Upstream services by default, which might lead to security risks in some circumstances and you should consider updating `hide_credentials`. + +Create a Consumer `johndoe`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "johndoe" + }' +``` + +Create `basic-auth` Credential for the consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-basic-auth", + "plugins": { + "basic-auth": { + "username": "johndoe", + "password": "john-key" + } + } + }' +``` + +#### Without Hiding Credentials + +Create a Route with `basic-auth` and configure `hide_credentials` to `false`, which is the default configuration: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "id": "basic-auth-route", + "uri": "/anything", + "plugins": { + "basic-auth": { + "hide_credentials": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +Send a request with the valid key: + +```shell +curl -i "http://127.0.0.1:9080/anything" -u johndoe:john-key +``` + +You should see an `HTTP/1.1 200 OK` response with the following: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Authorization": "Basic am9obmRvZTpqb2huLWtleQ==", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66cc2195-22bd5f401b13480e63c498c6", + "X-Consumer-Username": "john", + "X-Credential-Indentifier": "cred-john-basic-auth", + 
    "X-Forwarded-Host": "127.0.0.1"
+  },
+  "json": null,
+  "method": "GET",
+  "origin": "192.168.65.1, 43.228.226.23",
+  "url": "http://127.0.0.1/anything"
+}
+```
+
+Note that the credentials are visible to the Upstream service in base64-encoded format.
+
+:::tip
+
+You can also pass the base64-encoded credentials in the request using the `Authorization` header as such:
+
+```shell
+curl -i "http://127.0.0.1:9080/anything" -H "Authorization: Basic am9obmRvZTpqb2huLWtleQ=="
+```
+
+:::
+
+#### Hide Credentials
+
+Update the plugin's `hide_credentials` to `true`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes/basic-auth-route" -X PATCH \
+-H "X-API-KEY: ${admin_key}" \
+-d '{
+  "plugins": {
+    "basic-auth": {
+      "hide_credentials": true
+    }
+  }
+}'
+```
+
+Send a request with the valid key:
+
+```shell
+curl -i "http://127.0.0.1:9080/anything" -u johndoe:john-key
+```
+
+You should see an `HTTP/1.1 200 OK` response with the following:
+
+```json
+{
+  "args": {},
+  "data": "",
+  "files": {},
+  "form": {},
+  "headers": {
+    "Accept": "*/*",
+    "Host": "127.0.0.1",
+    "User-Agent": "curl/8.6.0",
+    "X-Amzn-Trace-Id": "Root=1-66cc21a7-4f6ac87946e25f325167d53a",
+    "X-Consumer-Username": "johndoe",
+    "X-Credential-Identifier": "cred-john-basic-auth",
+    "X-Forwarded-Host": "127.0.0.1"
+  },
+  "json": null,
+  "method": "GET",
+  "origin": "192.168.65.1, 43.228.226.23",
+  "url": "http://127.0.0.1/anything"
+}
+```
+
+Note that the credentials are no longer visible to the Upstream service.
+
+### Add Consumer Custom ID to Header
+
+The following example demonstrates how you can attach a Consumer custom ID to an authenticated request in the `X-Consumer-Custom-Id` header, which can be used to implement additional logic as needed.
+ +Create a Consumer `johndoe` with a custom ID label: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "johndoe", + "labels": { + "custom_id": "495aec6a" + } + }' +``` + +Create `basic-auth` Credential for the consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-basic-auth", + "plugins": { + "basic-auth": { + "username": "johndoe", + "password": "john-key" + } + } + }' +``` + +Create a Route with `basic-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "basic-auth-route", + "uri": "/anything", + "plugins": { + "basic-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To verify, send a request to the Route with the valid key: + +```shell +curl -i "http://127.0.0.1:9080/anything" -u johndoe:john-key +``` + +You should see an `HTTP/1.1 200 OK` response with the `X-Consumer-Custom-Id` similar to the following: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Authorization": "Basic am9obmRvZTpqb2huLWtleQ==", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66ea8d64-33df89052ae198a706e18c2a", + "X-Consumer-Username": "johndoe", + "X-Credential-Identifier": "cred-john-basic-auth", + "X-Consumer-Custom-Id": "495aec6a", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "192.168.65.1, 205.198.122.37", + "url": "http://127.0.0.1/anything" +} +``` + +### Rate Limit with Anonymous Consumer + +The following example demonstrates how you can configure different rate limiting policies by regular and anonymous consumers, where the anonymous Consumer does not need to authenticate and has less quotas. 
+ +Create a regular Consumer `johndoe` and configure the `limit-count` Plugin to allow for a quota of 3 within a 30-second window: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "johndoe", + "plugins": { + "limit-count": { + "count": 3, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +Create the `basic-auth` Credential for the Consumer `johndoe`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-basic-auth", + "plugins": { + "basic-auth": { + "username": "johndoe", + "password": "john-key" + } + } + }' +``` + +Create an anonymous user `anonymous` and configure the `limit-count` Plugin to allow for a quota of 1 within a 30-second window: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +Create a Route and configure the `basic-auth` Plugin to accept anonymous Consumer `anonymous` from bypassing the authentication: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "basic-auth-route", + "uri": "/anything", + "plugins": { + "basic-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To verify, send five consecutive requests with `john`'s key: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -u johndoe:john-key -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that out of the 5 
requests, 3 requests were successful (status code 200) while the others were rejected (status code 429). + +```text +200: 3, 429: 2 +``` + +Send five anonymous requests: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that only one request was successful: + +```text +200: 1, 429: 4 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/batch-requests.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/batch-requests.md new file mode 100644 index 0000000..b9945f4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/batch-requests.md @@ -0,0 +1,225 @@ +--- +title: batch-requests +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Batch Requests +description: This document contains information about the Apache APISIX batch-request Plugin. +--- + + + +## Description + +After enabling the batch-requests plugin, users can assemble multiple requests into one request and send them to the gateway. The gateway will parse the corresponding requests from the request body and then individually encapsulate them into separate requests. Instead of the user initiating multiple HTTP requests to the gateway, the gateway will use the HTTP pipeline method, go through several stages such as route matching, forwarding to the corresponding upstream, and then return the combined results to the client after merging. + +![batch-request](https://static.apiseven.com/uploads/2023/06/27/ATzEuOn4_batch-request.png) + +In cases where the client needs to access multiple APIs, this will significantly improve performance. 
+ +:::note + +The request headers in the user’s original request (except for headers starting with “Content-”, such as “Content-Type”) will be assigned to each request in the HTTP pipeline. Therefore, to the gateway, these HTTP pipeline requests sent to itself are no different from external requests initiated directly by users. They can only access pre-configured routes and will undergo a complete authentication process, so there are no security issues. + +If the request headers of the original request conflict with those configured in the plugin, the request headers configured in the plugin will take precedence (except for the real_ip_header specified in the configuration file). + +::: + +## Attributes + +None. + +## API + +This plugin adds `/apisix/batch-requests` as an endpoint. + +:::note + +You may need to use the [public-api](public-api.md) plugin to expose this endpoint. + +::: + +## Enable Plugin + +You can enable the `batch-requests` Plugin by adding it to your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + - ... + - batch-requests +``` + +## Configuration + +By default, the maximum body size that can be sent to `/apisix/batch-requests` can't be larger than 1 MiB. You can change this configuration of the Plugin through the endpoint `apisix/admin/plugin_metadata/batch-requests`: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/batch-requests -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "max_body_size": 4194304 +}' +``` + +## Metadata + +| Name | Type | Required | Default | Valid values | Description | +| ------------- | ------- | -------- | ------- | ------------ | ------------------------------------------ | +| max_body_size | integer | True | 1048576 | [1, ...] 
| Maximum size of the request body in bytes. |
+
+## Request and response format
+
+This plugin will create an API endpoint in APISIX to handle batch requests.
+
+### Request
+
+| Name     | Type                               | Required | Default | Description                   |
+| -------- |------------------------------------| -------- | ------- | ----------------------------- |
+| query    | object                             | False    |         | Query string for the request. |
+| headers  | object                             | False    |         | Headers for all the requests. |
+| timeout  | integer                            | False    | 30000   | Timeout in ms.                |
+| pipeline | array[[HttpRequest](#httprequest)] | True     |         | Details of the request.       |
+
+#### HttpRequest
+
+| Name       | Type    | Required | Default | Valid                                                                              | Description                                                                             |
+| ---------- | ------- | -------- | ------- | -------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- |
+| version    | string  | False    | 1.1     | [1.0, 1.1]                                                                         | HTTP version.                                                                           |
+| method     | string  | False    | GET     | ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS", "CONNECT", "TRACE"]  | HTTP method.                                                                            |
+| query      | object  | False    |         |                                                                                    | Query string for the request. If set, overrides the value of the global query string.  |
+| headers    | object  | False    |         |                                                                                    | Headers for the request. If set, overrides the value of the global headers.            |
+| path       | string  | True     |         |                                                                                    | Path of the HTTP request.                                                               |
+| body       | string  | False    |         |                                                                                    | Body of the HTTP request.                                                               |
+| ssl_verify | boolean | False    | false   |                                                                                    | Set to verify if the SSL certificate matches the hostname.                              |
+
+### Response
+
+The response is an array of [HttpResponses](#httpresponse).
+
+#### HttpResponse
+
+| Name    | Type    | Description            |
+| ------- | ------- | ---------------------- |
+| status  | integer | HTTP status code.      |
+| reason  | string  | HTTP reason-phrase.    |
+| body    | string  | HTTP response body.    |
+| headers | object  | HTTP response headers.
| + +## Specifying a custom URI + +You can specify a custom URI with the [public-api](public-api.md) Plugin. + +You can set the URI you want when creating the Route and change the configuration of the public-api Plugin: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/br -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/batch-requests", + "plugins": { + "public-api": { + "uri": "/apisix/batch-requests" + } + } +}' +``` + +## Example usage + +First, you need to setup a Route to the batch request API. We will use the [public-api](public-api.md) Plugin for this: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/br -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/apisix/batch-requests", + "plugins": { + "public-api": {} + } +}' +``` + +Now you can make a request to the batch request API (`/apisix/batch-requests`): + +```shell +curl --location --request POST 'http://127.0.0.1:9080/apisix/batch-requests' \ +--header 'Content-Type: application/json' \ +--data '{ + "headers": { + "Content-Type": "application/json", + "admin-jwt":"xxxx" + }, + "timeout": 500, + "pipeline": [ + { + "method": "POST", + "path": "/community.GiftSrv/GetGifts", + "body": "test" + }, + { + "method": "POST", + "path": "/community.GiftSrv/GetGifts", + "body": "test2" + } + ] +}' +``` + +This will give a response: + +```json +[ + { + "status": 200, + "reason": "OK", + "body": "{\"ret\":500,\"msg\":\"error\",\"game_info\":null,\"gift\":[],\"to_gets\":0,\"get_all_msg\":\"\"}", + "headers": { + "Connection": "keep-alive", + "Date": "Sat, 11 Apr 2020 17:53:20 GMT", + "Content-Type": "application/json", + "Content-Length": "81", + "Server": "APISIX web server" + } + }, + { + "status": 200, + "reason": "OK", + "body": "{\"ret\":500,\"msg\":\"error\",\"game_info\":null,\"gift\":[],\"to_gets\":0,\"get_all_msg\":\"\"}", + "headers": { + "Connection": "keep-alive", + "Date": "Sat, 11 Apr 2020 17:53:20 GMT", + "Content-Type": "application/json", + "Content-Length": "81", + "Server": 
"APISIX web server" + } + } +] +``` + +## Delete Plugin + +You can remove `batch-requests` from your list of Plugins in your configuration file (`conf/config.yaml`). diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/body-transformer.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/body-transformer.md new file mode 100644 index 0000000..7e433be --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/body-transformer.md @@ -0,0 +1,609 @@ +--- +title: body-transformer +keywords: + - Apache APISIX + - API Gateway + - Plugin + - BODY TRANSFORMER + - body-transformer +description: The body-transformer Plugin performs template-based transformations to transform the request and/or response bodies from one format to another, for example, from JSON to JSON, JSON to HTML, or XML to YAML. +--- + + + + + + + +## Description + +The `body-transformer` Plugin performs template-based transformations to transform the request and/or response bodies from one format to another, for example, from JSON to JSON, JSON to HTML, or XML to YAML. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ------------- | ------- | -------- | ------- | ------------ | ------------------------------------------ | +| `request` | object | False | | | Request body transformation configuration. | +| `request.input_format` | string | False | | [`xml`,`json`,`encoded`,`args`,`plain`,`multipart`] | Request body original media type. If unspecified, the value would be determined by the `Content-Type` header to apply the corresponding decoder. The `xml` option corresponds to `text/xml` media type. The `json` option corresponds to `application/json` media type. The `encoded` option corresponds to `application/x-www-form-urlencoded` media type. The `args` option corresponds to GET requests. The `plain` option corresponds to `text/plain` media type. 
The `multipart` option corresponds to `multipart/related` media type. If the media type is neither type, the value would be left unset and the transformation template will be directly applied. | +| `request.template` | string | True | | | Request body transformation template. The template uses [lua-resty-template](https://github.com/bungle/lua-resty-template) syntax. See the [template syntax](https://github.com/bungle/lua-resty-template#template-syntax) for more details. You can also use auxiliary functions `_escape_json()` and `_escape_xml()` to escape special characters such as double quotes, `_body` to access request body, and `_ctx` to access context variables. | +| `request.template_is_base64` | boolean | False | false | | Set to true if the template is base64 encoded. | +| `response` | object | False | | | Response body transformation configuration. | +| `response.input_format` | string | False | | [`xml`,`json`] | Response body original media type. If unspecified, the value would be determined by the `Content-Type` header to apply the corresponding decoder. If the media type is neither `xml` nor `json`, the value would be left unset and the transformation template will be directly applied. | +| `response.template` | string | True | | | Response body transformation template. | +| `response.template_is_base64` | boolean | False | false | | Set to true if the template is base64 encoded. | + +## Examples + +The examples below demonstrate how you can configure `body-transformer` for different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +The transformation template uses [lua-resty-template](https://github.com/bungle/lua-resty-template) syntax. See the [template syntax](https://github.com/bungle/lua-resty-template#template-syntax) to learn more. 
+ +You can also use auxiliary functions `_escape_json()` and `_escape_xml()` to escape special characters such as double quotes, `_body` to access request body, and `_ctx` to access context variables. + +In all cases, you should ensure that the transformation template is a valid JSON string. + +### Transform between JSON and XML SOAP + +The following example demonstrates how to transform the request body from JSON to XML and the response body from XML to JSON when working with a SOAP Upstream service. + +Start the sample SOAP service: + +```shell +cd /tmp +git clone https://github.com/spring-guides/gs-soap-service.git +cd gs-soap-service/complete +./mvnw spring-boot:run +``` + +Create the request and response transformation templates: + +```shell +req_template=$(cat < + + + + {{_escape_xml(name)}} + + + +EOF +) + +rsp_template=$(cat < 18 then + context._multipart:set_simple("status", "adult") + else + context._multipart:set_simple("status", "minor") + end + + local body = context._multipart:tostring() +%}{* body *} +EOF +) +``` + +Create a Route with `body-transformer`, which sets the `input_format` to `multipart` and uses the previously created request template for transformation: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "body-transformer-route", + "uri": "/anything", + "plugins": { + "body-transformer": { + "request": { + "input_format": "multipart", + "template": "'"$req_template"'" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a multipart POST request to the Route: + +```shell +curl -X POST \ + -F "name=john" \ + -F "age=10" \ + "http://127.0.0.1:9080/anything" +``` + +You should see a response similar to the following: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": { + "age": "10", + "name": "john", + "status": "minor" + }, + "headers": { + "Accept": "*/*", + "Content-Length": "361", + 
"Content-Type": "multipart/form-data; boundary=------------------------qtPjk4c8ZjmGOXNKzhqnOP", + ... + }, + ... +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/brotli.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/brotli.md new file mode 100644 index 0000000..3c7517a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/brotli.md @@ -0,0 +1,138 @@ +--- +title: brotli +keywords: + - Apache APISIX + - API Gateway + - Plugin + - brotli +description: This document contains information about the Apache APISIX brotli Plugin. +--- + + + +## Description + +The `brotli` Plugin dynamically sets the behavior of [brotli in Nginx](https://github.com/google/ngx_brotli). + +## Prerequisites + +This Plugin requires brotli shared libraries. + +The example commands to build and install brotli shared libraries: + +``` shell +wget https://github.com/google/brotli/archive/refs/tags/v1.1.0.zip +unzip v1.1.0.zip +cd brotli-1.1.0 && mkdir build && cd build +cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local/brotli .. +sudo cmake --build . --config Release --target install +sudo sh -c "echo /usr/local/brotli/lib >> /etc/ld.so.conf.d/brotli.conf" +sudo ldconfig +``` + +:::caution + +If the upstream is returning a compressed response, then the Brotli plugin won't be able to compress it. + +::: + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|----------------|----------------------|----------|---------------|--------------|-----------------------------------------------------------------------------------------| +| types | array[string] or "*" | False | ["text/html"] | | Dynamically sets the `brotli_types` directive. Special value `"*"` matches any MIME type. | +| min_length | integer | False | 20 | >= 1 | Dynamically sets the `brotli_min_length` directive. | +| comp_level | integer | False | 6 | [0, 11] | Dynamically sets the `brotli_comp_level` directive. 
| +| mode | integer | False | 0 | [0, 2] | Dynamically sets the `brotli decompress mode`, more info in [RFC 7932](https://tools.ietf.org/html/rfc7932). | +| lgwin | integer | False | 19 | [0, 10-24] | Dynamically sets the `brotli sliding window size`, `lgwin` is Base 2 logarithm of the sliding window size, set to `0` lets compressor decide over the optimal value, more info in [RFC 7932](https://tools.ietf.org/html/rfc7932). | +| lgblock | integer | False | 0 | [0, 16-24] | Dynamically sets the `brotli input block size`, `lgblock` is Base 2 logarithm of the maximum input block size, set to `0` lets compressor decide over the optimal value, more info in [RFC 7932](https://tools.ietf.org/html/rfc7932). | +| http_version | number | False | 1.1 | 1.1, 1.0 | Like the `gzip_http_version` directive, sets the minimum HTTP version of a request required to compress a response. | +| vary | boolean | False | false | | Like the `gzip_vary` directive, enables or disables inserting the “Vary: Accept-Encoding” response header field. 
| + +## Enable Plugin + +The example below enables the `brotli` Plugin on the specified Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/", + "plugins": { + "brotli": { + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' +``` + +## Example usage + +Once you have configured the Plugin as shown above, you can make a request as shown below: + +```shell +curl http://127.0.0.1:9080/ -i -H "Accept-Encoding: br" +``` + +``` +HTTP/1.1 200 OK +Content-Type: text/html; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Date: Tue, 05 Dec 2023 03:06:49 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/3.6.0 +Content-Encoding: br + +Warning: Binary output can mess up your terminal. Use "--output -" to tell +Warning: curl to output it to your terminal anyway, or consider "--output +Warning: " to save to a file. +``` + +## Delete Plugin + +To remove the `brotli` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/cas-auth.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/cas-auth.md new file mode 100644 index 0000000..37d23b0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/cas-auth.md @@ -0,0 +1,117 @@ +--- +title: cas-auth +keywords: + - Apache APISIX + - API Gateway + - Plugin + - CAS AUTH + - cas-auth +description: This document contains information about the Apache APISIX cas-auth Plugin. +--- + + + +## Description + +The `cas-auth` Plugin can be used to access CAS (Central Authentication Service 2.0) IdP (Identity Provider) +to do authentication, from the SP (service provider) perspective. + +## Attributes + +| Name | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | +| `idp_uri` | string | True | URI of IdP. | +| `cas_callback_uri` | string | True | redirect uri used to callback the SP from IdP after login or logout. | +| `logout_uri` | string | True | logout uri to trigger logout. 
| + +## Enable Plugin + +You can enable the Plugin on a specific Route as shown below: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/cas1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET", "POST"], + "host" : "127.0.0.1", + "uri": "/anything/*", + "plugins": { + "cas-auth": { + "idp_uri": "http://127.0.0.1:8080/realms/test/protocol/cas", + "cas_callback_uri": "/anything/cas_callback", + "logout_uri": "/anything/logout" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' + +``` + +## Configuration description + +Once you have enabled the Plugin, a new user visiting this Route would first be processed by the `cas-auth` Plugin. +If no login session exists, the user would be redirected to the login page of `idp_uri`. + +After successfully logging in from IdP, IdP will redirect this user to the `cas_callback_uri` with +GET parameters CAS ticket specified. If the ticket gets verified, the login session would be created. + +This process is only done once and subsequent requests are left uninterrupted. +Once this is done, the user is redirected to the original URL they wanted to visit. + +Later, the user could visit `logout_uri` to start logout process. The user would be redirected to `idp_uri` to do logout. + +Note that, `cas_callback_uri` and `logout_uri` should be +either full qualified address (e.g. `http://127.0.0.1:9080/anything/logout`), +or path only (e.g. `/anything/logout`), but it is recommended to be path only to keep consistent. + +These uris need to be captured by the route where the current APISIX is located. +For example, if the `uri` of the current route is `/api/v1/*`, `cas_callback_uri` can be filled in as `/api/v1/cas_callback`. 
+ +## Delete Plugin + +To remove the `cas-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/cas1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET", "POST"], + "uri": "/anything/*", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/chaitin-waf.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/chaitin-waf.md new file mode 100644 index 0000000..c5ae8f4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/chaitin-waf.md @@ -0,0 +1,284 @@ +--- +title: chaitin-waf +keywords: + - Apache APISIX + - API Gateway + - Plugin + - WAF +description: This document contains basic information about the Apache APISIX `chaitin-waf` plugin. +--- + + + +## Description + +After enabling the chaitin-waf plugin, the traffic will be forwarded to the Chaitin WAF service for detection and +prevention of various web application attacks, ensuring the security of the application and user data. + +## Response Headers + +Depending on the plugin configuration, it is optional to add additional response headers. + +The response headers are listed below: + +- **X-APISIX-CHAITIN-WAF**: Whether APISIX forwards the request to the WAF server. + - yes: forwarded + - no: not forwarded + - unhealthy: matches the match variables, but no WAF server is available. + - err: an error occurred during the execution of the plugin. Also includes the **X-APISIX-CHAITIN-WAF-ERROR** header. + - waf-err: error while interacting with the WAF server. Also includes the **X-APISIX-CHAITIN-WAF-ERROR** header. + - timeout: request to the WAF server timed out. +- **X-APISIX-CHAITIN-WAF-ERROR**: Debug header. Contains WAF error message. 
+- **X-APISIX-CHAITIN-WAF-TIME**: The time in milliseconds that APISIX spent interacting with WAF. +- **X-APISIX-CHAITIN-WAF-STATUS**: The status code returned to APISIX by the WAF server. +- **X-APISIX-CHAITIN-WAF-ACTION**: The action returned to APISIX by the WAF server. + - pass: request valid and passed. + - reject: request rejected by WAF service. +- **X-APISIX-CHAITIN-WAF-SERVER**: Debug header. Indicates which WAF server was selected. + +## Plugin Metadata + +| Name | Type | Required | Default value | Description | +|--------------------------|---------------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------| +| nodes | array(object) | true | | A list of addresses for the Chaitin SafeLine WAF service. | +| nodes[0].host | string | true | | The address of Chaitin SafeLine WAF service. Supports IPv4, IPv6, Unix Socket, etc. | +| nodes[0].port | integer | false | 80 | The port of the Chaitin SafeLine WAF service. | +| mode | string | false | block | The global default mode if a Route doesn't specify its own: `off`, `monitor`, or `block`. | +| config | object | false | | WAF configuration defaults if none are specified on the Route. | +| config.connect_timeout | integer | false | 1000 | Connect timeout, in milliseconds. | +| config.send_timeout | integer | false | 1000 | Send timeout, in milliseconds. | +| config.read_timeout | integer | false | 1000 | Read timeout, in milliseconds. | +| config.req_body_size | integer | false | 1024 | Request body size, in KB. | +| config.keepalive_size | integer | false | 256 | Maximum concurrent idle connections to the SafeLine WAF detection service. | +| config.keepalive_timeout | integer | false | 60000 | Idle connection timeout, in milliseconds. | +| config.real_client_ip | boolean | false | true | Specifies whether to use the `X-Forwarded-For` as the client IP (if present). 
If `false`, uses the direct client IP from the connection. | + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```bash +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/chaitin-waf -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "nodes": [ + { + "host": "unix:/path/to/safeline/resources/detector/snserver.sock", + "port": 8000 + } + ], + "mode": "block", + "config": { + "real_client_ip": true + } +}' + +``` + +## Attributes + +| Name | Type | Required | Default value | Description | +|--------------------------|---------------|----------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| mode | string | false | block | Determines how the plugin behaves for matched requests. Valid values are `off`, `monitor`, or `block`. When set to `off`, the plugin skips WAF checks. In `monitor` mode, the plugin logs potential blocks without actually blocking the request. In `block` mode, the plugin enforces blocks as determined by the WAF service. | +| match | array[object] | false | | A list of matching rules. The plugin evaluates these rules to decide whether to perform the WAF check on a request. If empty, all requests are processed. | +| match.vars | array[array] | false | | List of variables used for matching requests. Each rule is specified as `[variable, operator, value]` (for example, `["http_waf", "==", "true"]`). These variables refer to NGINX internal variables. For supported operators, see [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list). 
| +| append_waf_resp_header | bool | false | true | Determines whether the plugin adds WAF-related response headers (such as `X-APISIX-CHAITIN-WAF`, `X-APISIX-CHAITIN-WAF-ACTION`, etc.) to the response. | +| append_waf_debug_header | bool | false | false | Determines whether debugging headers (such as `X-APISIX-CHAITIN-WAF-ERROR` and `X-APISIX-CHAITIN-WAF-SERVER`) are added. Effective only when `append_waf_resp_header` is enabled. | +| config | object | false | | Provides route-specific configuration for the Chaitin SafeLine WAF service. Settings here override the corresponding metadata defaults when specified. | +| config.connect_timeout | integer | false | 1000 | The connect timeout for the WAF server, in milliseconds. | +| config.send_timeout | integer | false | 1000 | The send timeout for transmitting data to the WAF server, in milliseconds. | +| config.read_timeout | integer | false | 1000 | The read timeout for receiving data from the WAF server, in milliseconds. | +| config.req_body_size | integer | false | 1024 | The maximum allowed request body size, in KB. | +| config.keepalive_size | integer | false | 256 | The maximum number of idle connections to the WAF detection service that can be maintained concurrently. | +| config.keepalive_timeout | integer | false | 60000 | The idle connection timeout for the WAF service, in milliseconds. | +| config.real_client_ip | boolean | false | true | Specifies whether to determine the client IP from the `X-Forwarded-For` header. If set to `false`, the plugin uses the direct client IP from the connection. | + +Below is a sample Route configuration that uses: + +- httpbun.org as the upstream backend. +- mode set to monitor, so the plugin only logs potential blocks. +- A matching rule that triggers the plugin when the custom header waf: true is set. +- An override to disable the `real client IP` logic by setting config.real_client_ip to false. 
+ +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" \ + -X PUT -d ' +{ + "uri": "/*", + "plugins": { + "chaitin-waf": { + "mode": "monitor", + "match": [ + { + "vars": [ + ["http_waf", "==", "true"] + ] + } + ], + "config": { + "real_client_ip": false + }, + "append_waf_resp_header": true, + "append_waf_debug_header": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbun.org:80": 1 + } + } +}' +``` + +## Test Plugin + +With the sample configuration described above (including your chosen `mode` and `real_client_ip` settings), the plugin behaves as follows: + +- **If the `match` condition is not satisfied** (for example, `waf: true` is missing), the request proceeds normally without contacting the WAF. You can observe: + + ```bash + curl -H "Host: httpbun.org" http://127.0.0.1:9080/get -i + + HTTP/1.1 200 OK + Content-Type: application/json + Content-Length: 408 + Connection: keep-alive + X-APISIX-CHAITIN-WAF: no + Date: Wed, 19 Jul 2023 09:30:42 GMT + X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 + Server: APISIX/3.3.0 + + { + "args": {}, + "headers": { + "Accept": "*/*", + "Connection": "close", + "Host": "httpbun.org", + "User-Agent": "curl/8.1.2", + "X-Forwarded-For": "127.0.0.1", + "X-Forwarded-Host": "httpbun.org", + "X-Forwarded-Port": "9080", + "X-Forwarded-Proto": "http", + "X-Real-Ip": "127.0.0.1" + }, + "method": "GET", + "origin": "127.0.0.1, 122.231.76.178", + "url": "http://httpbun.org/get" + } + ``` + +- **Potential injection requests** (e.g., containing SQL snippets) are forwarded unmodified if they do not meet the plugin’s match rules, and might result in a `404 Not Found` or other response from the upstream: + + ```bash + curl -H "Host: httpbun.org" http://127.0.0.1:9080/getid=1%20AND%201=1 -i + + HTTP/1.1 404 Not Found + Content-Type: text/plain; charset=utf-8 + Content-Length: 19 + Connection: keep-alive + X-APISIX-CHAITIN-WAF: no + Date: Wed, 19 Jul 2023 09:30:28 
GMT + X-Content-Type-Options: nosniff + X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 + Server: APISIX/3.3.0 + + 404 page not found + ``` + +- **Matching safe requests** (those that satisfy `match.vars`, such as `-H "waf: true"`) are checked by the WAF. If deemed harmless, you see: + + ```bash + curl -H "Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/get -i + + HTTP/1.1 200 OK + Content-Type: application/json + Content-Length: 427 + Connection: keep-alive + X-APISIX-CHAITIN-WAF-TIME: 2 + X-APISIX-CHAITIN-WAF-STATUS: 200 + X-APISIX-CHAITIN-WAF: yes + X-APISIX-CHAITIN-WAF-ACTION: pass + Date: Wed, 19 Jul 2023 09:29:58 GMT + X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 + Server: APISIX/3.3.0 + + { + "args": {}, + "headers": { + "Accept": "*/*", + "Connection": "close", + "Host": "httpbun.org", + "User-Agent": "curl/8.1.2", + "Waf": "true", + "X-Forwarded-For": "127.0.0.1", + "X-Forwarded-Host": "httpbun.org", + "X-Forwarded-Port": "9080", + "X-Forwarded-Proto": "http", + "X-Real-Ip": "127.0.0.1" + }, + "method": "GET", + "origin": "127.0.0.1, 122.231.76.178", + "url": "http://httpbun.org/get" + } + ``` + +- **Suspicious requests** that meet the plugin’s match rules and are flagged by the WAF are typically rejected with a 403 status, along with headers that include `X-APISIX-CHAITIN-WAF-ACTION: reject`. 
For example: + + ```bash + curl -H "Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/getid=1%20AND%201=1 -i + + HTTP/1.1 403 Forbidden + Date: Wed, 19 Jul 2023 09:29:06 GMT + Content-Type: text/plain; charset=utf-8 + Transfer-Encoding: chunked + Connection: keep-alive + X-APISIX-CHAITIN-WAF: yes + X-APISIX-CHAITIN-WAF-TIME: 2 + X-APISIX-CHAITIN-WAF-ACTION: reject + X-APISIX-CHAITIN-WAF-STATUS: 403 + Server: APISIX/3.3.0 + Set-Cookie: sl-session=UdywdGL+uGS7q8xMfnJlbQ==; Domain=; Path=/; Max-Age=86400 + + {"code": 403, "success":false, "message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "51a268653f2c4189bfa3ec66afbcb26d"} + ``` + +## Delete Plugin + +To remove the `chaitin-waf` plugin, you can delete the corresponding JSON configuration from the Plugin configuration. +APISIX will automatically reload and you do not have to restart for this to take effect: + +```bash +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbun.org:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/clickhouse-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/clickhouse-logger.md new file mode 100644 index 0000000..023f9e9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/clickhouse-logger.md @@ -0,0 +1,207 @@ +--- +title: clickhouse-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ClickHouse Logger +description: This document contains information about the Apache APISIX clickhouse-logger Plugin. +--- + + + +## Description + +The `clickhouse-logger` Plugin is used to push logs to [ClickHouse](https://clickhouse.com/) database. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|---------------|---------|----------|---------------------|--------------|----------------------------------------------------------------| +| endpoint_addr | Deprecated | True | | | Use `endpoint_addrs` instead. ClickHouse endpoints. | +| endpoint_addrs | array | True | | | ClickHouse endpoints. | +| database | string | True | | | Name of the database to store the logs. | +| logtable | string | True | | | Table name to store the logs. | +| user | string | True | | | ClickHouse username. | +| password | string | True | | | ClickHouse password. | +| timeout | integer | False | 3 | [1,...] | Time to keep the connection alive for after sending a request. | +| name | string | False | "clickhouse logger" | | Unique identifier for the logger. If you use Prometheus to monitor APISIX metrics, the name is exported in `apisix_batch_process_entries`. | +| ssl_verify | boolean | False | true | [true,false] | When set to `true`, verifies SSL. | +| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | +| include_req_body_expr | array | False | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | +| include_resp_body | boolean | False | false | [false, true] | When set to `true` includes the response body in the log. 
| +| include_resp_body_expr | array | False | | | Filter for when the `include_resp_body` attribute is set to `true`. Response body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | + +NOTE: `encrypt_fields = {"password"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +### Example of default log format + +```json +{ + "response": { + "status": 200, + "size": 118, + "headers": { + "content-type": "text/plain", + "connection": "close", + "server": "APISIX/3.7.0", + "content-length": "12" + } + }, + "client_ip": "127.0.0.1", + "upstream_latency": 3, + "apisix_latency": 98.999998092651, + "upstream": "127.0.0.1:1982", + "latency": 101.99999809265, + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "route_id": "1", + "start_time": 1704507612177, + "service_id": "", + "request": { + "method": "POST", + "querystring": { + "foo": "unknown" + }, + "headers": { + "host": "localhost", + "connection": "close", + "content-length": "18" + }, + "size": 110, + "uri": "/hello?foo=unknown", + "url": "http://localhost:1984/hello?foo=unknown" + } +} +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. 
The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `clickhouse-logger` Plugin. + +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/clickhouse-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +You can use the clickhouse docker image to create a container like so: + +```shell +docker run -d -p 8123:8123 -p 9000:9000 -p 9009:9009 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +``` + +Then create a table in your ClickHouse database to store the logs. 
+ +```shell +curl -X POST 'http://localhost:8123/' \ +--data-binary 'CREATE TABLE default.test (host String, client_ip String, route_id String, service_id String, `@timestamp` String, PRIMARY KEY(`@timestamp`)) ENGINE = MergeTree()' --user default: +``` + +## Enable Plugin + +If multiple endpoints are configured, they will be written randomly. +The example below shows how you can enable the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "", + "database": "default", + "logtable": "test", + "endpoint_addrs": ["http://127.0.0.1:8123"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your ClickHouse database: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +Now, if you check for the rows in the table, you will get the following output: + +```shell +curl 'http://localhost:8123/?query=select%20*%20from%20default.test' +127.0.0.1 127.0.0.1 1 2023-05-08T19:15:53+05:30 +``` + +## Delete Plugin + +To remove the `clickhouse-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/client-control.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/client-control.md new file mode 100644 index 0000000..eb702bf --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/client-control.md @@ -0,0 +1,113 @@ +--- +title: client-control +keywords: + - Apache APISIX + - API Gateway + - Client Control +description: This document describes the Apache APISIX client-control Plugin, you can use it to control NGINX behavior to handle a client request dynamically. +--- + + + +## Description + +The `client-control` Plugin can be used to dynamically control the behavior of NGINX to handle a client request, by setting the max size of the request body. + +:::info IMPORTANT + +This Plugin requires APISIX to run on APISIX-Runtime. See [apisix-build-tools](https://github.com/api7/apisix-build-tools) for more info. + +::: + +## Attributes + +| Name | Type | Required | Valid values | Description | +| ------------- | ------- | -------- | ------------ | ------------------------------------------------------------------------------------------------------------------------------------ | +| max_body_size | integer | False | [0,...] | Set the maximum limit for the client request body and dynamically adjust the size of [`client_max_body_size`](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size), measured in bytes. If you set the `max_body_size` to 0, then the size of the client's request body will not be checked. 
| + +## Enable Plugin + +The example below enables the Plugin on a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "client-control": { + "max_body_size" : 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +Now since you have configured the `max_body_size` to `1` above, you will get the following message when you make a request: + +```shell +curl -i http://127.0.0.1:9080/index.html -d '123' +``` + +```shell +HTTP/1.1 413 Request Entity Too Large +... + +413 Request Entity Too Large + +

+<html>
+<head><title>413 Request Entity Too Large</title></head>
+<body>
+<center><h1>413 Request Entity Too Large</h1></center>
+<hr><center>openresty</center>
+</body>
+</html>
+ + +``` + +## Delete Plugin + +To remove the `client-control` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload, and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/consumer-restriction.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/consumer-restriction.md new file mode 100644 index 0000000..8686752 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/consumer-restriction.md @@ -0,0 +1,347 @@ +--- +title: consumer-restriction +keywords: + - Apache APISIX + - API Gateway + - Consumer restriction +description: The Consumer Restriction Plugin allows users to configure access restrictions on Consumer, Route, Service, or Consumer Group. +--- + + + +## Description + +The `consumer-restriction` Plugin allows users to configure access restrictions on Consumer, Route, Service, or Consumer Group. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| -------------------------- | ------------- | -------- | ------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| type | string | False | consumer_name | ["consumer_name", "consumer_group_id", "service_id", "route_id"] | Type of object to base the restriction on. | +| whitelist | array[string] | True | | | List of objects to whitelist. Has a higher priority than `allowed_by_methods`. | +| blacklist | array[string] | True | | | List of objects to blacklist. Has a higher priority than `whitelist`. | +| rejected_code | integer | False | 403 | [200,...] 
| HTTP status code returned when the request is rejected. | +| rejected_msg | string | False | | | Message returned when the request is rejected. | +| allowed_by_methods | array[object] | False | | | List of allowed configurations for Consumer settings, including a username of the Consumer and a list of allowed HTTP methods. | +| allowed_by_methods.user | string | False | | | A username for a Consumer. | +| allowed_by_methods.methods | array[string] | False | | ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS", "CONNECT", "TRACE", "PURGE"] | List of allowed HTTP methods for a Consumer. | + +:::note + +The different values in the `type` attribute have these meanings: + +- `consumer_name`: Username of the Consumer to restrict access to a Route or a Service. +- `consumer_group_id`: ID of the Consumer Group to restrict access to a Route or a Service. +- `service_id`: ID of the Service to restrict access from a Consumer. Need to be used with an Authentication Plugin. +- `route_id`: ID of the Route to restrict access from a Consumer. + +::: + +## Example usage + +### Restricting by `consumer_name` + +The example below shows how you can use the `consumer-restriction` Plugin on a Route to restrict specific consumers. 
+ +You can first create two consumers `jack1` and `jack2`: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "username": "jack1", + "plugins": { + "basic-auth": { + "username":"jack2019", + "password": "123456" + } + } +}' + +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "username": "jack2", + "plugins": { + "basic-auth": { + "username":"jack2020", + "password": "123456" + } + } +}' +``` + +Next, you can configure the Plugin to the Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "whitelist": [ + "jack1" + ] + } + } +}' +``` + +Now, this configuration will only allow `jack1` to access your Route: + +```shell +curl -u jack2019:123456 http://127.0.0.1:9080/index.html +``` + +```shell +HTTP/1.1 200 OK +``` + +And requests from `jack2` are blocked: + +```shell +curl -u jack2020:123456 http://127.0.0.1:9080/index.html -i +``` + +```shell +HTTP/1.1 403 Forbidden +... 
+{"message":"The consumer_name is forbidden."} +``` + +### Restricting by `allowed_by_methods` + +The example below configures the Plugin to a Route to restrict `jack1` to only make `POST` requests: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "allowed_by_methods":[{ + "user": "jack1", + "methods": ["POST"] + }] + } + } +}' +``` + +Now if `jack1` makes a `GET` request, the access is restricted: + +```shell +curl -u jack2019:123456 http://127.0.0.1:9080/index.html +``` + +```shell +HTTP/1.1 403 Forbidden +... +{"message":"The consumer_name is forbidden."} +``` + +To also allow `GET` requests, you can update the Plugin configuration and it would be reloaded automatically: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "allowed_by_methods":[{ + "user": "jack1", + "methods": ["POST","GET"] + }] + } + } +}' +``` + +Now, if a `GET` request is made: + +```shell +curl -u jack2019:123456 http://127.0.0.1:9080/index.html +``` + +```shell +HTTP/1.1 200 OK +``` + +### Restricting by `service_id` + +To restrict a Consumer from accessing a Service, you also need to use an Authentication Plugin. The example below uses the [key-auth](./key-auth.md) Plugin. 
+ +First, you can create two services: + +```shell +curl http://127.0.0.1:9180/apisix/admin/services/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 001" +}' + +curl http://127.0.0.1:9180/apisix/admin/services/2 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 002" +}' +``` + +Then configure the `consumer-restriction` Plugin on the Consumer with the `key-auth` Plugin and the `service_id` to whitelist. + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "new_consumer", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "consumer-restriction": { + "type": "service_id", + "whitelist": [ + "1" + ], + "rejected_code": 403 + } + } +}' +``` + +Finally, you can configure the `key-auth` Plugin and bind the service to the Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "service_id": 1, + "plugins": { + "key-auth": { + } + } +}' +``` + +Now, if you test the Route, you should be able to access the Service: + +```shell +curl http://127.0.0.1:9080/index.html -H 'apikey: auth-jack' -i +``` + +```shell +HTTP/1.1 200 OK +... 
+``` + +Now, if the Route is configured to the Service with `service_id` `2`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "service_id": 2, + "plugins": { + "key-auth": { + } + } +}' +``` + +Since the Service is not in the whitelist, it cannot be accessed: + +```shell +curl http://127.0.0.1:9080/index.html -H 'apikey: auth-jack' -i +``` + +```shell +HTTP/1.1 403 Forbidden +... +{"message":"The service_id is forbidden."} +``` + +## Delete Plugin + +To remove the `consumer-restriction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {} + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/cors.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/cors.md new file mode 100644 index 0000000..a5fa0d5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/cors.md @@ -0,0 +1,142 @@ +--- +title: cors +keywords: + - Apache APISIX + - API Gateway + - CORS +description: This document contains information about the Apache APISIX cors Plugin. +--- + + + +## Description + +The `cors` Plugins lets you enable [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) easily. 
+ +## Attributes + +### CORS attributes + +| Name | Type | Required | Default | Description | +|---------------------------|---------|----------|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| allow_origins | string | False | "*" | Origins to allow CORS. Use the `scheme://host:port` format. For example, `https://somedomain.com:8081`. If you have multiple origins, use a `,` to list them. If `allow_credential` is set to `false`, you can enable CORS for all origins by using `*`. If `allow_credential` is set to `true`, you can forcefully allow CORS on all origins by using `**` but it will pose some security issues. | +| allow_methods | string | False | "*" | Request methods to enable CORS on. For example `GET`, `POST`. Use `,` to add multiple methods. If `allow_credential` is set to `false`, you can enable CORS for all methods by using `*`. If `allow_credential` is set to `true`, you can forcefully allow CORS on all methods by using `**` but it will pose some security issues. | +| allow_headers | string | False | "*" | Headers in the request allowed when accessing a cross-origin resource. Use `,` to add multiple headers. If `allow_credential` is set to `false`, you can enable CORS for all request headers by using `*`. If `allow_credential` is set to `true`, you can forcefully allow CORS on all request headers by using `**` but it will pose some security issues. | +| expose_headers | string | False | | Headers in the response allowed when accessing a cross-origin resource. Use `,` to add multiple headers. If `allow_credential` is set to `false`, you can enable CORS for all response headers by using `*`. 
If not specified, the plugin will not modify the `Access-Control-Expose-Headers header`. See [Access-Control-Expose-Headers - MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers) for more details. | +| max_age | integer | False | 5 | Maximum time in seconds the result is cached. If the time is within this limit, the browser will check the cached result. Set to `-1` to disable caching. Note that the maximum value is browser dependent. See [Access-Control-Max-Age](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age#Directives) for more details. | +| allow_credential | boolean | False | false | When set to `true`, allows requests to include credentials like cookies. According to CORS specification, if you set this to `true`, you cannot use '*' to allow all for the other attributes. | +| allow_origins_by_regex | array | False | nil | Regex to match origins that allow CORS. For example, `[".*\.test.com$"]` can match all subdomains of `test.com`. When set to specified range, only domains in this range will be allowed, no matter what `allow_origins` is. | +| allow_origins_by_metadata | array | False | nil | Origins to enable CORS referenced from `allow_origins` set in the Plugin metadata. For example, if `"allow_origins": {"EXAMPLE": "https://example.com"}` is set in the Plugin metadata, then `["EXAMPLE"]` can be used to allow CORS on the origin `https://example.com`. | + +:::info IMPORTANT + +1. The `allow_credential` attribute is sensitive and must be used carefully. If set to `true` the default value `*` of the other attributes will be invalid and they should be specified explicitly. +2. When using `**` you are vulnerable to security risks like CSRF. Make sure that this meets your security levels before using it. 
+ +::: + +### Resource Timing attributes + +| Name | Type | Required | Default | Description | +|---------------------------|---------|----------|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| timing_allow_origins | string | False | nil | Origin to allow to access the resource timing information. See [Timing-Allow-Origin](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Timing-Allow-Origin). Use the `scheme://host:port` format. For example, `https://somedomain.com:8081`. If you have multiple origins, use a `,` to list them. | +| timing_allow_origins_by_regex | array | False | nil | Regex to match with origin for enabling access to the resource timing information. For example, `[".*\.test.com"]` can match all subdomain of `test.com`. When set to specified range, only domains in this range will be allowed, no matter what `timing_allow_origins` is. | + +:::note + +The Timing-Allow-Origin header is defined in the Resource Timing API, but it is related to the CORS concept. + +Suppose you have 2 domains, `domain-A.com` and `domain-B.com`. +You are on a page on `domain-A.com`, you have an XHR call to a resource on `domain-B.com` and you need its timing information. +You can allow the browser to show this timing information only if you have cross-origin permissions on `domain-B.com`. +So, you have to set the CORS headers first, then access the `domain-B.com` URL, and if you set [Timing-Allow-Origin](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Timing-Allow-Origin), the browser will show the requested timing information. 
+ +::: + +## Metadata + +| Name | Type | Required | Description | +|---------------|--------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| allow_origins | object | False | A map with origin reference and allowed origins. The keys in the map are used in the attribute `allow_origins_by_metadata` and the value are equivalent to the `allow_origins` attribute of the Plugin. | + +## Enable Plugin + +You can enable the Plugin on a specific Route or Service: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "cors": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +}' +``` + +## Example usage + +After enabling the Plugin, you can make a request to the server and see the CORS headers returned: + +```shell +curl http://127.0.0.1:9080/hello -v +``` + +```shell +... +< Server: APISIX web server +< Access-Control-Allow-Origin: * +< Access-Control-Allow-Methods: * +< Access-Control-Allow-Headers: * +< Access-Control-Max-Age: 5 +... +``` + +## Delete Plugin + +To remove the `cors` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/csrf.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/csrf.md new file mode 100644 index 0000000..f519827 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/csrf.md @@ -0,0 +1,154 @@ +--- +title: csrf +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Cross-site request forgery + - csrf +description: The CSRF Plugin can be used to protect your API against CSRF attacks using the Double Submit Cookie method. +--- + + + +## Description + +The `csrf` Plugin can be used to protect your API against [CSRF attacks](https://en.wikipedia.org/wiki/Cross-site_request_forgery) using the [Double Submit Cookie](https://en.wikipedia.org/wiki/Cross-site_request_forgery#Double_Submit_Cookie) method. + +This Plugin considers the `GET`, `HEAD` and `OPTIONS` methods to be safe operations (`safe-methods`) and such requests are not checked for interception by an attacker. Other methods are termed as `unsafe-methods`. + +## Attributes + +| Name | Type | Required | Default | Description | +|---------|--------|----------|---------------------|---------------------------------------------------------------------------------------------| +| name | string | False | `apisix-csrf-token` | Name of the token in the generated cookie. | +| expires | number | False | `7200` | Expiration time in seconds of the CSRF cookie. Set to `0` to skip checking expiration time. | +| key | string | True | | Secret key used to encrypt the cookie. | + +NOTE: `encrypt_fields = {"key"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). 
+
+## Enable Plugin
+
+The example below shows how you can enable the Plugin on a specific Route:
+
+:::note
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+  "uri": "/hello",
+  "plugins": {
+    "csrf": {
+      "key": "edd1c9f034335f136f87ad84b625c8f1"
+    }
+  },
+  "upstream": {
+    "type": "roundrobin",
+    "nodes": {
+      "127.0.0.1:9001": 1
+    }
+  }
+}'
+```
+
+The Route is now protected and trying to access it with methods other than `GET` will be blocked with a 401 status code.
+
+Sending a `GET` request to the `/hello` endpoint will send back a cookie with an encrypted token. The name of the token can be set through the `name` attribute of the Plugin configuration and if unset, it defaults to `apisix-csrf-token`.
+
+:::note
+
+A new cookie is returned for each request.
+
+:::
+
+For subsequent requests with `unsafe-methods`, you need to read the encrypted token from the cookie and append the token to the request header by setting the field name to the `name` attribute in the Plugin configuration.
+
+## Example usage
+
+After you have configured the Plugin as shown above, trying to directly make a `POST` request to the `/hello` Route will result in an error:
+
+```shell
+curl -i http://127.0.0.1:9080/hello -X POST
+```
+
+```shell
+HTTP/1.1 401 Unauthorized
+...
+{"error_msg":"no csrf token in headers"} +``` + +To get the cookie with the encrypted token, you can make a `GET` request: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +```shell +HTTP/1.1 200 OK +Set-Cookie: apisix-csrf-token=eyJyYW5kb20iOjAuNjg4OTcyMzA4ODM1NDMsImV4cGlyZXMiOjcyMDAsInNpZ24iOiJcL09uZEF4WUZDZGYwSnBiNDlKREtnbzVoYkJjbzhkS0JRZXVDQm44MG9ldz0ifQ==;path=/;Expires=Mon, 13-Dec-21 09:33:55 GMT +``` + +This token must then be read from the cookie and added to the request header for subsequent `unsafe-methods` requests. + +For example, you can use [js-cookie](https://github.com/js-cookie/js-cookie) to read the cookie and [axios](https://github.com/axios/axios) to send requests: + +```js +const token = Cookie.get('apisix-csrf-token'); + +const instance = axios.create({ + headers: {'apisix-csrf-token': token} +}); +``` + +Also make sure that you carry the cookie. + +You can also use curl to send the request: + +```shell +curl -i http://127.0.0.1:9080/hello -X POST -H 'apisix-csrf-token: eyJyYW5kb20iOjAuNjg4OTcyMzA4ODM1NDMsImV4cGlyZXMiOjcyMDAsInNpZ24iOiJcL09uZEF4WUZDZGYwSnBiNDlKREtnbzVoYkJjbzhkS0JRZXVDQm44MG9ldz0ifQ==' -b 'apisix-csrf-token=eyJyYW5kb20iOjAuNjg4OTcyMzA4ODM1NDMsImV4cGlyZXMiOjcyMDAsInNpZ24iOiJcL09uZEF4WUZDZGYwSnBiNDlKREtnbzVoYkJjbzhkS0JRZXVDQm44MG9ldz0ifQ==' +``` + +```shell +HTTP/1.1 200 OK +``` + +## Delete Plugin + +To remove the `csrf` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/datadog.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/datadog.md new file mode 100644 index 0000000..4b2d5b4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/datadog.md @@ -0,0 +1,164 @@ +--- +title: datadog +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Datadog +description: This document contains information about the Apache APISIX datadog Plugin. +--- + + + +## Description + +The `datadog` monitoring Plugin is for seamless integration of APISIX with [Datadog](https://www.datadoghq.com/), one of the most used monitoring and observability platform for cloud applications. + +When enabled, the Plugin supports multiple metric capture types for request and response cycles. + +This Plugin, pushes its custom metrics to the [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/?tab=hostagent) server over UDP protocol and comes bundled with [Datadog agent](https://docs.datadoghq.com/agent/). + +DogStatsD implements the StatsD protocol which collects the custom metrics for the Apache APISIX agent, aggregates them into a single data point, and sends it to the configured Datadog server. + +This Plugin provides the ability to push metrics as a batch to the external Datadog agent, reusing the same datagram socket. It might take some time to receive the log data. It will be automatically sent after the timer function in the [batch processor](../batch-processor.md) expires. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ----------- | ------- | -------- | ------- | ------------ | -------------------------------------------------------------------------------------- | +| prefer_name | boolean | False | true | [true,false] | When set to `false`, uses Route/Service ID instead of name (default) with metric tags. | + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +## Metadata + +You can configure the Plugin through the Plugin metadata. + +| Name | Type | Required | Default | Description | +| ------------- | ------- | -------- | ------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| host | string | False | "127.0.0.1" | DogStatsD server host address. | +| port | integer | False | 8125 | DogStatsD server host port. | +| namespace | string | False | "apisix" | Prefix for all custom metrics sent by APISIX agent. Useful for finding entities for metrics graph. For example, `apisix.request.counter`. | +| constant_tags | array | False | [ "source:apisix" ] | Static tags to embed into generated metrics. Useful for grouping metrics over certain signals. | + +:::tip + +See [defining tags](https://docs.datadoghq.com/getting_started/tagging/#defining-tags) to know more about how to effectively use tags. + +::: + +By default, the Plugin expects the DogStatsD service to be available at `127.0.0.1:8125`. 
If you want to change this, you can update the Plugin metadata as shown below: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/datadog -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "host": "172.168.45.29", + "port": 8126, + "constant_tags": [ + "source:apisix", + "service:custom" + ], + "namespace": "apisix" +}' +``` + +To reset to default configuration, make a PUT request with empty body: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/datadog -H "X-API-KEY: $admin_key" -X PUT -d '{}' +``` + +## Exported metrics + +When the `datadog` Plugin is enabled, the APISIX agent exports the following metrics to the DogStatsD server for each request/response cycle: + +| Metric name | StatsD type | Description | +| ---------------- | ----------- | ----------------------------------------------------------------------------------------------------- | +| Request Counter | Counter | Number of requests received. | +| Request Latency | Histogram | Time taken to process the request (in milliseconds). | +| Upstream latency | Histogram | Time taken to proxy the request to the upstream server till a response is received (in milliseconds). | +| APISIX Latency | Histogram | Time taken by APISIX agent to process the request (in milliseconds). | +| Ingress Size | Timer | Request body size in bytes. | +| Egress Size | Timer | Response body size in bytes. | + +The metrics will be sent to the DogStatsD agent with the following tags: + +- `route_name`: Name specified in the Route schema definition. If not present or if the attribute `prefer_name` is set to false, falls back to the Route ID. +- `service_name`: If a Route has been created with an abstracted Service, the Service name/ID based on the attribute `prefer_name`. 
+- `consumer`: If the Route is linked to a Consumer, the username will be added as a tag. +- `balancer_ip`: IP address of the Upstream balancer that processed the current request. +- `response_status`: HTTP response status code. +- `scheme`: Request scheme such as HTTP, gRPC, and gRPCs. + +:::note + +If there are no suitable values for any particular tag, the tag will be omitted. + +::: + +## Enable Plugin + +Once you have your Datadog agent running, you can enable the Plugin as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "datadog": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +Now, requests to the endpoint `/hello` will generate metrics and push it to the DogStatsD server. + +## Delete Plugin + +To remove the `datadog` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/degraphql.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/degraphql.md new file mode 100644 index 0000000..6c37571 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/degraphql.md @@ -0,0 +1,337 @@ +--- +title: degraphql +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Degraphql +description: This document contains information about the Apache APISIX degraphql Plugin. +--- + + + +## Description + +The `degraphql` Plugin is used to support decoding RESTful API to GraphQL. 
+
+## Attributes
+
+| Name | Type | Required | Description |
+| -------------- | ------ | -------- | -------------------------------------------------------------------------------------------- |
+| query | string | True | The GraphQL query sent to the upstream |
+| operation_name | string | False | The name of the operation, is only required if multiple operations are present in the query. |
+| variables | array | False | The variables used in the GraphQL query |
+
+## Example usage
+
+### Start GraphQL server
+
+We use docker to deploy a [GraphQL server demo](https://github.com/npalm/graphql-java-demo) as the backend.
+
+```bash
+docker run -d --name grapql-demo -p 8080:8080 npalm/graphql-java-demo
+```
+
+After starting the server, the following endpoints are now available:
+
+- http://localhost:8080/graphiql - GraphQL IDE - GraphiQL
+- http://localhost:8080/playground - GraphQL IDE - Prisma GraphQL Client
+- http://localhost:8080/altair - GraphQL IDE - Altair GraphQL Client
+- http://localhost:8080/ - A simple React frontend
+- ws://localhost:8080/subscriptions
+
+### Enable Plugin
+
+#### Query list
+
+If we have a GraphQL query like this:
+
+```graphql
+query {
+  persons {
+    id
+    name
+  }
+}
+```
+
+We can execute it on `http://localhost:8080/playground`, and get the data as below:
+
+```json
+{
+  "data": {
+    "persons": [
+      {
+        "id": "7",
+        "name": "Niek"
+      },
+      {
+        "id": "8",
+        "name": "Josh"
+      },
+      ......
+    ]
+  }
+}
+```
+
+Now we can use RESTful API to query the same data that is proxied by APISIX.
+
+First, we need to create a route in APISIX, and enable the degraphql plugin on the route, we need to define the GraphQL query in the plugin's config.
+ +```bash +curl --location --request PUT 'http://localhost:9180/apisix/admin/routes/1' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "uri": "/graphql", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + }, + "plugins": { + "degraphql": { + "query": "{\n persons {\n id\n name\n }\n}\n" + } + } +}' +``` + +We convert the GraphQL query + +```graphql +{ + persons { + id + name + } +} +``` + +to JSON string `"{\n persons {\n id\n name\n }\n}\n"`, and put it in the plugin's configuration. + +Then we can query the data by RESTful API: + +```bash +curl --location --request POST 'http://localhost:9080/graphql' +``` + +and get the result: + +```json +{ + "data": { + "persons": [ + { + "id": "7", + "name": "Niek" + }, + { + "id": "8", + "name": "Josh" + }, + ...... + ] + } +} +``` + +#### Query with variables + +If we have a GraphQL query like this: + +```graphql +query($name: String!, $githubAccount: String!) { + persons(filter: { name: $name, githubAccount: $githubAccount }) { + id + name + blog + githubAccount + talks { + id + title + } + } +} + +variables: +{ + "name": "Niek", + "githubAccount": "npalm" +} +``` + +we can execute it on `http://localhost:8080/playground`, and get the data as below: + +```json +{ + "data": { + "persons": [ + { + "id": "7", + "name": "Niek", + "blog": "https://040code.github.io", + "githubAccount": "npalm", + "talks": [ + { + "id": "19", + "title": "GraphQL - The Next API Language" + }, + { + "id": "20", + "title": "Immutable Infrastructure" + } + ] + } + ] + } +} +``` + +We convert the GraphQL query to JSON string like `"query($name: String!, $githubAccount: String!) 
{\n persons(filter: { name: $name, githubAccount: $githubAccount }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}"`, so we create a route like this: + +```bash +curl --location --request PUT 'http://localhost:9180/apisix/admin/routes/1' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "uri": "/graphql", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + }, + "plugins": { + "degraphql": { + "query": "query($name: String!, $githubAccount: String!) {\n persons(filter: { name: $name, githubAccount: $githubAccount }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}", + "variables": [ + "name", + "githubAccount" + ] + } + } +}' +``` + +We define the `variables` in the plugin's config, and the `variables` is an array, which contains the variables' name in the GraphQL query, so that we can pass the query variables by RESTful API. + +Query the data by RESTful API that proxy by APISIX: + +```bash +curl --location --request POST 'http://localhost:9080/graphql' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "name": "Niek", + "githubAccount": "npalm" +}' +``` + +and get the result: + +```json +{ + "data": { + "persons": [ + { + "id": "7", + "name": "Niek", + "blog": "https://040code.github.io", + "githubAccount": "npalm", + "talks": [ + { + "id": "19", + "title": "GraphQL - The Next API Language" + }, + { + "id": "20", + "title": "Immutable Infrastructure" + } + ] + } + ] + } +} +``` + +which is the same as the result of the GraphQL query. 
+ +It's also possible to get the same result via GET request: + +```bash +curl 'http://localhost:9080/graphql?name=Niek&githubAccount=npalm' +``` + +```json +{ + "data": { + "persons": [ + { + "id": "7", + "name": "Niek", + "blog": "https://040code.github.io", + "githubAccount": "npalm", + "talks": [ + { + "id": "19", + "title": "GraphQL - The Next API Language" + }, + { + "id": "20", + "title": "Immutable Infrastructure" + } + ] + } + ] + } +} +``` + +In the GET request, the variables are passed in the query string. + +## Delete Plugin + +To remove the `degraphql` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/graphql", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/dubbo-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/dubbo-proxy.md new file mode 100644 index 0000000..eff2ceb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/dubbo-proxy.md @@ -0,0 +1,191 @@ +--- +title: dubbo-proxy +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Apache Dubbo + - dubbo-proxy +description: This document contains information about the Apache APISIX dubbo-proxy Plugin. +--- + + + +## Description + +The `dubbo-proxy` Plugin allows you to proxy HTTP requests to [Apache Dubbo](https://dubbo.apache.org/en/index.html). 
+ +:::info IMPORTANT + +If you are using OpenResty, you need to build it with Dubbo support. See [How do I build the APISIX runtime environment](./../FAQ.md#how-do-i-build-the-apisix-runtime-environment) for details. + +::: + +## Runtime Attributes + +| Name | Type | Required | Default | Description | +| --------------- | ------ | -------- | -------------------- | ------------------------------- | +| service_name | string | True | | Dubbo provider service name. | +| service_version | string | True | | Dubbo provider service version. | +| method | string | False | The path of the URI. | Dubbo provider service method. | + +## Static Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ------------------------ | ------ | -------- | ------- | ------------ | --------------------------------------------------------------- | +| upstream_multiplex_count | number | True | 32 | >= 1 | Maximum number of multiplex requests in an upstream connection. | + +## Enable Plugin + +To enable the `dubbo-proxy` Plugin, you have to add it in your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + - ... 
+ - dubbo-proxy +``` + +Now, when APISIX is reloaded, you can add it to a specific Route as shown below: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "nodes": { + "127.0.0.1:20880": 1 + }, + "type": "roundrobin" +}' + +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uris": [ + "/hello" + ], + "plugins": { + "dubbo-proxy": { + "service_name": "org.apache.dubbo.sample.tengine.DemoService", + "service_version": "0.0.0", + "method": "tengineDubbo" + } + }, + "upstream_id": 1 +}' +``` + +## Example usage + +You can follow the [Quick Start](https://github.com/alibaba/tengine/tree/master/modules/mod_dubbo#quick-start) guide in Tengine with the configuration above for testing. + +APISIX dubbo plugin uses `hessian2` as the serialization protocol. It supports only `Map` as the request and response data type. + +### Application + +Your dubbo config should be configured to use `hessian2` as the serialization protocol. + +```yml +dubbo: + ... + protocol: + ... + serialization: hessian2 +``` + +Your application should implement the interface with the request and response data type as `Map`. + +```java +public interface DemoService { + Map sayHello(Map context); +} +``` + +### Request and Response + +If you need to pass request data, you can add the data to the HTTP request header. The plugin will convert the HTTP request header to the request data of the Dubbo service. Here is a sample HTTP request that passes `user` information: + +```bash +curl -i -X POST 'http://localhost:9080/hello' \ + --header 'user: apisix' + + +HTTP/1.1 200 OK +Date: Mon, 15 Jan 2024 10:15:57 GMT +Content-Type: text/plain; charset=utf-8 +... +hello: apisix +... 
+Server: APISIX/3.8.0 +``` + +If the returned data is: + +```json +{ + "status": "200", + "header1": "value1", + "header2": "value2", + "body": "body of the message" +} +``` + +The converted HTTP response will be: + +``` +HTTP/1.1 200 OK +... +header1: value1 +header2: value2 +... + +body of the message +``` + +## Delete Plugin + +To remove the `dubbo-proxy` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uris": [ + "/hello" + ], + "plugins": { + }, + "upstream_id": 1 + } +}' +``` + +To completely disable the `dubbo-proxy` Plugin, you can remove it from your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + # - dubbo-proxy +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/echo.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/echo.md new file mode 100644 index 0000000..da99f64 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/echo.md @@ -0,0 +1,116 @@ +--- +title: echo +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Echo +description: This document contains information about the Apache APISIX echo Plugin. +--- + + + +## Description + +The `echo` Plugin is to help users understand how they can develop an APISIX Plugin. + +This Plugin addresses common functionalities in phases like init, rewrite, access, balancer, header filter, body filter and log. + +:::caution WARNING + +The `echo` Plugin is built as an example. It has missing cases and should **not** be used in production environments. 
+ +::: + +## Attributes + +| Name | Type | Requirement | Default | Valid | Description | +| ----------- | ------ | ----------- | ------- | ----- | ----------------------------------------- | +| before_body | string | optional | | | Body to use before the filter phase. | +| body | string | optional | | | Body that replaces the Upstream response. | +| after_body | string | optional | | | Body to use after the modification phase. | +| headers | object | optional | | | New headers to use for the response. | + +At least one of `before_body`, `body`, and `after_body` must be specified. + +## Enable Plugin + +The example below shows how you can enable the `echo` Plugin for a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "echo": { + "before_body": "before the body modification " + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +## Example usage + +First, we configure the Plugin as mentioned above. We can then make a request as shown below: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... +before the body modification hello world +``` + +## Delete Plugin + +To remove the `echo` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/elasticsearch-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/elasticsearch-logger.md new file mode 100644 index 0000000..80ea076 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/elasticsearch-logger.md @@ -0,0 +1,445 @@ +--- +title: elasticsearch-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Elasticsearch-logger +description: The elasticsearch-logger Plugin pushes request and response logs in batches to Elasticsearch and supports the customization of log formats. +--- + + + + + + + +## Description + +The `elasticsearch-logger` Plugin pushes request and response logs in batches to [Elasticsearch](https://www.elastic.co) and supports the customization of log formats. When enabled, the Plugin will serialize the request context information to [Elasticsearch Bulk format](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) and add them to the queue, before they are pushed to Elasticsearch. See [batch processor](../batch-processor.md) for more details. + +## Attributes + +| Name | Type | Required | Default | Description | +| ------------- | ------- | -------- | --------------------------- | ------------------------------------------------------------ | +| endpoint_addrs | array[string] | True | | Elasticsearch API endpoint addresses. If multiple endpoints are configured, they will be written randomly. | +| field | object | True | | Elasticsearch `field` configuration. 
| +| field.index | string | True | | Elasticsearch [_index field](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field). | +| log_format | object | False | | Custom log format in key-value pairs in JSON format. Support [APISIX](../apisix-variable.md) or [NGINX variables](http://nginx.org/en/docs/varindex.html) in values. | +| auth | array | False | | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) configuration. | +| auth.username | string | True | | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) username. | +| auth.password | string | True | | Elasticsearch [authentication](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) password. | +| ssl_verify | boolean | False | true | If true, perform SSL verification. | +| timeout | integer | False | 10 | Elasticsearch send data timeout in seconds. | +| include_req_body | boolean | False | false | If true, include the request body in the log. Note that if the request body is too big to be kept in the memory, it can not be logged due to NGINX's limitations. | +| include_req_body_expr | array[array] | False | | An array of one or more conditions in the form of [lua-resty-expr](https://github.com/api7/lua-resty-expr). Used when the `include_req_body` is true. Request body would only be logged when the expressions configured here evaluate to true. | +| include_resp_body | boolean | False | false | If true, include the response body in the log. | +| include_resp_body_expr | array[array] | False | | An array of one or more conditions in the form of [lua-resty-expr](https://github.com/api7/lua-resty-expr). Used when the `include_resp_body` is true. Response body would only be logged when the expressions configured here evaluate to true. 
| + +NOTE: `encrypt_fields = {"auth.password"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +## Plugin Metadata + +| Name | Type | Required | Default | Description | +|------|------|----------|---------|-------------| +| log_format | object | False | | Custom log format in key-value pairs in JSON format. Support [APISIX variables](../apisix-variable.md) and [NGINX variables](http://nginx.org/en/docs/varindex.html) in values. | + +## Examples + +The examples below demonstrate how you can configure `elasticsearch-logger` Plugin for different scenarios. + +To follow along the examples, start an Elasticsearch instance in Docker: + +```shell +docker run -d \ + --name elasticsearch \ + --network apisix-quickstart-net \ + -v elasticsearch_vol:/usr/share/elasticsearch/data/ \ + -p 9200:9200 \ + -p 9300:9300 \ + -e ES_JAVA_OPTS="-Xms512m -Xmx512m" \ + -e discovery.type=single-node \ + -e xpack.security.enabled=false \ + docker.elastic.co/elasticsearch/elasticsearch:7.17.1 +``` + +Start a Kibana instance in Docker to visualize the indexed data in Elasticsearch: + +```shell +docker run -d \ + --name kibana \ + --network apisix-quickstart-net \ + -p 5601:5601 \ + -e ELASTICSEARCH_HOSTS="http://elasticsearch:9200" \ + docker.elastic.co/kibana/kibana:7.17.1 +``` + +If successful, you should see the Kibana dashboard on [localhost:5601](http://localhost:5601). 
+ +:::note + +You can fetch the APISIX `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Log in the Default Log Format + +The following example demonstrates how you can enable the `elasticsearch-logger` Plugin on a route, which logs client requests and responses to the Route and pushes logs to Elasticsearch. + +Create a Route with `elasticsearch-logger` to configure the `index` field as `gateway`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "elasticsearch-logger-route", + "uri": "/anything", + "plugins": { + "elasticsearch-logger": { + "endpoint_addrs": ["http://elasticsearch:9200"], + "field": { + "index": "gateway" + } + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Send a request to the Route to generate a log entry: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response. + +Navigate to the Kibana dashboard on [localhost:5601](http://localhost:5601) and under __Discover__ tab, create a new index pattern `gateway` to fetch the data from Elasticsearch. 
Once configured, navigate back to the __Discover__ tab and you should see a log generated, similar to the following: + +```json +{ + "_index": "gateway", + "_id": "CE-JL5QBOkdYRG7kEjTJ", + "_version": 1, + "_score": 1, + "_source": { + "request": { + "headers": { + "host": "127.0.0.1:9080", + "accept": "*/*", + "user-agent": "curl/8.6.0" + }, + "size": 85, + "querystring": {}, + "method": "GET", + "url": "http://127.0.0.1:9080/anything", + "uri": "/anything" + }, + "response": { + "headers": { + "content-type": "application/json", + "access-control-allow-credentials": "true", + "server": "APISIX/3.11.0", + "content-length": "390", + "access-control-allow-origin": "*", + "connection": "close", + "date": "Mon, 13 Jan 2025 10:18:14 GMT" + }, + "status": 200, + "size": 618 + }, + "route_id": "elasticsearch-logger-route", + "latency": 585.00003814697, + "apisix_latency": 18.000038146973, + "upstream_latency": 567, + "upstream": "50.19.58.113:80", + "server": { + "hostname": "0b9a772e68f8", + "version": "3.11.0" + }, + "service_id": "", + "client_ip": "192.168.65.1" + }, + "fields": { + ... + } +} +``` + +### Log Request and Response Headers With Plugin Metadata + +The following example demonstrates how you can customize log format using [Plugin Metadata](../terminology/plugin-metadata.md) and [NGINX variables](http://nginx.org/en/docs/varindex.html) to log specific headers from request and response. + +In APISIX, [Plugin Metadata](../terminology/plugin-metadata.md) is used to configure the common metadata fields of all Plugin instances of the same plugin. It is useful when a Plugin is enabled across multiple resources and requires a universal update to their metadata fields. 
+
+First, create a Route with `elasticsearch-logger` as follows:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "elasticsearch-logger-route",
+    "uri": "/anything",
+    "plugins": {
+      "elasticsearch-logger": {
+        "endpoint_addrs": ["http://elasticsearch:9200"],
+        "field": {
+          "index": "gateway"
+        }
+      }
+    },
+    "upstream": {
+      "nodes": {
+        "httpbin.org:80": 1
+      },
+      "type": "roundrobin"
+    }
+  }'
+```
+
+Next, configure the Plugin metadata for `elasticsearch-logger`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "log_format": {
+      "host": "$host",
+      "@timestamp": "$time_iso8601",
+      "client_ip": "$remote_addr",
+      "env": "$http_env",
+      "resp_content_type": "$sent_http_Content_Type"
+    }
+  }'
+```
+
+Send a request to the Route with the `env` header:
+
+```shell
+curl -i "http://127.0.0.1:9080/anything" -H "env: dev"
+```
+
+You should receive an `HTTP/1.1 200 OK` response.
+
+Navigate to the Kibana dashboard on [localhost:5601](http://localhost:5601) and under __Discover__ tab, create a new index pattern `gateway` to fetch the data from Elasticsearch, if you have not done so already. Once configured, navigate back to the __Discover__ tab and you should see a log generated, similar to the following:
+
+```json
+{
+  "_index": "gateway",
+  "_id": "Ck-WL5QBOkdYRG7kODS0",
+  "_version": 1,
+  "_score": 1,
+  "_source": {
+    "client_ip": "192.168.65.1",
+    "route_id": "elasticsearch-logger-route",
+    "@timestamp": "2025-01-06T10:32:36+00:00",
+    "host": "127.0.0.1",
+    "resp_content_type": "application/json"
+  },
+  "fields": {
+    ...
+  }
+}
+```
+
+### Log Request Bodies Conditionally
+
+The following example demonstrates how you can conditionally log request body. 
+
+Create a Route with `elasticsearch-logger` to only log request body if the URL query string `log_body` is `yes`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "plugins": {
+      "elasticsearch-logger": {
+        "endpoint_addrs": ["http://elasticsearch:9200"],
+        "field": {
+          "index": "gateway"
+        },
+        "include_req_body": true,
+        "include_req_body_expr": [["arg_log_body", "==", "yes"]]
+      }
+    },
+    "upstream": {
+      "nodes": {
+        "httpbin.org:80": 1
+      },
+      "type": "roundrobin"
+    },
+    "uri": "/anything",
+    "id": "elasticsearch-logger-route"
+}'
+```
+
+Send a request to the Route with a URL query string satisfying the condition:
+
+```shell
+curl -i "http://127.0.0.1:9080/anything?log_body=yes" -X POST -d '{"env": "dev"}'
+```
+
+You should receive an `HTTP/1.1 200 OK` response.
+
+Navigate to the Kibana dashboard on [localhost:5601](http://localhost:5601) and under __Discover__ tab, create a new index pattern `gateway` to fetch the data from Elasticsearch, if you have not done so already. 
Once configured, navigate back to the __Discover__ tab and you should see a log generated, similar to the following: + +```json +{ + "_index": "gateway", + "_id": "Dk-cL5QBOkdYRG7k7DSW", + "_version": 1, + "_score": 1, + "_source": { + "request": { + "headers": { + "user-agent": "curl/8.6.0", + "accept": "*/*", + "content-length": "14", + "host": "127.0.0.1:9080", + "content-type": "application/x-www-form-urlencoded" + }, + "size": 182, + "querystring": { + "log_body": "yes" + }, + "body": "{\"env\": \"dev\"}", + "method": "POST", + "url": "http://127.0.0.1:9080/anything?log_body=yes", + "uri": "/anything?log_body=yes" + }, + "start_time": 1735965595203, + "response": { + "headers": { + "content-type": "application/json", + "server": "APISIX/3.11.0", + "access-control-allow-credentials": "true", + "content-length": "548", + "access-control-allow-origin": "*", + "connection": "close", + "date": "Mon, 13 Jan 2025 11:02:32 GMT" + }, + "status": 200, + "size": 776 + }, + "route_id": "elasticsearch-logger-route", + "latency": 703.9999961853, + "apisix_latency": 34.999996185303, + "upstream_latency": 669, + "upstream": "34.197.122.172:80", + "server": { + "hostname": "0b9a772e68f8", + "version": "3.11.0" + }, + "service_id": "", + "client_ip": "192.168.65.1" + }, + "fields": { + ... 
+ } +} +``` + +Send a request to the Route without any URL query string: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST -d '{"env": "dev"}' +``` + +Navigate to the Kibana dashboard __Discover__ tab and you should see a log generated, but without the request body: + +```json +{ + "_index": "gateway", + "_id": "EU-eL5QBOkdYRG7kUDST", + "_version": 1, + "_score": 1, + "_source": { + "request": { + "headers": { + "content-type": "application/x-www-form-urlencoded", + "accept": "*/*", + "content-length": "14", + "host": "127.0.0.1:9080", + "user-agent": "curl/8.6.0" + }, + "size": 169, + "querystring": {}, + "method": "POST", + "url": "http://127.0.0.1:9080/anything", + "uri": "/anything" + }, + "start_time": 1735965686363, + "response": { + "headers": { + "content-type": "application/json", + "access-control-allow-credentials": "true", + "server": "APISIX/3.11.0", + "content-length": "510", + "access-control-allow-origin": "*", + "connection": "close", + "date": "Mon, 13 Jan 2025 11:15:54 GMT" + }, + "status": 200, + "size": 738 + }, + "route_id": "elasticsearch-logger-route", + "latency": 680.99999427795, + "apisix_latency": 4.9999942779541, + "upstream_latency": 676, + "upstream": "34.197.122.172:80", + "server": { + "hostname": "0b9a772e68f8", + "version": "3.11.0" + }, + "service_id": "", + "client_ip": "192.168.65.1" + }, + "fields": { + ... + } +} +``` + +:::info + +If you have customized the `log_format` in addition to setting `include_req_body` or `include_resp_body` to `true`, the Plugin would not include the bodies in the logs. 
+ +As a workaround, you may be able to use the NGINX variable `$request_body` in the log format, such as: + +```json +{ + "elasticsearch-logger": { + ..., + "log_format": {"body": "$request_body"} + } +} +``` + +::: diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/error-log-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/error-log-logger.md new file mode 100644 index 0000000..039e429 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/error-log-logger.md @@ -0,0 +1,181 @@ +--- +title: error-log-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Error log logger +description: This document contains information about the Apache APISIX error-log-logger Plugin. +--- + + +## Description + +The `error-log-logger` Plugin is used to push APISIX's error logs (`error.log`) to TCP, [Apache SkyWalking](https://skywalking.apache.org/), Apache Kafka or ClickHouse servers. You can also set the error log level to send the logs to server. + +It might take some time to receive the log data. It will be automatically sent after the timer function in the [batch processor](../batch-processor.md) expires. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|----------------------------------|---------|----------|--------------------------------|-----------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------| +| tcp.host | string | True | | | IP address or the hostname of the TCP server. | +| tcp.port | integer | True | | [0,...] | Target upstream port. | +| tcp.tls | boolean | False | false | | When set to `true` performs SSL verification. | +| tcp.tls_server_name | string | False | | | Server name for the new TLS extension SNI. 
|
+| skywalking.endpoint_addr | string | False | http://127.0.0.1:12900/v3/logs | | Apache SkyWalking HTTP endpoint. |
+| skywalking.service_name | string | False | APISIX | | Service name for the SkyWalking reporter. |
+| skywalking.service_instance_name | String | False | APISIX Instance Name | | Service instance name for the SkyWalking reporter. Set it to `$hostname` to directly get the local hostname. |
+| clickhouse.endpoint_addr | String | False | http://127.0.0.1:8213 | | ClickHouse endpoint. |
+| clickhouse.user | String | False | default | | ClickHouse username. |
+| clickhouse.password | String | False | | | ClickHouse password. |
+| clickhouse.database | String | False | | | Name of the database to store the logs. |
+| clickhouse.logtable | String | False | | | Table name to store the logs. |
+| kafka.brokers | array | True | | | List of Kafka brokers (nodes). |
+| kafka.brokers.host | string | True | | | The host of Kafka broker, e.g., `192.168.1.1`. |
+| kafka.brokers.port | integer | True | | [0, 65535] | The port of Kafka broker |
+| kafka.brokers.sasl_config | object | False | | | The sasl config of Kafka broker |
+| kafka.brokers.sasl_config.mechanism | string | False | "PLAIN" | ["PLAIN"] | The mechanism of sasl config |
+| kafka.brokers.sasl_config.user | string | True | | | The user of sasl_config. If sasl_config exists, it's required. |
+| kafka.brokers.sasl_config.password | string | True | | | The password of sasl_config. If sasl_config exists, it's required. |
+| kafka.kafka_topic | string | True | | | Target topic to push the logs for organisation. |
+| kafka.producer_type | string | False | async | ["async", "sync"] | Message sending mode of the producer. |
+| kafka.required_acks | integer | False | 1 | [0, 1, -1] | Number of acknowledgements the leader needs to receive for the producer to consider the request complete. This controls the durability of the sent records. 
The attribute follows the same configuration as the Kafka `acks` attribute. See [Apache Kafka documentation](https://kafka.apache.org/documentation/#producerconfigs_acks) for more. |
+| kafka.key | string | False | | | Key used for allocating partitions for messages. |
+| kafka.cluster_name | integer | False | 1 | [0,...] | Name of the cluster. Used when there are two or more Kafka clusters. Only works if the `producer_type` attribute is set to `async`. |
+| kafka.meta_refresh_interval | integer | False | 30 | [1,...] | `refresh_interval` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) specifies the time to auto refresh the metadata, in seconds.|
+| timeout | integer | False | 3 | [1,...] | Timeout (in seconds) for the upstream to connect and send data. |
+| keepalive | integer | False | 30 | [1,...] | Time in seconds to keep the connection alive after sending data. |
+| level | string | False | WARN | ["STDERR", "EMERG", "ALERT", "CRIT", "ERR", "ERROR", "WARN", "NOTICE", "INFO", "DEBUG"] | Log level to filter the error logs. `ERR` is the same as `ERROR`. |
+
+NOTE: `encrypt_fields = {"clickhouse.password"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields).
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or to set your custom configuration. 
+ +### Example of default log format + +```text +["2024/01/06 16:04:30 [warn] 11786#9692271: *1 [lua] plugin.lua:205: load(): new plugins: {"error-log-logger":true}, context: init_worker_by_lua*","\n","2024/01/06 16:04:30 [warn] 11786#9692271: *1 [lua] plugin.lua:255: load_stream(): new plugins: {"limit-conn":true,"ip-restriction":true,"syslog":true,"mqtt-proxy":true}, context: init_worker_by_lua*","\n"] +``` + +## Enable Plugin + +To enable the Plugin, you can add it in your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + - request-id + - hmac-auth + - api-breaker + - error-log-logger +``` + +Once you have enabled the Plugin, you can configure it through the Plugin metadata. + +### Configuring TCP server address + +You can set the TCP server address by configuring the Plugin metadata as shown below: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "tcp": { + "host": "127.0.0.1", + "port": 1999 + }, + "inactive_timeout": 1 +}' +``` + +### Configuring SkyWalking OAP server address + +You can configure the SkyWalking OAP server address as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "skywalking": { + "endpoint_addr":"http://127.0.0.1:12800/v3/logs" + }, + "inactive_timeout": 1 +}' +``` + +### Configuring ClickHouse server details + +The Plugin sends the error log as a string to the `data` field of a table in your ClickHouse server. 
+ +You can configure it as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "clickhouse": { + "user": "default", + "password": "a", + "database": "error_log", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:8123" + } +}' +``` + +### Configuring Kafka server + +The Plugin sends the error log to Kafka, you can configure it as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "kafka":{ + "brokers":[ + { + "host":"127.0.0.1", + "port":9092 + } + ], + "kafka_topic":"test2" + }, + "level":"ERROR", + "inactive_timeout":1 +}' +``` + +## Delete Plugin + +To remove the Plugin, you can remove it from your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + - request-id + - hmac-auth + - api-breaker + # - error-log-logger +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ext-plugin-post-req.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ext-plugin-post-req.md new file mode 100644 index 0000000..e1abc8c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ext-plugin-post-req.md @@ -0,0 +1,33 @@ +--- +title: ext-plugin-post-req +keywords: + - Apache APISIX + - Plugin + - ext-plugin-post-req +description: This document contains information about the Apache APISIX ext-plugin-post-req Plugin. +--- + + + +## Description + +`ext-plugin-post-req` differs from the [ext-plugin-pre-req](./ext-plugin-pre-req.md) Plugin in that it runs after executing the built-in Lua Plugins and before proxying to the Upstream. + +You can learn more about the configuration from the [ext-plugin-pre-req](./ext-plugin-pre-req.md) Plugin document. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ext-plugin-post-resp.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ext-plugin-post-resp.md new file mode 100644 index 0000000..ed51c6d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ext-plugin-post-resp.md @@ -0,0 +1,111 @@ +--- +title: ext-plugin-post-resp +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ext-plugin-post-resp +description: This document contains information about the Apache APISIX ext-plugin-post-resp Plugin. +--- + + + +## Description + +The `ext-plugin-post-resp` Plugin is for running specific external Plugins in the Plugin Runner before executing the built-in Lua Plugins. + +The `ext-plugin-post-resp` plugin will be executed after the request gets a response from the upstream. + +This plugin uses [lua-resty-http](https://github.com/api7/lua-resty-http) library under the hood to send requests to the upstream, due to which the [proxy-control](./proxy-control.md), [proxy-mirror](./proxy-mirror.md), and [proxy-cache](./proxy-cache.md) plugins are not available to be used alongside this plugin. Also, [mTLS Between APISIX and Upstream](../mtls.md#mtls-between-apisix-and-upstream) is not yet supported. + +See [External Plugin](../external-plugin.md) to learn more. + +:::note + +Execution of External Plugins will affect the response of the current request. + +::: + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-------------------|---------|----------|---------|-----------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------| +| conf | array | False | | [{"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}] | List of Plugins and their configurations to be executed on the Plugin Runner. 
| +| allow_degradation | boolean | False | false | | Sets Plugin degradation when the Plugin Runner is not available. When set to `true`, requests are allowed to continue. | + +## Enable Plugin + +The example below enables the `ext-plugin-post-resp` Plugin on a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "ext-plugin-post-resp": { + "conf" : [ + {"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"} + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +Once you have configured the External Plugin as shown above, you can make a request to execute the Plugin: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +This will reach the configured Plugin Runner and the `ext-plugin-A` will be executed. + +## Delete Plugin + +To remove the `ext-plugin-post-resp` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ext-plugin-pre-req.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ext-plugin-pre-req.md new file mode 100644 index 0000000..ec15df6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ext-plugin-pre-req.md @@ -0,0 +1,107 @@ +--- +title: ext-plugin-pre-req +keywords: + - Apache APISIX + - API Gateway + - Plugin + - ext-plugin-pre-req +description: This document contains information about the Apache APISIX ext-plugin-pre-req Plugin. +--- + + + +## Description + +The `ext-plugin-pre-req` Plugin is for running specific external Plugins in the Plugin Runner before executing the built-in Lua Plugins. + +See [External Plugin](../external-plugin.md) to learn more. + +:::note + +Execution of External Plugins will affect the behavior of the current request. + +::: + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-------------------|---------|----------|---------|-----------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------| +| conf | array | False | | [{"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}] | List of Plugins and their configurations to be executed on the Plugin Runner. | +| allow_degradation | boolean | False | false | | Sets Plugin degradation when the Plugin Runner is not available. When set to `true`, requests are allowed to continue. 
| + +## Enable Plugin + +The example below enables the `ext-plugin-pre-req` Plugin on a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "ext-plugin-pre-req": { + "conf" : [ + {"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"} + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +Once you have configured the External Plugin as shown above, you can make a request to execute the Plugin: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +This will reach the configured Plugin Runner and the `ext-plugin-A` will be executed. + +## Delete Plugin + +To remove the `ext-plugin-pre-req` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/fault-injection.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/fault-injection.md new file mode 100644 index 0000000..16d05b1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/fault-injection.md @@ -0,0 +1,293 @@ +--- +title: fault-injection +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Fault Injection + - fault-injection +description: This document contains information about the Apache APISIX fault-injection Plugin. 
+--- + + + +## Description + +The `fault-injection` Plugin can be used to test the resiliency of your application. This Plugin will be executed before the other configured Plugins. + +The `abort` attribute will directly return the specified HTTP code to the client and skips executing the subsequent Plugins. + +The `delay` attribute delays a request and executes the subsequent Plugins. + +## Attributes + +| Name | Type | Requirement | Default | Valid | Description | +|-------------------|---------|-------------|---------|------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------| +| abort.http_status | integer | required | | [200, ...] | HTTP status code of the response to return to the client. | +| abort.body | string | optional | | | Body of the response returned to the client. Nginx variables like `client addr: $remote_addr\n` can be used in the body. | +| abort.headers | object | optional | | | Headers of the response returned to the client. The values in the header can contain Nginx variables like `$remote_addr`. | +| abort.percentage | integer | optional | | [0, 100] | Percentage of requests to be aborted. | +| abort.vars | array[] | optional | | | Rules which are matched before executing fault injection. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for a list of available expressions. | +| delay.duration | number | required | | | Duration of the delay. Can be decimal. | +| delay.percentage | integer | optional | | [0, 100] | Percentage of requests to be delayed. | +| delay.vars | array[] | optional | | | Rules which are matched before executing fault injection. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for a list of available expressions. | + +:::info IMPORTANT + +To use the `fault-injection` Plugin one of `abort` or `delay` must be specified. 
+ +::: + +:::tip + +`vars` can have expressions from [lua-resty-expr](https://github.com/api7/lua-resty-expr) and can flexibly implement AND/OR relationship between rules. For example: + +```json +[ + [ + [ "arg_name","==","jack" ], + [ "arg_age","==",18 ] + ], + [ + [ "arg_name2","==","allen" ] + ] +] +``` + +This means that the relationship between the first two expressions is AND, and the relationship between them and the third expression is OR. + +::: + +## Enable Plugin + +You can enable the `fault-injection` Plugin on a specific Route as shown below: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "Fault Injection!" 
+ } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +Similarly, to enable a `delay` fault: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "delay": { + "duration": 3 + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +You can also enable the Plugin with both `abort` and `delay` which can have `vars` for matching: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 403, + "body": "Fault Injection!\n", + "vars": [ + [ + [ "arg_name","==","jack" ] + ] + ] + }, + "delay": { + "duration": 2, + "vars": [ + [ + [ "http_age","==","18" ] + ] + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +## Example usage + +Once you have enabled the Plugin as shown above, you can make a request to the configured Route: + +```shell +curl http://127.0.0.1:9080/hello -i +``` + +``` +HTTP/1.1 200 OK +Date: Mon, 13 Jan 2020 13:50:04 GMT +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server + +Fault Injection! 
+``` + +And if we configure the `delay` fault: + +```shell +time curl http://127.0.0.1:9080/hello -i +``` + +``` +HTTP/1.1 200 OK +Content-Type: application/octet-stream +Content-Length: 6 +Connection: keep-alive +Server: APISIX web server +Date: Tue, 14 Jan 2020 14:30:54 GMT +Last-Modified: Sat, 11 Jan 2020 12:46:21 GMT + +hello + +real 0m3.034s +user 0m0.007s +sys 0m0.010s +``` + +### Fault injection with criteria matching + +You can enable the `fault-injection` Plugin with the `vars` attribute to set specific rules: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 403, + "body": "Fault Injection!\n", + "vars": [ + [ + [ "arg_name","==","jack" ] + ] + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +Now, we can test the Route. First, we test with a different `name` argument: + +```shell +curl "http://127.0.0.1:9080/hello?name=allen" -i +``` + +You will get the expected response without the fault injected: + +``` +HTTP/1.1 200 OK +Content-Type: application/octet-stream +Transfer-Encoding: chunked +Connection: keep-alive +Date: Wed, 20 Jan 2021 07:21:57 GMT +Server: APISIX/2.2 + +hello +``` + +Now if we set the `name` to match our configuration, the `fault-injection` Plugin is executed: + +```shell +curl "http://127.0.0.1:9080/hello?name=jack" -i +``` + +``` +HTTP/1.1 403 Forbidden +Date: Wed, 20 Jan 2021 07:23:37 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/2.2 + +Fault Injection! +``` + +## Delete Plugin + +To remove the `fault-injection` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/file-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/file-logger.md new file mode 100644 index 0000000..05f2d49 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/file-logger.md @@ -0,0 +1,226 @@ +--- +title: file-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - File Logger +description: This document contains information about the Apache APISIX file-logger Plugin. +--- + + + +## Description + +The `file-logger` Plugin is used to push log streams to a specific location. + +:::tip + +- `file-logger` plugin can count request and response data for individual routes locally, which is useful for [debugging](../debug-mode.md). +- `file-logger` plugin can get [APISIX variables](../apisix-variable.md) and [NGINX variables](http://nginx.org/en/docs/varindex.html), while `access.log` can only use NGINX variables. +- `file-logger` plugin support hot-loaded so that we can change its configuration at any time with immediate effect. +- `file-logger` plugin saves every data in JSON format. +- The user can modify the functions executed by the `file-logger` during the `log phase` to collect the information they want. + +::: + +## Attributes + +| Name | Type | Required | Description | +| ---- | ------ | -------- | ------------- | +| path | string | True | Log file path. | +| log_format | object | False | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. 
| +| include_req_body | boolean | False | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | +| include_req_body_expr | array | False | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | +| include_resp_body | boolean | False | When set to `true` includes the response body in the log file. | +| include_resp_body_expr | array | False | When the `include_resp_body` attribute is set to `true`, use this to filter based on [lua-resty-expr](https://github.com/api7/lua-resty-expr). If present, only logs the response into file if the expression evaluates to `true`. | +| match | array[array] | False | Logs will be recorded when the rule matching is successful if the option is set. See [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) for a list of available expressions. 
| + +### Example of default log format + + ```json + { + "service_id": "", + "apisix_latency": 100.99999809265, + "start_time": 1703907485819, + "latency": 101.99999809265, + "upstream_latency": 1, + "client_ip": "127.0.0.1", + "route_id": "1", + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "request": { + "headers": { + "host": "127.0.0.1:1984", + "content-type": "application/x-www-form-urlencoded", + "user-agent": "lua-resty-http/0.16.1 (Lua) ngx_lua/10025", + "content-length": "12" + }, + "method": "POST", + "size": 194, + "url": "http://127.0.0.1:1984/hello?log_body=no", + "uri": "/hello?log_body=no", + "querystring": { + "log_body": "no" + } + }, + "response": { + "headers": { + "content-type": "text/plain", + "connection": "close", + "content-length": "12", + "server": "APISIX/3.7.0" + }, + "status": 200, + "size": 123 + }, + "upstream": "127.0.0.1:1982" + } + ``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. 
| + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/file-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Enable Plugin + +The example below shows how you can enable the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "file-logger": { + "path": "logs/file.log" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:9001": 1 + } + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request, it will be logged in the path you specified: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +You will be able to find the `file.log` file in the configured `logs` directory. + +## Filter logs + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "file-logger": { + "path": "logs/file.log", + "match": [ + [ + [ "arg_name","==","jack" ] + ] + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:9001": 1 + } + }, + "uri": "/hello" +}' +``` + +Test: + +```shell +curl -i http://127.0.0.1:9080/hello?name=jack +``` + +Log records can be seen in `logs/file.log`. 
+ +```shell +curl -i http://127.0.0.1:9080/hello?name=rose +``` + +Log records cannot be seen in `logs/file.log`. + +## Delete Plugin + +To remove the `file-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:9001": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/forward-auth.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/forward-auth.md new file mode 100644 index 0000000..b1aacc8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/forward-auth.md @@ -0,0 +1,186 @@ +--- +title: forward-auth +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Forward Authentication + - forward-auth +description: This document contains information about the Apache APISIX forward-auth Plugin. +--- + + + +## Description + +The `forward-auth` Plugin implements a classic external authentication model. When authentication fails, you can have a custom error message or redirect the user to an authentication page. + +This Plugin moves the authentication and authorization logic to a dedicated external service. APISIX forwards the user's requests to the external service, blocks the original request, and replaces the result when the external service responds with a non 2xx status code. 
+
+## Attributes
+
+| Name | Type | Required | Default | Valid values | Description |
+| ----------------- | ------------- | -------- | ------- | -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| uri | string | True | | | URI of the authorization service. |
+| ssl_verify | boolean | False | true | | When set to `true`, verifies the SSL certificate. |
+| request_method | string | False | GET | ["GET","POST"] | HTTP method for a client to send requests to the authorization service. When set to `POST` the request body is sent to the authorization service. |
+| request_headers | array[string] | False | | | Client request headers to be sent to the authorization service. If not set, only the headers provided by APISIX are sent (for example, `X-Forwarded-XXX`). |
+| upstream_headers | array[string] | False | | | Authorization service response headers to be forwarded to the Upstream. If not set, no headers are forwarded to the Upstream service. |
+| client_headers | array[string] | False | | | Authorization service response headers to be sent to the client when authorization fails. If not set, no headers will be sent to the client. |
+| timeout | integer | False | 3000ms | [1, 60000]ms | Timeout for the authorization service HTTP call. |
+| keepalive | boolean | False | true | | When set to `true`, keeps the connection alive for multiple requests. |
+| keepalive_timeout | integer | False | 60000ms | [1000, ...]ms | Idle time after which the connection is closed. |
+| keepalive_pool | integer | False | 5 | [1, ...] | Connection pool limit. |
+| allow_degradation | boolean | False | false | | When set to `true`, allows authentication to be skipped when authentication server is unavailable. 
| +| status_on_error | integer | False | 403 | [200,...,599] | Sets the HTTP status that is returned to the client when there is a network error to the authorization service. The default status is “403” (HTTP Forbidden). | + +## Data definition + +APISIX will generate and send the request headers listed below to the authorization service: + +| Scheme | HTTP Method | Host | URI | Source IP | +| ----------------- | ------------------ | ---------------- | --------------- | --------------- | +| X-Forwarded-Proto | X-Forwarded-Method | X-Forwarded-Host | X-Forwarded-Uri | X-Forwarded-For | + +## Example usage + +First, you need to setup your external authorization service. The example below uses Apache APISIX's [serverless](./serverless.md) Plugin to mock the service: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/auth' \ + -H "X-API-KEY: $admin_key" \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/auth", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [ + "return function (conf, ctx) + local core = require(\"apisix.core\"); + local authorization = core.request.header(ctx, \"Authorization\"); + if authorization == \"123\" then + core.response.exit(200); + elseif authorization == \"321\" then + core.response.set_header(\"X-User-ID\", \"i-am-user\"); + core.response.exit(200); + else core.response.set_header(\"Location\", \"http://example.com/auth\"); + core.response.exit(403); + end + end" + ] + } + } +}' +``` + +Now you can configure the `forward-auth` Plugin to a specific Route: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/1' \ + -H "X-API-KEY: $admin_key" \ + -d '{ + "uri": "/headers", + "plugins": { + "forward-auth": { + "uri": 
"http://127.0.0.1:9080/auth", + "request_headers": ["Authorization"], + "upstream_headers": ["X-User-ID"], + "client_headers": ["Location"] + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } +}' +``` + +Now if we send the authorization details in the request header: + +```shell +curl http://127.0.0.1:9080/headers -H 'Authorization: 123' +``` + +``` +{ + "headers": { + "Authorization": "123", + "Next": "More-headers" + } +} +``` + +The authorization service response can also be forwarded to the Upstream: + +```shell +curl http://127.0.0.1:9080/headers -H 'Authorization: 321' +``` + +``` +{ + "headers": { + "Authorization": "321", + "X-User-ID": "i-am-user", + "Next": "More-headers" + } +} +``` + +When authorization fails, the authorization service can send custom response back to the user: + +```shell +curl -i http://127.0.0.1:9080/headers +``` + +``` +HTTP/1.1 403 Forbidden +Location: http://example.com/auth +``` + +## Delete Plugin + +To remove the `forward-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/gm.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/gm.md new file mode 100644 index 0000000..99776f6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/gm.md @@ -0,0 +1,31 @@ +--- +title: GM +keywords: + - Apache APISIX + - Plugin + - GM +description: This article introduces the basic information and usage of the Apache APISIX `gm` plugin. 
+--- + + + +:::info +The function usage scenarios introduced in this article are mainly in China, so this article only has a Chinese version temporarily. You can click [here](https://apisix.apache.org/zh/docs/apisix/plugins/gm/) for more details. If you are interested in this feature, welcome to translate this document. +::: diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/google-cloud-logging.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/google-cloud-logging.md new file mode 100644 index 0000000..85b9723 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/google-cloud-logging.md @@ -0,0 +1,223 @@ +--- +title: google-cloud-logging +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Google Cloud logging +description: This document contains information about the Apache APISIX google-cloud-logging Plugin. +--- + + +## Description + +The `google-cloud-logging` Plugin is used to send APISIX access logs to [Google Cloud Logging Service](https://cloud.google.com/logging/). + +This plugin also allows to push logs as a batch to your Google Cloud Logging Service. It might take some time to receive the log data. It will be automatically sent after the timer function in the [batch processor](../batch-processor.md) expires. + +## Attributes + +| Name | Required | Default | Description | +|-------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| auth_config | True | | Either `auth_config` or `auth_file` must be provided. | +| auth_config.client_email | True | | Email address of the Google Cloud service account. 
| +| auth_config.private_key | True | | Private key of the Google Cloud service account. | +| auth_config.project_id | True | | Project ID in the Google Cloud service account. | +| auth_config.token_uri | True | https://oauth2.googleapis.com/token | Token URI of the Google Cloud service account. | +| auth_config.entries_uri | False | https://logging.googleapis.com/v2/entries:write | Google Cloud Logging Service API. | +| auth_config.scope | False | ["https://www.googleapis.com/auth/logging.read", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/cloud-platform"] | Access scopes of the Google Cloud service account. See [OAuth 2.0 Scopes for Google APIs](https://developers.google.com/identity/protocols/oauth2/scopes#logging). | +| auth_config.scopes | Deprecated | ["https://www.googleapis.com/auth/logging.read", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/logging.admin", "https://www.googleapis.com/auth/cloud-platform"] | Access scopes of the Google Cloud service account. Use `auth_config.scope` instead. | +| auth_file | True | | Path to the Google Cloud service account authentication JSON file. Either `auth_config` or `auth_file` must be provided. | +| ssl_verify | False | true | When set to `true`, enables SSL verification as mentioned in [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | +| resource | False | {"type": "global"} | Google monitor resource. See [MonitoredResource](https://cloud.google.com/logging/docs/reference/v2/rest/v2/MonitoredResource) for more details. | +| log_id | False | apisix.apache.org%2Flogs | Google Cloud logging ID. See [LogEntry](https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry) for details. | +| log_format | False | | Log format declared as key value pairs in JSON format. Values only support strings. 
[APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +NOTE: `encrypt_fields = {"auth_config.private_key"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +### Example of default log format + +```json +{ + "insertId": "0013a6afc9c281ce2e7f413c01892bdc", + "labels": { + "source": "apache-apisix-google-cloud-logging" + }, + "logName": "projects/apisix/logs/apisix.apache.org%2Flogs", + "httpRequest": { + "requestMethod": "GET", + "requestUrl": "http://localhost:1984/hello", + "requestSize": 59, + "responseSize": 118, + "status": 200, + "remoteIp": "127.0.0.1", + "serverIp": "127.0.0.1:1980", + "latency": "0.103s" + }, + "resource": { + "type": "global" + }, + "jsonPayload": { + "service_id": "", + "route_id": "1" + }, + "timestamp": "2024-01-06T03:34:45.065Z" +} +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. 
The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `google-cloud-logging` Plugin. + +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/google-cloud-logging -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```json +{"partialSuccess":false,"entries":[{"jsonPayload":{"client_ip":"127.0.0.1","host":"localhost","@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1"},"resource":{"type":"global"},"insertId":"942e81f60b9157f0d46bc9f5a8f0cc40","logName":"projects/apisix/logs/apisix.apache.org%2Flogs","timestamp":"2023-01-09T14:47:25+08:00","labels":{"source":"apache-apisix-google-cloud-logging"}}]} +``` + +## Enable Plugin + +### Full configuration + +The 
example below shows a complete configuration of the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "google-cloud-logging": { + "auth_config":{ + "project_id":"apisix", + "client_email":"your service account email@apisix.iam.gserviceaccount.com", + "private_key":"-----BEGIN RSA PRIVATE KEY-----your private key-----END RSA PRIVATE KEY-----", + "token_uri":"https://oauth2.googleapis.com/token", + "scope":[ + "https://www.googleapis.com/auth/logging.admin" + ], + "entries_uri":"https://logging.googleapis.com/v2/entries:write" + }, + "resource":{ + "type":"global" + }, + "log_id":"apisix.apache.org%2Flogs", + "inactive_timeout":10, + "max_retry_count":0, + "buffer_duration":60, + "retry_delay":1, + "batch_max_size":1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +### Minimal configuration + +The example below shows a bare minimum configuration of the Plugin on a Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "google-cloud-logging": { + "auth_config":{ + "project_id":"apisix", + "client_email":"your service account email@apisix.iam.gserviceaccount.com", + "private_key":"-----BEGIN RSA PRIVATE KEY-----your private key-----END RSA PRIVATE KEY-----" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your Google Cloud Logging Service. + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +You can then login and view the logs in [Google Cloud Logging Service](https://console.cloud.google.com/logs/viewer). + +## Delete Plugin + +To remove the `google-cloud-logging` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/grpc-transcode.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/grpc-transcode.md new file mode 100644 index 0000000..46285a6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/grpc-transcode.md @@ -0,0 +1,391 @@ +--- +title: grpc-transcode +keywords: + - Apache APISIX + - API Gateway + - Plugin + - gRPC Transcode + - grpc-transcode +description: This document contains information about the Apache APISIX grpc-transcode Plugin. +--- + + + +## Description + +The `grpc-transcode` Plugin converts between HTTP and gRPC requests. + +APISIX takes in an HTTP request, transcodes it and forwards it to a gRPC service, gets the response and returns it back to the client in HTTP format. + + + +## Attributes + +| Name | Type | Required | Default | Description | +| --------- | ------------------------------------------------------ | -------- | ------- | ------------------------------------ | +| proto_id | string/integer | True | | id of the the proto content. | +| service | string | True | | Name of the gRPC service. | +| method | string | True | | Method name of the gRPC service. | +| deadline | number | False | 0 | Deadline for the gRPC service in ms. | +| pb_option | array[string([pb_option_def](#options-for-pb_option))] | False | | protobuf options. 
| +| show_status_in_body | boolean | False | false | Whether to display the parsed `grpc-status-details-bin` in the response body | +| status_detail_type | string | False | | The message type corresponding to the [details](https://github.com/googleapis/googleapis/blob/b7cb84f5d42e6dba0fdcc2d8689313f6a8c9d7b9/google/rpc/status.proto#L46) part of `grpc-status-details-bin`, if not specified, this part will not be decoded | + +### Options for pb_option + +| Type | Valid values | +|-----------------|-------------------------------------------------------------------------------------------| +| enum as result | `enum_as_name`, `enum_as_value` | +| int64 as result | `int64_as_number`, `int64_as_string`, `int64_as_hexstring` | +| default values | `auto_default_values`, `no_default_values`, `use_default_values`, `use_default_metatable` | +| hooks | `enable_hooks`, `disable_hooks` | + +## Enable Plugin + +Before enabling the Plugin, you have to add the content of your `.proto` or `.pb` files to APISIX. + +You can use the `/admin/protos/id` endpoint and add the contents of the file to the `content` field: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/protos/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" +}' +``` + +If your proto file contains imports, or if you want to combine multiple proto files, you can generate a `.pb` file and use it in APISIX. 
+ +For example, if we have a file called `proto/helloworld.proto` which imports another proto file: + +```proto +syntax = "proto3"; + +package helloworld; +import "proto/import.proto"; +... +``` + +We first generate a `.pb` file from the proto files: + +```shell +protoc --include_imports --descriptor_set_out=proto.pb proto/helloworld.proto +``` + +The output binary file, `proto.pb` will contain both `helloworld.proto` and `import.proto`. + +We can now use the content of `proto.pb` in the `content` field of the API request. + +As the content of the proto is binary, we encode it in `base64` and configure the content in APISIX: + +```shell +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "content" : "'"$(base64 -w0 /path/to/proto.pb)"'" +}' +``` + +You should see an `HTTP/1.1 201 Created` response with the following: + +``` +{"node":{"value":{"create_time":1643879753,"update_time":1643883085,"content":"CmgKEnByb3RvL2ltcG9ydC5wcm90bxIDcGtnIhoKBFVzZXISEgoEbmFtZRgBIAEoCVIEbmFtZSIeCghSZXNwb25zZRISCgRib2R5GAEgASgJUgRib2R5QglaBy4vcHJvdG9iBnByb3RvMwq9AQoPcHJvdG8vc3JjLnByb3RvEgpoZWxsb3dvcmxkGhJwcm90by9pbXBvcnQucHJvdG8iPAoHUmVxdWVzdBIdCgR1c2VyGAEgASgLMgkucGtnLlVzZXJSBHVzZXISEgoEYm9keRgCIAEoCVIEYm9keTI5CgpUZXN0SW1wb3J0EisKA1J1bhITLmhlbGxvd29ybGQuUmVxdWVzdBoNLnBrZy5SZXNwb25zZSIAQglaBy4vcHJvdG9iBnByb3RvMw=="},"key":"\/apisix\/proto\/1"}} +``` + +Now, we can enable the `grpc-transcode` Plugin to a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/111 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } +}' +``` + +:::note + +The Upstream service used here should be a gRPC service. Note that the `scheme` is set to `grpc`. 
+ +You can use the [grpc_server_example](https://github.com/api7/grpc_server_example) for testing. + +::: + +## Example usage + +Once you configured the Plugin as mentioned above, you can make a request to APISIX to get a response back from the gRPC service (through APISIX): + +```shell +curl -i http://127.0.0.1:9080/grpctest?name=world +``` + +Response: + +```shell +HTTP/1.1 200 OK +Date: Fri, 16 Aug 2019 11:55:36 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server +Proxy-Connection: keep-alive + +{"message":"Hello world"} +``` + +You can also configure the `pb_option` as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/23 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/zeebe/WorkflowInstanceCreate", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "gateway_protocol.Gateway", + "method": "CreateWorkflowInstance", + "pb_option":["int64_as_string"] + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:26500": 1 + } + } +}' +``` + +Now if you check the configured Route: + +```shell +curl -i "http://127.0.0.1:9080/zeebe/WorkflowInstanceCreate?bpmnProcessId=order-process&version=1&variables=\{\"orderId\":\"7\",\"ordervalue\":99\}" +``` + +``` +HTTP/1.1 200 OK +Date: Wed, 13 Nov 2019 03:38:27 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +grpc-encoding: identity +grpc-accept-encoding: gzip +Server: APISIX web server +Trailer: grpc-status +Trailer: grpc-message + +{"workflowKey":"#2251799813685260","workflowInstanceKey":"#2251799813688013","bpmnProcessId":"order-process","version":1} +``` + +## Show `grpc-status-details-bin` in response body + +If the gRPC service returns an error, there may be a `grpc-status-details-bin` field in the response header describing the error, which you can decode and display in the response body. 
+
+Upload the proto file:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/protos/1 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "content" : "syntax = \"proto3\";
+    package helloworld;
+    service Greeter {
+        rpc GetErrResp (HelloRequest) returns (HelloReply) {}
+    }
+    message HelloRequest {
+        string name = 1;
+        repeated string items = 2;
+    }
+    message HelloReply {
+        string message = 1;
+        repeated string items = 2;
+    }"
+}'
+```
+
+Enable the `grpc-transcode` plugin, and set the option `show_status_in_body` to `true`:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "methods": ["GET"],
+    "uri": "/grpctest",
+    "plugins": {
+        "grpc-transcode": {
+            "proto_id": "1",
+            "service": "helloworld.Greeter",
+            "method": "GetErrResp",
+            "show_status_in_body": true
+        }
+    },
+    "upstream": {
+        "scheme": "grpc",
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:50051": 1
+        }
+    }
+}'
+```
+
+Access the route configured above:
+
+```shell
+curl -i http://127.0.0.1:9080/grpctest?name=world
+```
+
+Response:
+
+```Shell
+HTTP/1.1 503 Service Temporarily Unavailable
+Date: Wed, 10 Aug 2022 08:59:46 GMT
+Content-Type: application/json
+Transfer-Encoding: chunked
+Connection: keep-alive
+grpc-status: 14
+grpc-message: Out of service
+grpc-status-details-bin: CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5FcnJvckRldGFpbBIpCAESHFRoZSBzZXJ2ZXIgaXMgb3V0IG9mIHNlcnZpY2UaB3NlcnZpY2U
+Server: APISIX web server
+
+{"error":{"details":[{"type_url":"type.googleapis.com\/helloworld.ErrorDetail","value":"\b\u0001\u0012\u001cThe server is out of service\u001a\u0007service"}],"message":"Out of service","code":14}}
+```
+
+Note that there is an undecoded field in the return body. If you need to decode the field, you need to add the `message type` of the field in the uploaded proto file. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc GetErrResp (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + repeated string items = 2; + } + message HelloReply { + string message = 1; + repeated string items = 2; + } + message ErrorDetail { + int64 code = 1; + string message = 2; + string type = 3; + }" +}' +``` + +Also configure the option `status_detail_type` to `helloworld.ErrorDetail`. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "GetErrResp", + "show_status_in_body": true, + "status_detail_type": "helloworld.ErrorDetail" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } +}' +``` + +The fully decoded result is returned. + +```Shell +HTTP/1.1 503 Service Temporarily Unavailable +Date: Wed, 10 Aug 2022 09:02:46 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +grpc-status: 14 +grpc-message: Out of service +grpc-status-details-bin: CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5FcnJvckRldGFpbBIpCAESHFRoZSBzZXJ2ZXIgaXMgb3V0IG9mIHNlcnZpY2UaB3NlcnZpY2U +Server: APISIX web server + +{"error":{"details":[{"type":"service","message":"The server is out of service","code":1}],"message":"Out of service","code":14}} +``` + +## Delete Plugin + +To remove the `grpc-transcode` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/111 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/grpctest", + "plugins": {}, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/grpc-web.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/grpc-web.md new file mode 100644 index 0000000..8739145 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/grpc-web.md @@ -0,0 +1,110 @@ +--- +title: grpc-web +keywords: + - Apache APISIX + - API Gateway + - Plugin + - gRPC Web + - grpc-web +description: This document contains information about the Apache APISIX grpc-web Plugin. +--- + + + +## Description + +The `grpc-web` Plugin is a proxy Plugin that can process [gRPC Web](https://github.com/grpc/grpc-web) requests from JavaScript clients to a gRPC service. + +## Attributes + +| Name | Type | Required | Default | Description | +|-------------------------|---------|----------|-----------------------------------------|----------------------------------------------------------------------------------------------------------| +| cors_allow_headers | string | False | "content-type,x-grpc-web,x-user-agent" | Headers in the request allowed when accessing a cross-origin resource. Use `,` to add multiple headers. 
| + +## Enable Plugin + +You can enable the `grpc-web` Plugin on a specific Route as shown below: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri":"/grpc/web/*", + "plugins":{ + "grpc-web":{} + }, + "upstream":{ + "scheme":"grpc", + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + } +}' +``` + +:::info IMPORTANT + +While using the `grpc-web` Plugin, always use a prefix matching pattern (`/*`, `/grpc/example/*`) for matching Routes. This is because the gRPC Web client passes the package name, the service interface name, the method name and other information in the proto in the URI. For example, `/path/a6.RouteService/Insert`. + +So, when absolute matching is used, the Plugin would not be hit and the information from the proto would not be extracted. + +::: + +## Example usage + +Refer to [gRPC-Web Client Runtime Library](https://www.npmjs.com/package/grpc-web) or [Apache APISIX gRPC Web Test Framework](https://github.com/apache/apisix/tree/master/t/plugin/grpc-web) to learn how to setup your web client. + +Once you have your gRPC Web client running, you can make a request to APISIX from the browser or through Node.js. + +:::note + +The supported request methods are `POST` and `OPTIONS`. See [CORS support](https://github.com/grpc/grpc-web/blob/master/doc/browser-features.md#cors-support). + +The supported `Content-Type` includes `application/grpc-web`, `application/grpc-web-text`, `application/grpc-web+proto`, and `application/grpc-web-text+proto`. See [Protocol differences vs gRPC over HTTP2](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md#protocol-differences-vs-grpc-over-http2). 
+ +::: + +## Delete Plugin + +To remove the `grpc-web` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri":"/grpc/web/*", + "plugins":{}, + "upstream":{ + "scheme":"grpc", + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/gzip.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/gzip.md new file mode 100644 index 0000000..a8a16dc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/gzip.md @@ -0,0 +1,123 @@ +--- +title: gzip +keywords: + - Apache APISIX + - API Gateway + - Plugin + - gzip +description: This document contains information about the Apache APISIX gzip Plugin. +--- + + + +## Description + +The `gzip` Plugin dynamically sets the behavior of [gzip in Nginx](https://docs.nginx.com/nginx/admin-guide/web-server/compression/). +When the `gzip` plugin is enabled, the client needs to include `Accept-Encoding: gzip` in the request header to indicate support for gzip compression. Upon receiving the request, APISIX dynamically determines whether to compress the response content based on the client's support and server configuration. If the conditions are met, `APISIX` adds the `Content-Encoding: gzip` header to the response, indicating that the response content has been compressed using gzip. Upon receiving the response, the client uses the corresponding decompression algorithm based on the `Content-Encoding` header to decompress the response content and obtain the original response content. + +:::info IMPORTANT + +This Plugin requires APISIX to run on [APISIX-Runtime](../FAQ.md#how-do-i-build-the-apisix-runtime-environment). 
+ +::: + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|----------------|----------------------|----------|---------------|--------------|-----------------------------------------------------------------------------------------| +| types | array[string] or "*" | False | ["text/html"] | | Dynamically sets the `gzip_types` directive. Special value `"*"` matches any MIME type. | +| min_length | integer | False | 20 | >= 1 | Dynamically sets the `gzip_min_length` directive. | +| comp_level | integer | False | 1 | [1, 9] | Dynamically sets the `gzip_comp_level` directive. | +| http_version | number | False | 1.1 | 1.1, 1.0 | Dynamically sets the `gzip_http_version` directive. | +| buffers.number | integer | False | 32 | >= 1 | Dynamically sets the `gzip_buffers` directive parameter `number`. | +| buffers.size | integer | False | 4096 | >= 1 | Dynamically sets the `gzip_buffers` directive parameter `size`. The unit is in bytes. | +| vary | boolean | False | false | | Dynamically sets the `gzip_vary` directive. 
| + +## Enable Plugin + +The example below enables the `gzip` Plugin on the specified Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "gzip": { + "buffers": { + "number": 8 + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +Once you have configured the Plugin as shown above, you can make a request as shown below: + +```shell +curl http://127.0.0.1:9080/index.html -i -H "Accept-Encoding: gzip" +``` + +``` +HTTP/1.1 404 Not Found +Content-Type: text/html; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Date: Wed, 21 Jul 2021 03:52:55 GMT +Server: APISIX/2.7 +Content-Encoding: gzip + +Warning: Binary output can mess up your terminal. Use "--output -" to tell +Warning: curl to output it to your terminal anyway, or consider "--output +Warning: " to save to a file. +``` + +## Delete Plugin + +To remove the `gzip` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/hmac-auth.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/hmac-auth.md new file mode 100644 index 0000000..354af2a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/hmac-auth.md @@ -0,0 +1,760 @@ +--- +title: hmac-auth +keywords: + - Apache APISIX + - API Gateway + - Plugin + - HMAC Authentication + - hmac-auth +description: The hmac-auth Plugin supports HMAC authentication to ensure request integrity, preventing modifications during transmission and enhancing API security. +--- + + + +## Description + +The `hmac-auth` Plugin supports HMAC (Hash-based Message Authentication Code) authentication as a mechanism to ensure the integrity of requests, preventing them from being modified during transmissions. To use the Plugin, you would configure HMAC secret keys on [Consumers](../terminology/consumer.md) and enable the Plugin on Routes or Services. + +When a Consumer is successfully authenticated, APISIX adds additional headers, such as `X-Consumer-Username`, `X-Credential-Identifier`, and other Consumer custom headers if configured, to the request, before proxying it to the Upstream service. The Upstream service will be able to differentiate between consumers and implement additional logic as needed. If any of these values is not available, the corresponding header will not be added. + +Once enabled, the Plugin verifies the HMAC signature in the request's `Authorization` header and checks that incoming requests are from trusted sources. Specifically, when APISIX receives an HMAC-signed request, the key ID is extracted from the `Authorization` header.
APISIX then retrieves the corresponding Consumer configuration, including the secret key. If the key ID is valid and exists, APISIX generates an HMAC signature using the request's `Date` header and the secret key. If this generated signature matches the signature provided in the `Authorization` header, the request is authenticated and forwarded to Upstream services. + +The Plugin implementation is based on [draft-cavage-http-signatures](https://www.ietf.org/archive/id/draft-cavage-http-signatures-12.txt). + +## Attributes + +The following attributes are available for configurations on Consumers or Credentials. + +| Name | Type | Required | Default | Valid values | Description | +|-----------------------|---------------|----------|---------------|---------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| key_id | string | True | | | Unique identifier for the Consumer, which identifies the associated configurations such as the secret key. | +| secret_key | string | True | | | Secret key used to generate an HMAC. This field supports saving the value in Secret Manager using the [APISIX Secret](../terminology/secret.md) resource. | + +The following attributes are available for configurations on Routes or Services. 
+ +| Name | Type | Required | Default | Valid values | Description | +|-----------------------|---------------|----------|---------------|---------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| allowed_algorithms | array[string] | False | ["hmac-sha1","hmac-sha256","hmac-sha512"] | combination of "hmac-sha1", "hmac-sha256", and "hmac-sha512" | The list of HMAC algorithms allowed. | +| clock_skew | integer | False | 300 | >=1 | Maximum allowable time difference in seconds between the client request's timestamp and APISIX server's current time. This helps account for discrepancies in time synchronization between the client’s and server’s clocks and protect against replay attacks. The timestamp in the Date header (must be in GMT format) will be used for the calculation. | +| signed_headers | array[string] | False | | | The list of HMAC-signed headers that should be included in the client request's HMAC signature. | +| validate_request_body | boolean | False | false | | If true, validate the integrity of the request body to ensure it has not been tampered with during transmission. Specifically, the Plugin creates a SHA-256 base64-encoded digest and compares it to the `Digest` header. If the `Digest` header is missing or if the digests do not match, the validation fails. | +| hide_credentials | boolean | False | false | | If true, do not pass the authorization request header to Upstream services. | +| anonymous_consumer | string | False | | | Anonymous Consumer name. If configured, allow anonymous users to bypass the authentication. | + +NOTE: `encrypt_fields = {"secret_key"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields).
+ +## Examples + +The examples below demonstrate how you can work with the `hmac-auth` Plugin for different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Implement HMAC Authentication on a Route + +The following example demonstrates how to implement HMAC authentications on a route. You will also attach a Consumer custom ID to authenticated request in the `Consumer-Custom-Id` header, which can be used to implement additional logics as needed. + +Create a Consumer `john` with a custom ID label: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john", + "labels": { + "custom_id": "495aec6a" + } + }' +``` + +Create `hmac-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-hmac-auth", + "plugins": { + "hmac-auth": { + "key_id": "john-key", + "secret_key": "john-secret-key" + } + } + }' +``` + +Create a Route with the `hmac-auth` Plugin using its default configurations: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "hmac-auth-route", + "uri": "/get", + "methods": ["GET"], + "plugins": { + "hmac-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Generate a signature. 
You can use the below Python snippet or other stack of your choice: + +```python title="hmac-sig-header-gen.py" +import hmac +import hashlib +import base64 +from datetime import datetime, timezone + +key_id = "john-key" # key id +secret_key = b"john-secret-key" # secret key +request_method = "GET" # HTTP method +request_path = "/get" # Route URI +algorithm= "hmac-sha256" # can use other algorithms in allowed_algorithms + +# get current datetime in GMT +# note: the signature will become invalid after the clock skew (default 300s) +# you can regenerate the signature after it becomes invalid, or increase the clock +# skew to prolong the validity within the advised security boundary +gmt_time = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT') + +# construct the signing string (ordered) +# the date and any subsequent custom headers should be lowercased and separated by a +# single space character, i.e. `:` +# https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12#section-2.1.6 +signing_string = ( + f"{key_id}\n" + f"{request_method} {request_path}\n" + f"date: {gmt_time}\n" +) + +# create signature +signature = hmac.new(secret_key, signing_string.encode('utf-8'), hashlib.sha256).digest() +signature_base64 = base64.b64encode(signature).decode('utf-8') + +# construct the request headers +headers = { + "Date": gmt_time, + "Authorization": ( + f'Signature keyId="{key_id}",algorithm="{algorithm}",' + f'headers="@request-target date",' + f'signature="{signature_base64}"' + ) +} + +# print headers +print(headers) +``` + +Run the script: + +```shell +python3 hmac-sig-header-gen.py +``` + +You should see the request headers printed: + +```text +{'Date': 'Fri, 06 Sep 2024 06:41:29 GMT', 'Authorization': 'Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="wWfKQvPDr0wHQ4IHdluB4IzeNZcj0bGJs2wvoCOT5rM="'} +``` + +Using the headers generated, send a request to the route: + +```shell +curl -X GET 
"http://127.0.0.1:9080/get" \ + -H "Date: Fri, 06 Sep 2024 06:41:29 GMT" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="wWfKQvPDr0wHQ4IHdluB4IzeNZcj0bGJs2wvoCOT5rM="' +``` + +You should see an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Authorization": "Signature keyId=\"john-key\",algorithm=\"hmac-sha256\",headers=\"@request-target date\",signature=\"wWfKQvPDr0wHQ4IHdluB4IzeNZcj0bGJs2wvoCOT5rM=\"", + "Date": "Fri, 06 Sep 2024 06:41:29 GMT", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66d96513-2e52d4f35c9b6a2772d667ea", + "X-Consumer-Username": "john", + "X-Credential-Identifier": "cred-john-hmac-auth", + "X-Consumer-Custom-Id": "495aec6a", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 34.0.34.160", + "url": "http://127.0.0.1/get" +} +``` + +### Hide Authorization Information From Upstream + +As seen in the [last example](#implement-hmac-authentication-on-a-route), the `Authorization` header passed to the Upstream includes the signature and all other details. This could potentially introduce security risks. + +The following example demonstrates how to prevent this information from being sent to the Upstream service.
+ +Update the Plugin configuration to set `hide_credentials` to `true`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/hmac-auth-route" -X PATCH \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "plugins": { + "hmac-auth": { + "hide_credentials": true + } + } +}' +``` + +Send a request to the route: + +```shell +curl -X GET "http://127.0.0.1:9080/get" \ + -H "Date: Fri, 06 Sep 2024 06:41:29 GMT" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="wWfKQvPDr0wHQ4IHdluB4IzeNZcj0bGJs2wvoCOT5rM="' +``` + +You should see an `HTTP/1.1 200 OK` response and notice the `Authorization` header is entirely removed: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66d96513-2e52d4f35c9b6a2772d667ea", + "X-Consumer-Username": "john", + "X-Credential-Identifier": "cred-john-hmac-auth", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 34.0.34.160", + "url": "http://127.0.0.1/get" +} +``` + +### Enable Body Validation + +The following example demonstrates how to enable body validation to ensure the integrity of the request body. 
+ +Create a Consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +Create `hmac-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-hmac-auth", + "plugins": { + "hmac-auth": { + "key_id": "john-key", + "secret_key": "john-secret-key" + } + } + }' +``` + +Create a Route with the `hmac-auth` Plugin as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "hmac-auth-route", + "uri": "/post", + "methods": ["POST"], + "plugins": { + "hmac-auth": { + "validate_request_body": true + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Generate a signature. You can use the below Python snippet or other stack of your choice: + +```python title="hmac-sig-digest-header-gen.py" +import hmac +import hashlib +import base64 +from datetime import datetime, timezone + +key_id = "john-key" # key id +secret_key = b"john-secret-key" # secret key +request_method = "POST" # HTTP method +request_path = "/post" # Route URI +algorithm= "hmac-sha256" # can use other algorithms in allowed_algorithms +body = '{"name": "world"}' # example request body + +# get current datetime in GMT +# note: the signature will become invalid after the clock skew (default 300s). +# you can regenerate the signature after it becomes invalid, or increase the clock +# skew to prolong the validity within the advised security boundary +gmt_time = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT') + +# construct the signing string (ordered) +# the date and any subsequent custom headers should be lowercased and separated by a +# single space character, i.e. 
`:` +# https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12#section-2.1.6 +signing_string = ( + f"{key_id}\n" + f"{request_method} {request_path}\n" + f"date: {gmt_time}\n" +) + +# create signature +signature = hmac.new(secret_key, signing_string.encode('utf-8'), hashlib.sha256).digest() +signature_base64 = base64.b64encode(signature).decode('utf-8') + +# create the SHA-256 digest of the request body and base64 encode it +body_digest = hashlib.sha256(body.encode('utf-8')).digest() +body_digest_base64 = base64.b64encode(body_digest).decode('utf-8') + +# construct the request headers +headers = { + "Date": gmt_time, + "Digest": f"SHA-256={body_digest_base64}", + "Authorization": ( + f'Signature keyId="{key_id}",algorithm="hmac-sha256",' + f'headers="@request-target date",' + f'signature="{signature_base64}"' + ) +} + +# print headers +print(headers) +``` + +Run the script: + +```shell +python3 hmac-sig-digest-header-gen.py +``` + +You should see the request headers printed: + +```text +{'Date': 'Fri, 06 Sep 2024 09:16:16 GMT', 'Digest': 'SHA-256=78qzJuLwSpZ8HacsTdFCQJWxzPMOf8bYctRk2ySLpS8=', 'Authorization': 'Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="rjS6NxOBKmzS8CZL05uLiAfE16hXdIpMD/L/HukOTYE="'} +``` + +Using the headers generated, send a request to the route: + +```shell +curl "http://127.0.0.1:9080/post" -X POST \ + -H "Date: Fri, 06 Sep 2024 09:16:16 GMT" \ + -H "Digest: SHA-256=78qzJuLwSpZ8HacsTdFCQJWxzPMOf8bYctRk2ySLpS8=" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="rjS6NxOBKmzS8CZL05uLiAfE16hXdIpMD/L/HukOTYE="' \ + -d '{"name": "world"}' +``` + +You should see an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": { + "{\"name\": \"world\"}": "" + }, + "headers": { + "Accept": "*/*", + "Authorization": "Signature 
keyId=\"john-key\",algorithm=\"hmac-sha256\",headers=\"@request-target date\",signature=\"rjS6NxOBKmzS8CZL05uLiAfE16hXdIpMD/L/HukOTYE=\"", + "Content-Length": "17", + "Content-Type": "application/x-www-form-urlencoded", + "Date": "Fri, 06 Sep 2024 09:16:16 GMT", + "Digest": "SHA-256=78qzJuLwSpZ8HacsTdFCQJWxzPMOf8bYctRk2ySLpS8=", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66d978c3-49f929ad5237da5340bbbeb4", + "X-Consumer-Username": "john", + "X-Credential-Identifier": "cred-john-hmac-auth", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "origin": "192.168.65.1, 34.0.34.160", + "url": "http://127.0.0.1/post" +} +``` + +If you send a request without the digest or with an invalid digest: + +```shell +curl "http://127.0.0.1:9080/post" -X POST \ + -H "Date: Fri, 06 Sep 2024 09:16:16 GMT" \ + -H "Digest: SHA-256=78qzJuLwSpZ8HacsTdFCQJWxzPMOf8bYctRk2ySLpS8=" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="rjS6NxOBKmzS8CZL05uLiAfE16hXdIpMD/L/HukOTYE="' \ + -d '{"name": "world"}' +``` + +You should see an `HTTP/1.1 401 Unauthorized` response with the following message: + +```text +{"message":"client request can't be validated"} +``` + +### Mandate Signed Headers + +The following example demonstrates how you can mandate certain headers to be signed in the request's HMAC signature. 
+ +Create a Consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +Create `hmac-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-hmac-auth", + "plugins": { + "hmac-auth": { + "key_id": "john-key", + "secret_key": "john-secret-key" + } + } + }' +``` + +Create a Route with the `hmac-auth` Plugin which requires three headers to be present in the HMAC signature: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "hmac-auth-route", + "uri": "/get", + "methods": ["GET"], + "plugins": { + "hmac-auth": { + "signed_headers": ["date","x-custom-header-a","x-custom-header-b"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Generate a signature. 
You can use the below Python snippet or other stack of your choice: + +```python title="hmac-sig-req-header-gen.py" +import hmac +import hashlib +import base64 +from datetime import datetime, timezone + +key_id = "john-key" # key id +secret_key = b"john-secret-key" # secret key +request_method = "GET" # HTTP method +request_path = "/get" # Route URI +algorithm= "hmac-sha256" # can use other algorithms in allowed_algorithms +custom_header_a = "hello123" # required custom header +custom_header_b = "world456" # required custom header + +# get current datetime in GMT +# note: the signature will become invalid after the clock skew (default 300s) +# you can regenerate the signature after it becomes invalid, or increase the clock +# skew to prolong the validity within the advised security boundary +gmt_time = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT') + +# construct the signing string (ordered) +# the date and any subsequent custom headers should be lowercased and separated by a +# single space character, i.e. 
`:` +# https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12#section-2.1.6 +signing_string = ( + f"{key_id}\n" + f"{request_method} {request_path}\n" + f"date: {gmt_time}\n" + f"x-custom-header-a: {custom_header_a}\n" + f"x-custom-header-b: {custom_header_b}\n" +) + +# create signature +signature = hmac.new(secret_key, signing_string.encode('utf-8'), hashlib.sha256).digest() +signature_base64 = base64.b64encode(signature).decode('utf-8') + +# construct the request headers +headers = { + "Date": gmt_time, + "Authorization": ( + f'Signature keyId="{key_id}",algorithm="hmac-sha256",' + f'headers="@request-target date x-custom-header-a x-custom-header-b",' + f'signature="{signature_base64}"' + ), + "x-custom-header-a": custom_header_a, + "x-custom-header-b": custom_header_b +} + +# print headers +print(headers) +``` + +Run the script: + +```shell +python3 hmac-sig-req-header-gen.py +``` + +You should see the request headers printed: + +```text +{'Date': 'Fri, 06 Sep 2024 09:58:49 GMT', 'Authorization': 'Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date x-custom-header-a x-custom-header-b",signature="MwJR8JOhhRLIyaHlJ3Snbrf5hv0XwdeeRiijvX3A3yE="', 'x-custom-header-a': 'hello123', 'x-custom-header-b': 'world456'} +``` + +Using the headers generated, send a request to the route: + +```shell +curl -X GET "http://127.0.0.1:9080/get" \ + -H "Date: Fri, 06 Sep 2024 09:58:49 GMT" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date x-custom-header-a x-custom-header-b",signature="MwJR8JOhhRLIyaHlJ3Snbrf5hv0XwdeeRiijvX3A3yE="' \ + -H "x-custom-header-a: hello123" \ + -H "x-custom-header-b: world456" +``` + +You should see an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Authorization": "Signature keyId=\"john-key\",algorithm=\"hmac-sha256\",headers=\"@request-target date x-custom-header-a 
x-custom-header-b\",signature=\"MwJR8JOhhRLIyaHlJ3Snbrf5hv0XwdeeRiijvX3A3yE=\"", + "Date": "Fri, 06 Sep 2024 09:58:49 GMT", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66d98196-64a58db25ece71c077999ecd", + "X-Consumer-Username": "john", + "X-Credential-Identifier": "cred-john-hmac-auth", + "X-Custom-Header-A": "hello123", + "X-Custom-Header-B": "world456", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 103.97.2.206", + "url": "http://127.0.0.1/get" +} +``` + +### Rate Limit with Anonymous Consumer + +The following example demonstrates how you can configure different rate limiting policies by regular and anonymous consumers, where the anonymous Consumer does not need to authenticate and has less quotas. + +Create a regular Consumer `john` and configure the `limit-count` Plugin to allow for a quota of 3 within a 30-second window: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john", + "plugins": { + "limit-count": { + "count": 3, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +Create the `hmac-auth` Credential for the Consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-hmac-auth", + "plugins": { + "hmac-auth": { + "key_id": "john-key", + "secret_key": "john-secret-key" + } + } + }' +``` + +Create an anonymous user `anonymous` and configure the `limit-count` Plugin to allow for a quota of 1 within a 30-second window: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +Create a Route and configure the `hmac-auth` Plugin to accept anonymous Consumer `anonymous` from bypassing the authentication: + 
+```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "hmac-auth-route", + "uri": "/get", + "methods": ["GET"], + "plugins": { + "hmac-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Generate a signature. You can use the below Python snippet or other stack of your choice: + +```python title="hmac-sig-header-gen.py" +import hmac +import hashlib +import base64 +from datetime import datetime, timezone + +key_id = "john-key" # key id +secret_key = b"john-secret-key" # secret key +request_method = "GET" # HTTP method +request_path = "/get" # Route URI +algorithm= "hmac-sha256" # can use other algorithms in allowed_algorithms + +# get current datetime in GMT +# note: the signature will become invalid after the clock skew (default 300s) +# you can regenerate the signature after it becomes invalid, or increase the clock +# skew to prolong the validity within the advised security boundary +gmt_time = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT') + +# construct the signing string (ordered) +# the date and any subsequent custom headers should be lowercased and separated by a +# single space character, i.e. 
`:` +# https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12#section-2.1.6 +signing_string = ( + f"{key_id}\n" + f"{request_method} {request_path}\n" + f"date: {gmt_time}\n" +) + +# create signature +signature = hmac.new(secret_key, signing_string.encode('utf-8'), hashlib.sha256).digest() +signature_base64 = base64.b64encode(signature).decode('utf-8') + +# construct the request headers +headers = { + "Date": gmt_time, + "Authorization": ( + f'Signature keyId="{key_id}",algorithm="{algorithm}",' + f'headers="@request-target date",' + f'signature="{signature_base64}"' + ) +} + +# print headers +print(headers) +``` + +Run the script: + +```shell +python3 hmac-sig-header-gen.py +``` + +You should see the request headers printed: + +```text +{'Date': 'Mon, 21 Oct 2024 17:31:18 GMT', 'Authorization': 'Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="ztFfl9w7LmCrIuPjRC/DWSF4gN6Bt8dBBz4y+u1pzt8="'} +``` + +To verify, send five consecutive requests with the generated headers: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H "Date: Mon, 21 Oct 2024 17:31:18 GMT" -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="ztFfl9w7LmCrIuPjRC/DWSF4gN6Bt8dBBz4y+u1pzt8="' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that out of the 5 requests, 3 requests were successful (status code 200) while the others were rejected (status code 429). 
+ +```text +200: 3, 429: 2 +``` + +Send five anonymous requests: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that only one request was successful: + +```text +200: 1, 429: 4 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/http-dubbo.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/http-dubbo.md new file mode 100755 index 0000000..f550098 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/http-dubbo.md @@ -0,0 +1,128 @@ +--- +title: http-dubbo +keywords: + - Apache APISIX + - API Gateway + - Plugin + - http-dubbo + - http to dubbo + - transcode +description: This document contains information about the Apache APISIX http-dubbo Plugin. +--- + + + +## Description + +The `http-dubbo` plugin can transcode between http and Dubbo (Note: in +Dubbo 2.x, the serialization type of the upstream service must be fastjson). 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|--------------------------|---------|----------|---------|--------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| service_name | string | True | | | Dubbo service name | +| service_version | string | False | 0.0.0 | | Dubbo service version | +| method | string | True | | | Dubbo service method name | +| params_type_desc | string | True | | | Description of the Dubbo service method signature | +| serialization_header_key | string | False | | | If `serialization_header_key` is set, the plugin will read this request header to determine if the body has already been serialized according to the Dubbo protocol. If the value of this request header is true, the plugin will not modify the body content and will directly consider it as Dubbo request parameters. If it is false, the developer is required to pass parameters in the format of Dubbo's generic invocation, and the plugin will handle serialization. Note: Due to differences in precision between Lua and Java, serialization by the plugin may lead to parameter precision discrepancies. | +| serialized | boolean | False | false | [true, false] | Same as `serialization_header_key`. Priority is lower than `serialization_header_key`. 
| +| connect_timeout | number | False | 6000 | | Upstream tcp connect timeout | +| read_timeout | number | False | 6000 | | Upstream tcp read_timeout | +| send_timeout | number | False | 6000 | | Upstream tcp send_timeout | + +## Enable Plugin + +The example below enables the `http-dubbo` Plugin on the specified Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/TestService/testMethod", + "plugins": { + "http-dubbo": { + "method": "testMethod", + "params_type_desc": "Ljava/lang/Long;Ljava/lang/Integer;", + "serialized": true, + "service_name": "com.xxx.xxx.TestService", + "service_version": "0.0.0" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:20880": 1 + } + } +}' +``` + +## Example usage + +Once you have configured the Plugin as shown above, you can make a request as shown below: + +```shell +curl --location 'http://127.0.0.1:9080/TestService/testMethod' \ +--data '1 +2' +``` + +## How to Get `params_type_desc` + +```java +Method[] declaredMethods = YourService.class.getDeclaredMethods(); +String params_type_desc = ReflectUtils.getDesc(Arrays.stream(declaredMethods).filter(it -> it.getName().equals("yourmethod")).findAny().get().getParameterTypes()); + +// If there are method overloads, you need to find the method you want to expose. +// ReflectUtils is a Dubbo implementation. +``` + +## How to Serialize JSON According to Dubbo Protocol + +To prevent loss of precision, we recommend using pre-serialized bodies for requests. The serialization rules for Dubbo's +fastjson are as follows: + +- Convert each parameter to a JSON string using toJSONString. +- Separate each parameter with a newline character `\n`. 
+ +Some languages and libraries may produce unchanged results when calling toJSONString on strings or numbers. In such +cases, you may need to manually handle some special cases. For example: + +- The string `abc"` needs to be encoded as `"abc\""`. +- The string `123` needs to be encoded as `"123"`. + +Abstract class, parent class, or generic type as input parameter signature, when the input parameter requires a specific +type. Serialization requires writing specific type information. +Refer to [WriteClassName](https://github.com/alibaba/fastjson/wiki/SerializerFeature_cn) for more details. + +## Delete Plugin + +To remove the `http-dubbo` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. +APISIX will automatically reload and you do not have to restart for this to take effect. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/http-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/http-logger.md new file mode 100644 index 0000000..d07375b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/http-logger.md @@ -0,0 +1,194 @@ +--- +title: http-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - HTTP Logger +description: This document contains information about the Apache APISIX http-logger Plugin. Using this Plugin, you can push APISIX log data to HTTP or HTTPS servers. +--- + + + +## Description + +The `http-logger` Plugin is used to push log data requests to HTTP/HTTPS servers. + +This will allow the ability to send log data requests as JSON objects to monitoring tools and other HTTP servers. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ---------------------- | ------- | -------- | ------------- | -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| uri | string | True | | | URI of the HTTP/HTTPS server. | +| auth_header | string | False | | | Authorization headers if required. | +| timeout | integer | False | 3 | [1,...] | Time to keep the connection alive for after sending a request. | +| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | +| include_req_body_expr | array | False | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | +| include_resp_body | boolean | False | false | [false, true] | When set to `true` includes the response body in the log. | +| include_resp_body_expr | array | False | | | When the `include_resp_body` attribute is set to `true`, use this to filter based on [lua-resty-expr](https://github.com/api7/lua-resty-expr). If present, only logs the response if the expression evaluates to `true`. | +| concat_method | string | False | "json" | ["json", "new_line"] | Sets how to concatenate logs. 
When set to `json`, uses `json.encode` for all pending logs and when set to `new_line`, also uses `json.encode` but uses the newline (`\n`) to concatenate lines. | +| ssl_verify | boolean | False | false | [false, true] | When set to `true` verifies the SSL certificate. | + +:::note + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +::: + +### Example of default log format + + ```json + { + "service_id": "", + "apisix_latency": 100.99999809265, + "start_time": 1703907485819, + "latency": 101.99999809265, + "upstream_latency": 1, + "client_ip": "127.0.0.1", + "route_id": "1", + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "request": { + "headers": { + "host": "127.0.0.1:1984", + "content-type": "application/x-www-form-urlencoded", + "user-agent": "lua-resty-http/0.16.1 (Lua) ngx_lua/10025", + "content-length": "12" + }, + "method": "POST", + "size": 194, + "url": "http://127.0.0.1:1984/hello?log_body=no", + "uri": "/hello?log_body=no", + "querystring": { + "log_body": "no" + } + }, + "response": { + "headers": { + "content-type": "text/plain", + "connection": "close", + "content-length": "12", + "server": "APISIX/3.7.0" + }, + "status": 200, + "size": 123 + }, + "upstream": "127.0.0.1:1982" + } + ``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. 
The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `http-logger` Plugin. + +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/http-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Enable Plugin + +The example below shows how you can enable the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": 
"http://mockbin.org/bin/:ID" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +As an example the [mockbin](http://mockbin.org/bin/create) server is used for mocking an HTTP server to see the logs produced by APISIX. + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your mockbin server: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## Delete Plugin + +To disable this Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/inspect.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/inspect.md new file mode 100644 index 0000000..9f5b3fe --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/inspect.md @@ -0,0 +1,188 @@ +--- +title: inspect +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Inspect + - Dynamic Lua Debugging +description: This document contains information about the Apache APISIX inspect Plugin. +--- + + + +## Description + +It's useful to set arbitrary breakpoint in any Lua file to inspect the context information, +e.g. print local variables if some condition satisfied. + +In this way, you don't need to modify the source code of your project, and just get diagnose information +on demand, i.e. dynamic logging. + +This plugin supports setting breakpoints within both interpretd function and jit compiled function. +The breakpoint could be at any position within the function. The function could be global/local/module/ananymous. 
+
+## Features
+
+* Set breakpoint at any position
+* Dynamic breakpoint
+* Customized breakpoint handler
+* You could define one-shot breakpoint
+* Work for jit compiled function
+* If function reference specified, then performance impact is only bound to that function (JIT compiled code will not trigger debug hook, so they would run fast even if hook is enabled)
+* If all breakpoints deleted, jit could recover
+
+## Operation Graph
+
+![Operation Graph](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/plugin/inspect.png)
+
+## API to define hook in hooks file
+
+### require("apisix.inspect.dbg").set_hook(file, line, func, filter_func)
+
+The breakpoint is specified by `file` (fully qualified or short file name) and the `line` number.
+
+The `func` specifies the scope (which function or global) of jit cache to flush:
+
+* If the breakpoint is related to a module function or
+global function, you should set it to that function reference, then only the jit cache of that function would
+be flushed, and it would not affect other caches to avoid slowing down other parts of the program.
+
+* If the breakpoint is related to a local function or anonymous function,
+then you have to set it to `nil` (because there is no way to get the function reference), which would flush the whole jit cache of the Lua VM.
+
+You can attach a `filter_func` function to the breakpoint. The function takes the `info` table as its argument and returns
+true or false to determine whether the breakpoint would be removed. This makes it easy to set up
+one-shot breakpoints.
+ +The `info` is a hash table which contains below keys: + +* `finfo`: `debug.getinfo(level, "nSlf")` +* `uv`: upvalues hash table +* `vals`: local variables hash table + +## Attributes + +| Name | Type | Required | Default | Description | +|--------------------|---------|----------|---------|------------------------------------------------------------------------------------------------| +| delay | integer | False | 3 | Time in seconds specifying how often to check the hooks file. | +| hooks_file | string | False | "/usr/local/apisix/plugin_inspect_hooks.lua" | Lua file to define hooks, which could be a link file. Ensure only administrator could write this file, otherwise it may be a security risk. | + +## Enable Plugin + +Plugin is enabled by default: + +```yaml title="apisix/cli/config.lua" +local _M = { + plugins = { + "inspect", + ... + }, + plugin_attr = { + inspect = { + delay = 3, + hooks_file = "/usr/local/apisix/plugin_inspect_hooks.lua" + }, + ... + }, + ... +} +``` + +## Example usage + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```bash +# create test route +curl http://127.0.0.1:9180/apisix/admin/routes/test_limit_req -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/get", + "plugins": { + "limit-req": { + "rate": 100, + "burst": 0, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' + +# create a hooks file to set a test breakpoint +# Note that the breakpoint is associated with the line number, +# so if the Lua code changes, you need to adjust the line number in the hooks file +cat </usr/local/apisix/example_hooks.lua +local dbg = require "apisix.inspect.dbg" + +dbg.set_hook("limit-req.lua", 88, 
require("apisix.plugins.limit-req").access, function(info) + ngx.log(ngx.INFO, debug.traceback("foo traceback", 3)) + ngx.log(ngx.INFO, dbg.getname(info.finfo)) + ngx.log(ngx.INFO, "conf_key=", info.vals.conf_key) + return true +end) + +--- more breakpoints could be defined via dbg.set_hook() +--- ... +EOF + +# enable the hooks file +ln -sf /usr/local/apisix/example_hooks.lua /usr/local/apisix/plugin_inspect_hooks.lua + +# check errors.log to confirm the test breakpoint is enabled +2022/09/01 00:55:38 [info] 2754534#2754534: *3700 [lua] init.lua:29: setup_hooks(): set hooks: err=nil, hooks=["limit-req.lua#88"], context: ngx.timer + +# access the test route +curl -i http://127.0.0.1:9080/get + +# check errors.log to confirm the test breakpoint is triggered +2022/09/01 00:55:52 [info] 2754534#2754534: *4070 [lua] resty_inspect_hooks.lua:4: foo traceback +stack traceback: + /opt/lua-resty-inspect/lib/resty/inspect/dbg.lua:50: in function + /opt/apisix.fork/apisix/plugins/limit-req.lua:88: in function 'phase_func' + /opt/apisix.fork/apisix/plugin.lua:900: in function 'run_plugin' + /opt/apisix.fork/apisix/init.lua:456: in function 'http_access_phase' + access_by_lua(nginx.conf:303):2: in main chunk, client: 127.0.0.1, server: _, request: "GET /get HTTP/1.1", host: "127.0.0.1:9080" +2022/09/01 00:55:52 [info] 2754534#2754534: *4070 [lua] resty_inspect_hooks.lua:5: /opt/apisix.fork/apisix/plugins/limit-req.lua:88 (phase_func), client: 127.0.0.1, server: _, request: "GET /get HTTP/1.1", host: "127.0.0.1:9080" +2022/09/01 00:55:52 [info] 2754534#2754534: *4070 [lua] resty_inspect_hooks.lua:6: conf_key=remote_addr, client: 127.0.0.1, server: _, request: "GET /get HTTP/1.1", host: "127.0.0.1:9080" +``` + +## Delete Plugin + +To remove the `inspect` Plugin, you can remove it from your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + # - inspect +``` diff --git 
a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ip-restriction.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ip-restriction.md new file mode 100644 index 0000000..e332a3c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ip-restriction.md @@ -0,0 +1,154 @@ +--- +title: ip-restriction +keywords: + - Apache APISIX + - API Gateway + - Plugin + - IP restriction + - ip-restriction +description: The ip-restriction Plugin supports restricting access to upstream resources by IP addresses, through either configuring a whitelist or blacklist of IP addresses. +--- + + + + + + + +## Description + +The `ip-restriction` Plugin supports restricting access to upstream resources by IP addresses, through either configuring a whitelist or blacklist of IP addresses. Restricting IP to resources helps prevent unauthorized access and harden API security. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|---------------|---------------|----------|----------------------------------|--------------|------------------------------------------------------------------------| +| whitelist | array[string] | False | | | List of IPs or CIDR ranges to whitelist. | +| blacklist | array[string] | False | | | List of IPs or CIDR ranges to blacklist. | +| message | string | False | "Your IP address is not allowed" | [1, 1024] | Message returned when the IP address is not allowed access. | +| response_code | integer | False | 403 | [403, 404] | HTTP response code returned when the IP address is not allowed access. | + +:::note + +At least one of the `whitelist` or `blacklist` should be configured, but they cannot be configured at the same time. + +::: + +## Examples + +The examples below demonstrate how you can configure the `ip-restriction` Plugin for different scenarios. 
+ +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Restrict Access by Whitelisting + +The following example demonstrates how you can whitelist a list of IP addresses that should have access to the upstream resource and customize the error message for access denial. + +Create a Route with the `ip-restriction` Plugin to whitelist a range of IPs and customize the error message when the access is denied: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ip-restriction-route", + "uri": "/anything", + "plugins": { + "ip-restriction": { + "whitelist": [ + "192.168.0.1/24" + ], + "message": "Access denied" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +If your IP is allowed, you should receive an `HTTP/1.1 200 OK` response. If not, you should receive an `HTTP/1.1 403 Forbidden` response with the following error message: + +```text +{"message":"Access denied"} +``` + +### Restrict Access Using Modified IP + +The following example demonstrates how you can modify the IP used for IP restriction, using the `real-ip` Plugin. This is particularly useful if APISIX is behind a reverse proxy and the real client IP is not available to APISIX. 
+ +Create a Route with the `ip-restriction` Plugin to whitelist a specific IP address and obtain client IP address from the URL parameter `realip`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ip-restriction-route", + "uri": "/anything", + "plugins": { + "ip-restriction": { + "whitelist": [ + "192.168.1.241" + ] + }, + "real-ip": { + "source": "arg_realip" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything?realip=192.168.1.241" +``` + +You should receive an `HTTP/1.1 200 OK` response. + +Send another request with a different IP address: + +```shell +curl -i "http://127.0.0.1:9080/anything?realip=192.168.10.24" +``` + +You should receive an `HTTP/1.1 403 Forbidden` response. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/jwe-decrypt.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/jwe-decrypt.md new file mode 100644 index 0000000..3e55a31 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/jwe-decrypt.md @@ -0,0 +1,198 @@ +--- +title: jwe-decrypt +keywords: + - Apache APISIX + - API Gateway + - Plugin + - JWE Decrypt + - jwe-decrypt +description: This document contains information about the Apache APISIX jwe-decrypt Plugin. +--- + + + +## Description + +The `jwe-decrypt` Plugin is used to decrypt [JWE](https://datatracker.ietf.org/doc/html/rfc7516) authorization headers in requests to an APISIX [Service](../terminology/service.md) or [Route](../terminology/route.md). + +This Plugin adds an endpoint `/apisix/plugin/jwe/encrypt` for JWE encryption. For decryption, the key should be configured in [Consumer](../terminology/consumer.md). 
+ +## Attributes + +For Consumer: + +| Name | Type | Required | Default | Valid values | Description | +|---------------|---------|-------------------------------------------------------|---------|-----------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| +| key | string | True | | | Unique key for a Consumer. | +| secret | string | True | | | The decryption key. Must be 32 characters. The key could be saved in a secret manager using the [Secret](../terminology/secret.md) resource. | +| is_base64_encoded | boolean | False | false | | Set to true if the secret is base64 encoded. | + +:::note + +After enabling `is_base64_encoded`, your `secret` length may exceed 32 chars. You only need to make sure that the length after decoding is still 32 chars. + +::: + +For Route: + +| Name | Type | Required | Default | Description | +|--------|--------|----------|---------------|---------------------------------------------------------------------| +| header | string | True | Authorization | The header to get the token from. | +| forward_header | string | True | Authorization | Set the header name that passes the plaintext to the Upstream. | +| strict | boolean | False | true | If true, throw a 403 error if JWE token is missing from the request. If false, do not throw an error if JWE token cannot be found. 
| + +## Example usage + +First, create a Consumer with `jwe-decrypt` and configure the decryption key: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "jwe-decrypt": { + "key": "user-key", + "secret": "-secret-length-must-be-32-chars-" + } + } +}' +``` + +Next, create a Route with `jwe-decrypt` enabled to decrypt the authorization header: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/anything*", + "plugins": { + "jwe-decrypt": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +### Encrypt Data with JWE + +The Plugin creates an internal endpoint `/apisix/plugin/jwe/encrypt` to encrypt data with JWE. 
To expose it publicly, create a Route with the [public-api](public-api.md) Plugin: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/jwenew -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/apisix/plugin/jwe/encrypt", + "plugins": { + "public-api": {} + } +}' +``` + +Send a request to the endpoint passing the key configured in Consumer to the URI parameter to encrypt some sample data in the payload: + +```shell +curl -G --data-urlencode 'payload={"uid":10000,"uname":"test"}' 'http://127.0.0.1:9080/apisix/plugin/jwe/encrypt?key=user-key' -i +``` + +You should see a response similar to the following, with the JWE encrypted data in the response body: + +``` +HTTP/1.1 200 OK +Date: Mon, 25 Sep 2023 02:38:16 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.5.0 +Apisix-Plugins: public-api + +eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy.hfzMJ0YfmbMcJ0ojgv4PYAHxPjlgMivmv35MiA.7nilnBt2dxLR_O6kf-HQUA +``` + +### Decrypt Data with JWE + +Send a request to the route with the JWE encrypted data in the `Authorization` header: + +```shell +curl http://127.0.0.1:9080/anything/hello -H 'Authorization: eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy.hfzMJ0YfmbMcJ0ojgv4PYAHxPjlgMivmv35MiA.7nilnBt2dxLR_O6kf-HQUA' -i +``` + +You should see a response similar to the following, where the `Authorization` header shows the plaintext of the payload: + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 452 +Connection: keep-alive +Date: Mon, 25 Sep 2023 02:38:59 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/3.5.0 +Apisix-Plugins: jwe-decrypt + +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Authorization": "{\"uid\":10000,\"uname\":\"test\"}", + "Host": "127.0.0.1", + "User-Agent": "curl/8.1.2", + "X-Amzn-Trace-Id": 
"Root=1-6510f2c3-1586ec011a22b5094dbe1896", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 119.143.79.94", + "url": "http://127.0.0.1/anything/hello" +} +``` + +## Delete Plugin + +To remove the `jwe-decrypt` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/anything*", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/jwt-auth.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/jwt-auth.md new file mode 100644 index 0000000..773a304 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/jwt-auth.md @@ -0,0 +1,911 @@ +--- +title: jwt-auth +keywords: + - Apache APISIX + - API Gateway + - Plugin + - JWT Auth + - jwt-auth +description: The jwt-auth Plugin supports the use of JSON Web Token (JWT) as a mechanism for clients to authenticate themselves before accessing Upstream resources. +--- + + + + + + + +## Description + +The `jwt-auth` Plugin supports the use of [JSON Web Token (JWT)](https://jwt.io/) as a mechanism for clients to authenticate themselves before accessing Upstream resources. + +Once enabled, the Plugin exposes an endpoint to create JWT credentials by [Consumers](../terminology/consumer.md). The process generates a token that client requests should carry to identify themselves to APISIX. The token can be included in the request URL query string, request header, or cookie. APISIX will then verify the token to determine if a request should be allowed or denied to access Upstream resources. 
+ +When a Consumer is successfully authenticated, APISIX adds additional headers, such as `X-Consumer-Username`, `X-Credential-Indentifier`, and other Consumer custom headers if configured, to the request, before proxying it to the Upstream service. The Upstream service will be able to differentiate between consumers and implement additional logics as needed. If any of these values is not available, the corresponding header will not be added. + +## Attributes + +For Consumer/Credential: + +| Name | Type | Required | Default | Valid values | Description | +|---------------|---------|-------------------------------------------------------|---------|-----------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| key | string | True | | non-empty | Unique key for a Consumer. | +| secret | string | False | | non-empty | Shared key used to sign and verify the JWT when the algorithm is symmetric. Required when using `HS256` or `HS512` as the algorithm. If unspecified, the secret will be auto-generated. This field supports saving the value in Secret Manager using the [APISIX Secret](../terminology/secret.md) resource. | +| public_key | string | True if `RS256` or `ES256` is set for the `algorithm` attribute. | | | RSA or ECDSA public key. This field supports saving the value in Secret Manager using the [APISIX Secret](../terminology/secret.md) resource. | +| algorithm | string | False | HS256 | ["HS256", "HS512", "RS256", "ES256"] | Encryption algorithm. | +| exp | integer | False | 86400 | [1,...] | Expiry time of the token in seconds. | +| base64_secret | boolean | False | false | | Set to true if the secret is base64 encoded. | +| lifetime_grace_period | integer | False | 0 | [0,...] | Grace period in seconds. Used to account for clock skew between the server generating the JWT and the server validating the JWT. 
You can implement `jwt-auth` with [HashiCorp Vault](https://www.vaultproject.io/) to store and fetch secrets and RSA key pairs from its [encrypted KV engine](https://developer.hashicorp.com/vault/docs/secrets/kv) using the [APISIX Secret](../terminology/secret.md) resource.
+ +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Use JWT for Consumer Authentication + +The following example demonstrates how to implement JWT for Consumer key authentication. + +Create a Consumer `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +Create `jwt-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "jack-key", + "secret": "jack-hs256-secret" + } + } + }' +``` + +Create a Route with `jwt-auth` plugin: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-route", + "uri": "/headers", + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To issue a JWT for `jack`, you could use [JWT.io's debugger](https://jwt.io/#debugger-io) or other utilities. If you are using [JWT.io's debugger](https://jwt.io/#debugger-io), do the following: + +* Select __HS256__ in the __Algorithm__ dropdown. +* Update the secret in the __Verify Signature__ section to be `jack-hs256-secret`. +* Update payload with Consumer key `jack-key`; and add `exp` or `nbf` in UNIX timestamp. 
+ + Your payload should look similar to the following: + + ```json + { + "key": "jack-key", + "nbf": 1729132271 + } + ``` + +Copy the generated JWT under the __Encoded__ section and save to a variable: + +```text +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsIm5iZiI6MTcyOTEzMjI3MX0.0VDKUzNkSaa_H5g_rGNbNtDcKJ9fBGgcGC56AsVsV-I +``` + +Send a request to the Route with the JWT in the `Authorization` header: + +```shell +curl -i "http://127.0.0.1:9080/headers" -H "Authorization: ${jwt_token}" +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```text +{ + "headers": { + "Accept": "*/*", + "Authorization": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE3MjY2NDk2NDAsImtleSI6ImphY2sta2V5In0.kdhumNWrZFxjUvYzWLt4lFr546PNsr9TXuf0Az5opoM", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66ea951a-4d740d724bd2a44f174d4daf", + "X-Consumer-Username": "jack", + "X-Credential-Identifier": "cred-jack-jwt-auth", + "X-Forwarded-Host": "127.0.0.1" + } +} +``` + +In 30 seconds, the token should expire. Send a request with the same token to verify: + +```shell +curl -i "http://127.0.0.1:9080/headers" -H "Authorization: ${jwt_token}" +``` + +You should receive an `HTTP/1.1 401 Unauthorized` response similar to the following: + +```text +{"message":"failed to verify jwt"} +``` + +### Carry JWT in Request Header, Query String, or Cookie + +The following example demonstrates how to accept JWT in specified header, query string, and cookie. 
+ +Create a Consumer `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +Create `jwt-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "jack-key", + "secret": "jack-hs256-secret" + } + } + }' +``` + +Create a Route with `jwt-auth` Plugin, and specify that the request can either carry the token in the header, query, or the cookie: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-route", + "uri": "/get", + "plugins": { + "jwt-auth": { + "header": "jwt-auth-header", + "query": "jwt-query", + "cookie": "jwt-cookie" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To issue a JWT for `jack`, you could use [JWT.io's debugger](https://jwt.io/#debugger-io) or other utilities. If you are using [JWT.io's debugger](https://jwt.io/#debugger-io), do the following: + +* Select __HS256__ in the __Algorithm__ dropdown. +* Update the secret in the __Verify Signature__ section to be `jack-hs256-secret`. +* Update payload with Consumer key `jack-key`; and add `exp` or `nbf` in UNIX timestamp. 
+ + Your payload should look similar to the following: + + ```json + { + "key": "jack-key", + "nbf": 1729132271 + } + ``` + +Copy the generated JWT under the __Encoded__ section and save to a variable: + +```text +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsIm5iZiI6MTcyOTEzMjI3MX0.0VDKUzNkSaa_H5g_rGNbNtDcKJ9fBGgcGC56AsVsV-I +``` + +#### Verify With JWT in Header + +Sending request with JWT in the header: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "jwt-auth-header: ${jwt_token}" +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "Jwt-Auth-Header": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTY5NTEyOTA0NH0.EiktFX7di_tBbspbjmqDKoWAD9JG39Wo_CAQ1LZ9voQ", + ... + }, + ... +} +``` + +#### Verify With JWT in Query String + +Sending request with JWT in the query string: + +```shell +curl -i "http://127.0.0.1:9080/get?jwt-query=${jwt_token}" +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```text +{ + "args": { + "jwt-query": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTY5NTEyOTA0NH0.EiktFX7di_tBbspbjmqDKoWAD9JG39Wo_CAQ1LZ9voQ" + }, + "headers": { + "Accept": "*/*", + ... 
+ }, + "origin": "127.0.0.1, 183.17.233.107", + "url": "http://127.0.0.1/get?jwt-query=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTY5NTEyOTA0NH0.EiktFX7di_tBbspbjmqDKoWAD9JG39Wo_CAQ1LZ9voQ" +} +``` + +#### Verify With JWT in Cookie + +Sending request with JWT in the cookie: + +```shell +curl -i "http://127.0.0.1:9080/get" --cookie jwt-cookie=${jwt_token} +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Cookie": "jwt-cookie=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTY5NTEyOTA0NH0.EiktFX7di_tBbspbjmqDKoWAD9JG39Wo_CAQ1LZ9voQ", + ... + }, + ... +} +``` + +### Manage Secrets in Environment Variables + +The following example demonstrates how to save `jwt-auth` Consumer key to an environment variable and reference it in configuration. + +APISIX supports referencing system and user environment variables configured through the [NGINX `env` directive](https://nginx.org/en/docs/ngx_core_module.html#env). 
+ +Save the key to an environment variable: + +```shell +JACK_JWT_AUTH_KEY=jack-key +``` + +Create a Consumer `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +Create `jwt-auth` Credential for the Consumer and reference the environment variable in the key: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "$env://JACK_JWT_AUTH_KEY", + "secret": "jack-hs256-secret" + } + } + }' +``` + +Create a Route with `jwt-auth` enabled: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-route", + "uri": "/get", + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To issue a JWT for `jack`, you could use [JWT.io's debugger](https://jwt.io/#debugger-io) or other utilities. If you are using [JWT.io's debugger](https://jwt.io/#debugger-io), do the following: + +* Select __HS256__ in the __Algorithm__ dropdown. +* Update the secret in the __Verify Signature__ section to be `jack-hs256-secret`. +* Update payload with Consumer key `jack-key`; and add `exp` or `nbf` in UNIX timestamp. 
+ + Your payload should look similar to the following: + + ```json + { + "key": "jack-key", + "nbf": 1729132271 + } + ``` + +Copy the generated JWT under the __Encoded__ section and save to a variable: + +```text +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsIm5iZiI6MTcyOTEzMjI3MX0.0VDKUzNkSaa_H5g_rGNbNtDcKJ9fBGgcGC56AsVsV-I +``` + +Sending request with JWT in the header: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Authorization: ${jwt_token}" +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Authorization": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2OTUxMzMxNTUsImtleSI6Imp3dC1rZXkifQ.jiKuaAJqHNSSQCjXRomwnQXmdkC5Wp5VDPRsJlh1WAQ", + ... + }, + ... +} +``` + +### Manage Secrets in Secret Manager + +The following example demonstrates how to manage `jwt-auth` Consumer key in [HashiCorp Vault](https://www.vaultproject.io) and reference it in Plugin configuration. + +Start a Vault development server in Docker: + +```shell +docker run -d \ + --name vault \ + -p 8200:8200 \ + --cap-add IPC_LOCK \ + -e VAULT_DEV_ROOT_TOKEN_ID=root \ + -e VAULT_DEV_LISTEN_ADDRESS=0.0.0.0:8200 \ + vault:1.9.0 \ + vault server -dev +``` + +APISIX currently supports [Vault KV engine version 1](https://developer.hashicorp.com/vault/docs/secrets/kv#kv-version-1). Enable it in Vault: + +```shell +docker exec -i vault sh -c "VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault secrets enable -path=kv -version=1 kv" +``` + +You should see a response similar to the following: + +```text +Success! 
      "uri": "http://127.0.0.1:8200",
+ + Your payload should look similar to the following: + + ```json + { + "key": "jwt-vault-key", + "nbf": 1729132271 + } + ``` + +Copy the generated JWT under the __Encoded__ section and save to a variable: + +```text +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqd3QtdmF1bHQta2V5IiwibmJmIjoxNzI5MTMyMjcxfQ.faiN93LNP1lGSXqAb4empNJKMRWop8-KgnU58VQn1EE +``` + +Sending request with the token as header: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Authorization: ${jwt_token}" +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqd3QtdmF1bHQta2V5IiwiZXhwIjoxNjk1MTM4NjM1fQ.Au2liSZ8eQXUJR3SJESwNlIfqZdNyRyxIJK03L4dk_g", + ... + }, + ... +} +``` + +### Sign JWT with RS256 Algorithm + +The following example demonstrates how you can use asymmetric algorithms, such as RS256, to sign and validate JWT when implementing JWT for Consumer authentication. You will be generating RSA key pairs using [openssl](https://openssl-library.org/source/) and generating JWT using [JWT.io](https://jwt.io/#debugger-io) to better understand the composition of JWT. + +Generate a 2048-bit RSA private key and extract the corresponding public key in PEM format: + +```shell +openssl genrsa -out jwt-rsa256-private.pem 2048 +openssl rsa -in jwt-rsa256-private.pem -pubout -out jwt-rsa256-public.pem +``` + +You should see `jwt-rsa256-private.pem` and `jwt-rsa256-public.pem` generated in your current working directory. + +Visit [JWT.io's debugger](https://jwt.io/#debugger-io) and do the following: + +* Select __RS256__ in the __Algorithm__ dropdown. +* Copy and paste the key content into the __Verify Signature__ section. +* Update the payload with `key` matching the Consumer key you would like to use; and `exp` or `nbf` in UNIX timestamp. + +The configuration should look similar to the following: + +
+
+complete configuration of JWT generation on jwt.io +
+
+ +Copy the JWT on the left and save to an environment variable: + +```shell +jwt_token=eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsImV4cCI6MTczNDIzMDQwMH0.XjqM0oszmCggwZs-8PUIlJv8wPJON1la2ET5v70E6TCE32Yq5ibrl-1azaK7IreAer3HtnVHeEfII2rR02v8xfR1TPIjU_oHov4qC-A4tLTbgqGVXI7fCy2WFm3PFh6MEKuRe6M3dCQtCAdkRRQrBr1gWFQZhV3TNeMmmtyIfuJpB7cp4DW5pYFsCcoE1Nw6Tz7dt8k0tPBTPI2Mv9AYfMJ30LHDscOaPNtz8YIk_TOkV9b9mhQudUJ7J_suCZMRxD3iL655jTp2gKsstGKdZa0_W9Reu4-HY3LSc5DS1XtfjuftpuUqgg9FvPU0mK_b0wT_Rq3lbYhcHb9GZ72qiQ +``` + +Create a Consumer `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +Create `jwt-auth` Credential for the Consumer and configure the RSA keys: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "jack-key", + "algorithm": "RS256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnE0h4k/GWfEbYO/yE2MPjHtNKDLNz4mv1KNIPLxY2ccjPYOtjuug+iZ4MujLV59YfrHriTs0H8jweQfff3pRSMjyEK+4qWTY3TeKBXIEa3pVDeoedSJrgjLBVio6xH7et8ir+QScScfLaJHGB4/l3DDGyEhO782a9teY8brn5hsWX5uLmDJvxtTGAHYi847XOcx2UneW4tZ8wQ6JGBSiSg5qAHan4dFZ7CpixCNNqEcSK6EQ7lKOLeFGG8ys/dHBIEasU4oMlCuJH77+XQQ/shchy+vm9oZfP+grLZkV+nKAd8MQZsid7ZJ/fiB/BmnhGrjtIfh98jwxSx4DgdLhdwIDAQAB\n-----END PUBLIC KEY-----", + "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCcTSHiT8ZZ8Rtg7/ITYw+Me00oMs3Pia/Uo0g8vFjZxyM9g62O66D6Jngy6MtXn1h+seuJOzQfyPB5B99/elFIyPIQr7ipZNjdN4oFcgRrelUN6h51ImuCMsFWKjrEft63yKv5BJxJx8tokcYHj+XcMMbISE7vzZr215jxuufmGxZfm4uYMm/G1MYAdiLzjtc5zHZSd5bi1nzBDokYFKJKDmoAdqfh0VnsKmLEI02oRxIroRDuUo4t4UYbzKz90cEgRqxTigyUK4kfvv5dBD+yFyHL6+b2hl8/6CstmRX6coB3wxBmyJ3tkn9+IH8GaeEauO0h+H3yPDFLHgOB0uF3AgMBAAECggEARpY68Daw0Funzq5uN70r/3iLztSqx8hZpQEclXlF8wwQ6S33iqz1JSOMcwlZE7g9wfHd+jrHfndDypT4pVx7KxC86TZCghWuLrFvXqgwQM2dbcxGdwXVYZZEZAJsSeM19+/jYnFnl5ZoUVBMC4w79aX9j+O/6mKDUmjphHmxUuRCFjN0w7BRoYwmS796rSf1eoOcSXh2G9Ycc34DUFDfGpOzabndbmMfOz7W0DyUBG23fgLhNChTUGq8vMaqKXkQ8JKeKdEugSmRGz42HxjWoNlIGBDyB8tPNPT6SXsu/JBskdf9Gb71OWiub381oXC259sz+1K1REb1KSkgyC+bkQKBgQDKCnwXaf8aOIoJPCG53EqQfKScCIYQrvp1Uk3bs5tfYN4HcI3yAUnOqQ3Ux3eY9PfS37urlJXCfCbCnZ6P6xALZnN+aL2zWvZArlHvD6vnXiyevwK5IY+o2EW02h3A548wrGznQSsfX0tum22bEVlRuFfBbpZpizXwrV4ODSNhTwKBgQDGC27QQxah3yq6EbOhJJlJegjawVXEaEp/j4fD3qe/unLbUIFvCz6j9BAbgocDKzqXxlpTtIbnsesdLo7KM3MtYL0XO/87HIsBj9XCVgMkFCcM6YZ6fHnkJl0bs3haU4N9uI/wpokvfvXJp7iC9LUCseBdBj+N6T230HWiSbPjWQKBgQC8zzGKO/8vRNkSqkQmSczQ2/qE6p5G5w6eJy0lfOJdLswvDatJFpUf8PJA/6svoPYb9gOO5AtUNeuPAfeVLSnQTYzu+/kTrJTme0GMdAvE60gtjfmAgvGa64mw6gjWJk+1P92B+2/OIKMAmXXDbWIYMXqpBKzBs1vUMF/uJ68BlwKBgQDEivQem3YKj3/HyWmLstatpP7EmrqTgSzuC3OhX4b7L/5sySirG22/KKgTpSZ4bp5noeJiz/ZSWrAK9fmfkg/sKOV/+XsDHwCVPDnX86SKWbWnitp7FK2jTq94nlQC0H7edhvjqGLdUBJ9XoYu8MvzMLSJnXnVTHSDx832kU6FgQKBgQCbw4Eiu2IcOduIAokmsZl8Smh9ZeyhP2B/UBa1hsiPKQ6bw86QJr2OMbRXLBxtx+HYIfwDo4vXEE862PfoQyu6SjJBNmHiid7XcV06Z104UQNjP7IDLMMF+SASMqYoQWg/5chPfxBgIXnfWqw6TMmND3THY4Oj4Nhf4xeUg3HsaA==\n-----END PRIVATE KEY-----" + } + } + }' +``` + +:::tip + +You should add a newline character after the opening line and before the closing line, for example `-----BEGIN PRIVATE KEY-----\n......\n-----END PRIVATE KEY-----`. + +The key content can be directly concatenated. 
The following example demonstrates how you can attach a Consumer custom ID to the authenticated request in the `X-Consumer-Custom-Id` header, which can be used to implement additional logic as needed.
+ +Create a Consumer `jack` with a custom ID label: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack", + "labels": { + "custom_id": "495aec6a" + } + }' +``` + +Create `jwt-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "jack-key", + "secret": "jack-hs256-secret" + } + } + }' +``` + +Create a Route with `jwt-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-auth-route", + "uri": "/anything", + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To issue a JWT for `jack`, you could use [JWT.io's debugger](https://jwt.io/#debugger-io) or other utilities. If you are using [JWT.io's debugger](https://jwt.io/#debugger-io), do the following: + +* Select __HS256__ in the __Algorithm__ dropdown. +* Update the secret in the __Verify Signature__ section to be `jack-hs256-secret`. +* Update payload with Consumer key `jack-key`; and add `exp` or `nbf` in UNIX timestamp. 
To verify, send a request to the Route with the JWT in the `Authorization` header:

```shell
curl -i "http://127.0.0.1:9080/anything" -H "Authorization: ${jwt_token}"
```
Create a Route and configure the `jwt-auth` Plugin to allow the anonymous Consumer `anonymous` to bypass the authentication:
+* Update payload with role `user`, permission `read`, and Consumer key `jack-key`; as well as `exp` or `nbf` in UNIX timestamp. + + Your payload should look similar to the following: + + ```json + { + "key": "jack-key", + "nbf": 1729132271 + } + ``` + +Copy the generated JWT under the __Encoded__ section and save to a variable: + +```shell +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsIm5iZiI6MTcyOTEzMjI3MX0.hjtSsEILpko14zb8-ibyxrB2tA5biYY9JrFm3do69vs +``` + +To verify the rate limiting, send five consecutive requests with `jack`'s JWT: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H "Authorization: ${jwt_token}" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that out of the 5 requests, 3 requests were successful (status code 200) while the others were rejected (status code 429). 
+ +```text +200: 3, 429: 2 +``` + +Send five anonymous requests: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that only one request was successful: + +```text +200: 1, 429: 4 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/kafka-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/kafka-logger.md new file mode 100644 index 0000000..a1a717c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/kafka-logger.md @@ -0,0 +1,249 @@ +--- +title: kafka-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Kafka Logger +description: This document contains information about the Apache APISIX kafka-logger Plugin. +--- + + + +## Description + +The `kafka-logger` Plugin is used to push logs as JSON objects to Apache Kafka clusters. It works as a Kafka client driver for the ngx_lua Nginx module. + +It might take some time to receive the log data. It will be automatically sent after the timer function in the [batch processor](../batch-processor.md) expires. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ---------------------- | ------- | -------- | -------------- | --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| broker_list | object | True | | | Deprecated, use `brokers` instead. List of Kafka brokers. (nodes). 
| brokers.sasl_config.mechanism | string | False | "PLAIN" | ["PLAIN"] | The mechanism of sasl config |
| +| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | +| include_req_body_expr | array | False | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | +| max_req_body_bytes | integer | False | 524288 | >=1 | Maximum request body allowed in bytes. Request bodies falling within this limit will be pushed to Kafka. If the size exceeds the configured value, the body will be truncated before being pushed to Kafka. | +| include_resp_body | boolean | False | false | [false, true] | When set to `true` includes the response body in the log. | +| include_resp_body_expr | array | False | | | Filter for when the `include_resp_body` attribute is set to `true`. Response body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | +| max_resp_body_bytes | integer | False | 524288 | >=1 | Maximum response body allowed in bytes. Response bodies falling within this limit will be pushed to Kafka. If the size exceeds the configured value, the body will be truncated before being pushed to Kafka. | +| cluster_name | integer | False | 1 | [0,...] | Name of the cluster. Used when there are two or more Kafka clusters. Only works if the `producer_type` attribute is set to `async`. | +| producer_batch_num | integer | optional | 200 | [1,...] 
| `batch_num` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka). Messages are merged and sent to the server in batches. Unit is message count. |
+| producer_batch_size | integer | optional | 1048576 | [0,...] | `batch_size` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) in bytes. |
+| producer_max_buffering | integer | optional | 50000 | [1,...] | `max_buffering` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) representing maximum buffer size. Unit is message count. |
+| producer_time_linger | integer | optional | 1 | [1,...] | `flush_time` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) in seconds. |
+| meta_refresh_interval | integer | optional | 30 | [1,...] | `refresh_interval` parameter in [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) specifies the time to auto refresh the metadata, in seconds. |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+:::info IMPORTANT
+
+The data is first written to a buffer. When the buffer exceeds the `batch_max_size` or `buffer_duration` attribute, the data is sent to the Kafka server and the buffer is flushed.
+
+If the process is successful, it will return `true` and if it fails, returns `nil` with a string describing the "buffer overflow" error.
+ +::: + +### meta_format example + +- `default`: + + ```json + { + "upstream": "127.0.0.1:1980", + "start_time": 1619414294760, + "client_ip": "127.0.0.1", + "service_id": "", + "route_id": "1", + "request": { + "querystring": { + "ab": "cd" + }, + "size": 90, + "uri": "/hello?ab=cd", + "url": "http://localhost:1984/hello?ab=cd", + "headers": { + "host": "localhost", + "content-length": "6", + "connection": "close" + }, + "body": "abcdef", + "method": "GET" + }, + "response": { + "headers": { + "connection": "close", + "content-type": "text/plain; charset=utf-8", + "date": "Mon, 26 Apr 2021 05:18:14 GMT", + "server": "APISIX/2.5", + "transfer-encoding": "chunked" + }, + "size": 190, + "status": 200 + }, + "server": { + "hostname": "localhost", + "version": "2.5" + }, + "latency": 0 + } + ``` + +- `origin`: + + ```http + GET /hello?ab=cd HTTP/1.1 + host: localhost + content-length: 6 + connection: close + + abcdef + ``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `kafka-logger` Plugin. 
+ +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/kafka-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Enable Plugin + +The example below shows how you can enable the `kafka-logger` Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/5 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "kafka-logger": { + "brokers" : [ + { + "host" :"127.0.0.1", + "port" : 9092 + } + ], + "kafka_topic" : "test2", + "key" : "key1", + "batch_max_size": 1, + "name": "kafka logger" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +This Plugin also supports pushing to more than one broker at a time. You can specify multiple brokers in the Plugin configuration as shown below: + +```json + "brokers" : [ + { + "host" :"127.0.0.1", + "port" : 9092 + }, + { + "host" :"127.0.0.1", + "port" : 9093 + } +], +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your Kafka server: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## Delete Plugin + +To remove the `kafka-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/kafka-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/kafka-proxy.md new file mode 100644 index 0000000..3038865 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/kafka-proxy.md @@ -0,0 +1,83 @@ +--- +title: kafka-proxy +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Kafka proxy +description: This document contains information about the Apache APISIX kafka-proxy Plugin. +--- + + + +## Description + +The `kafka-proxy` plugin can be used to configure advanced parameters for the kafka upstream of Apache APISIX, such as SASL authentication. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-------------------|---------|----------|---------|---------------|------------------------------------| +| sasl | object | optional | | {"username": "user", "password" :"pwd"} | SASL/PLAIN authentication configuration, when this configuration exists, turn on SASL authentication; this object will contain two parameters username and password, they must be configured. | +| sasl.username | string | required | | | SASL/PLAIN authentication username | +| sasl.password | string | required | | | SASL/PLAIN authentication password | + +NOTE: `encrypt_fields = {"sasl.password"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). + +:::note +If SASL authentication is enabled, the `sasl.username` and `sasl.password` must be set. 
+The current SASL authentication only supports PLAIN mode, which is the username password login method. +::: + +## Example usage + +When we use scheme as the upstream of kafka, we can add kafka authentication configuration to it through this plugin. + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r1' \ + -H 'X-API-KEY: ' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/kafka", + "plugins": { + "kafka-proxy": { + "sasl": { + "username": "user", + "password": "pwd" + } + } + }, + "upstream": { + "nodes": { + "kafka-server1:9092": 1, + "kafka-server2:9092": 1, + "kafka-server3:9092": 1 + }, + "type": "none", + "scheme": "kafka" + } +}' +``` + +Now, we can test it by connecting to the `/kafka` endpoint via websocket. + +## Delete Plugin + +To remove the `kafka-proxy` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/key-auth.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/key-auth.md new file mode 100644 index 0000000..655eb3e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/key-auth.md @@ -0,0 +1,571 @@ +--- +title: key-auth +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Key Auth + - key-auth +description: The key-auth Plugin supports the use of an authentication key as a mechanism for clients to authenticate themselves before accessing Upstream resources. +--- + + + + + + + +## Description + +The `key-auth` Plugin supports the use of an authentication key as a mechanism for clients to authenticate themselves before accessing Upstream resources. + +To use the plugin, you would configure authentication keys on [Consumers](../terminology/consumer.md) and enable the Plugin on routes or services. The key can be included in the request URL query string or request header. 
APISIX will then verify the key to determine if a request should be allowed or denied to access Upstream resources.
+
+When a Consumer is successfully authenticated, APISIX adds additional headers, such as `X-Consumer-Username`, `X-Credential-Identifier`, and other Consumer custom headers if configured, to the request, before proxying it to the Upstream service. The Upstream service will be able to differentiate between consumers and implement additional logic as needed. If any of these values is not available, the corresponding header will not be added.
+
+## Attributes
+
+For Consumer/Credential:
+
+| Name | Type | Required | Description |
+|------|--------|-------------|----------------------------|
+| key | string | True | Unique key for a Consumer. This field supports saving the value in Secret Manager using the [APISIX Secret](../terminology/secret.md) resource. |
+
+NOTE: `encrypt_fields = {"key"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields).
+
+For Route:
+
+| Name | Type | Required | Default | Description |
+|--------|--------|-------------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| header | string | False | apikey | The header to get the key from. |
+| query | string | False | apikey | The query string to get the key from. Lower priority than header. |
+| hide_credentials | boolean | False | false | If true, do not pass the header or query string with key to Upstream services. |
+| anonymous_consumer | string | False | false | Anonymous Consumer name. If configured, allow anonymous users to bypass the authentication.
| + +## Examples + +The examples below demonstrate how you can work with the `key-auth` Plugin for different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Implement Key Authentication on Route + +The following example demonstrates how to implement key authentications on a Route and include the key in the request header. + +Create a Consumer `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +Create `key-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +Create a Route with `key-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +#### Verify with a Valid Key + +Send a request to with the valid key: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H 'apikey: jack-key' +``` + +You should receive an `HTTP/1.1 200 OK` response. 
+ +#### Verify with an Invalid Key + +Send a request with an invalid key: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H 'apikey: wrong-key' +``` + +You should see an `HTTP/1.1 401 Unauthorized` response with the following: + +```text +{"message":"Invalid API key in request"} +``` + +#### Verify without a Key + +Send a request to without a key: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should see an `HTTP/1.1 401 Unauthorized` response with the following: + +```text +{"message":"Missing API key found in request"} +``` + +### Hide Authentication Information From Upstream + +The following example demonstrates how to prevent the key from being sent to the Upstream services by configuring `hide_credentials`. By default, the authentication key is forwarded to the Upstream services, which might lead to security risks in some circumstances. + +Create a Consumer `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +Create `key-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +#### Without Hiding Credentials + +Create a Route with `key-auth` and configure `hide_credentials` to `false`, which is the default configuration: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": { + "hide_credentials": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +Send a request with the valid key: + +```shell +curl -i "http://127.0.0.1:9080/anything?apikey=jack-key" +``` + +You should see an `HTTP/1.1 200 OK` response with the following: + +```json +{ 
+ "args": { + "auth": "jack-key" + }, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Consumer-Username": "jack", + "X-Credential-Identifier": "cred-jack-key-auth", + "X-Amzn-Trace-Id": "Root=1-6502d8a5-2194962a67aa21dd33f94bb2", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 103.248.35.179", + "url": "http://127.0.0.1/anything?apikey=jack-key" +} +``` + +Note that the Credential `jack-key` is visible to the Upstream service. + +#### Hide Credentials + +Update the plugin's `hide_credentials` to `true`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/key-auth-route" -X PATCH \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "plugins": { + "key-auth": { + "hide_credentials": true + } + } +}' +``` + +Send a request with the valid key: + +```shell +curl -i "http://127.0.0.1:9080/anything?apikey=jack-key" +``` + +You should see an `HTTP/1.1 200 OK` response with the following: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Consumer-Username": "jack", + "X-Credential-Identifier": "cred-jack-key-auth", + "X-Amzn-Trace-Id": "Root=1-6502d85c-16f34dbb5629a5960183e803", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 103.248.35.179", + "url": "http://127.0.0.1/anything" +} +``` + +Note that the Credential `jack-key` is no longer visible to the Upstream service. + +### Demonstrate Priority of Keys in Header and Query + +The following example demonstrates how to implement key authentication by consumers on a Route and customize the URL parameter that should include the key. The example also shows that when the API key is configured in both the header and the query string, the request header has a higher priority. 
+ +Create a Consumer `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +Create `key-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +Create a Route with `key-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": { + "query": "auth" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +#### Verify with a Valid Key + +Send a request to with the valid key: + +```shell +curl -i "http://127.0.0.1:9080/anything?auth=jack-key" +``` + +You should receive an `HTTP/1.1 200 OK` response. + +#### Verify with an Invalid Key + +Send a request with an invalid key: + +```shell +curl -i "http://127.0.0.1:9080/anything?auth=wrong-key" +``` + +You should see an `HTTP/1.1 401 Unauthorized` response with the following: + +```text +{"message":"Invalid API key in request"} +``` + +#### Verify with a Valid Key in Query String + +However, if you include the valid key in header with the invalid key still in the URL query string: + +```shell +curl -i "http://127.0.0.1:9080/anything?auth=wrong-key" -H 'apikey: jack-key' +``` + +You should see an `HTTP/1.1 200 OK` response. This shows that the key included in the header always has a higher priority. + +### Add Consumer Custom ID to Header + +The following example demonstrates how you can attach a Consumer custom ID to authenticated request in the `Consumer-Custom-Id` header, which can be used to implement additional logics as needed. 
+ +Create a Consumer `jack` with a custom ID label: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack", + "labels": { + "custom_id": "495aec6a" + } + }' +``` + +Create `key-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +Create a Route with `key-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To verify, send a request to the Route with the valid key: + +```shell +curl -i "http://127.0.0.1:9080/anything?auth=jack-key" +``` + +You should see an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "args": { + "auth": "jack-key" + }, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66ea8d64-33df89052ae198a706e18c2a", + "X-Consumer-Username": "jack", + "X-Credential-Identifier": "cred-jack-key-auth", + "X-Consumer-Custom-Id": "495aec6a", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "192.168.65.1, 205.198.122.37", + "url": "http://127.0.0.1/anything?apikey=jack-key" +} +``` + +### Rate Limit with Anonymous Consumer + +The following example demonstrates how you can configure different rate limiting policies by regular and anonymous consumers, where the anonymous Consumer does not need to authenticate and has less quotas. 
+ +Create a regular Consumer `jack` and configure the `limit-count` Plugin to allow for a quota of 3 within a 30-second window: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack", + "plugins": { + "limit-count": { + "count": 3, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +Create the `key-auth` Credential for the Consumer `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +Create an anonymous user `anonymous` and configure the `limit-count` Plugin to allow for a quota of 1 within a 30-second window: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +Create a Route and configure the `key-auth` Plugin to accept anonymous Consumer `anonymous` from bypassing the authentication: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To verify, send five consecutive requests with `jack`'s key: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H 'apikey: jack-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that out of the 5 requests, 3 requests were successful (status code 200) 
while the others were rejected (status code 429).
+
+```text
+200: 3, 429: 2
+```
+
+Send five anonymous requests:
+
+```shell
+resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \
+  count_200=$(echo "$resp" | grep "200" | wc -l) && \
+  count_429=$(echo "$resp" | grep "429" | wc -l) && \
+  echo "200": $count_200, "429": $count_429
+```
+
+You should see the following response, showing that only one request was successful:
+
+```text
+200: 1, 429: 4
+```
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/lago.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/lago.md
new file mode 100644
index 0000000..88ef8bf
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/lago.md
@@ -0,0 +1,255 @@
+---
+title: lago
+keywords:
+  - Apache APISIX
+  - API Gateway
+  - Plugin
+  - lago
+  - monetization
+  - github.com/getlago/lago
+description: The lago plugin reports usage to a Lago instance, which allows users to integrate Lago with APISIX for API monetization.
+---
+
+
+
+## Description
+
+The `lago` plugin pushes requests and responses to [Lago Self-hosted](https://github.com/getlago/lago) and [Lago Cloud](https://getlago.com) via the Lago REST API. The plugin allows you to use it with a variety of APISIX built-in features, such as the APISIX consumer and the request-id plugin.
+
+This allows for API monetization or lets APISIX act as an AI gateway for AI tokens billing scenarios.
+
+:::note disclaimer
+
+Lago owns its trademarks and controls its commercial products and open source projects.
+
+The [https://github.com/getlago/lago](https://github.com/getlago/lago) project uses the `AGPL-3.0` license instead of the `Apache-2.0` license that is the same as Apache APISIX. As a user, you will need to evaluate for yourself whether it is applicable to your business to use the project in a compliant way or to obtain another type of license from Lago.
Apache APISIX community does not endorse it. + +The plugin does not contain any proprietary code or SDKs from Lago, it is contributed by contributors to Apache APISIX and licensed under the `Apache-2.0` license, which is in line with any other part of APISIX and you don't need to worry about its compliance. + +::: + +When enabled, the plugin will collect information from the request context (e.g. event code, transaction ID, associated subscription ID) as configured and serialize them into [Event JSON objects](https://getlago.com/docs/api-reference/events/event-object) as required by Lago. They will be added to the buffer and sent to Lago in batches of up to 100. This batch size is a [requirement](https://getlago.com/docs/api-reference/events/batch) from Lago. If you want to modify it, see [batch processor](../batch-processor.md) for more details. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|---|---|---|---|---|---| +| endpoint_addrs | array[string] | True | | | Lago API address, such as `http://127.0.0.1:3000`. It supports both self-hosted Lago and Lago Cloud. If multiple endpoints are configured, the log will be pushed to a randomly selected endpoint from the list. | +| endpoint_uri | string | False | /api/v1/events/batch | | Lago API endpoint for [batch usage events](https://docs.getlago.com/api-reference/events/batch). | +| token | string | True | | | Lago API key created in the Lago dashboard. | +| event_transaction_id | string | True | | | Event's transaction ID, used to identify and de-duplicate the event. It supports string templates containing APISIX and NGINX variables, such as `req_${request_id}`, which allows you to use values returned by upstream services or the `request-id` plugin. | +| event_subscription_id | string | True | | | Event's subscription ID, which is automatically generated or configured when you assign the plan to the customer on Lago. 
This is used to associate API consumption to a customer subscription and supports string templates containing APISIX and NGINX variables, such as `cus_${consumer_name}`, which allows you to use values returned by upstream services or APISIX consumer. | +| event_code | string | True | | | Lago billable metric's code for associating an event to a specified billable item. | +| event_properties | object | False | | | Event's properties, used to attach information to an event. This allows you to send certain information on an event to Lago, such as the HTTP status to exclude failed requests from billing, or the AI token consumption in the response body for accurate billing. The keys are fixed strings, while the values can be string templates containing APISIX and NGINX variables, such as `${status}`. | +| ssl_verify | boolean | False | true | | If true, verify Lago's SSL certificates. | +| timeout | integer | False | 3000 | [1, 60000] | Timeout for the Lago service HTTP call in milliseconds. | +| keepalive | boolean | False | true | | If true, keep the connection alive for multiple requests. | +| keepalive_timeout | integer | False | 60000 | >=1000 | Keepalive timeout in milliseconds. | +| keepalive_pool | integer | False | 5 | >=1 | Maximum number of connections in the connection pool. | + +This Plugin supports using batch processors to aggregate and process events in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +## Examples + +The examples below demonstrate how you can configure `lago` Plugin for typical scenario. + +To follow along the examples, start a Lago instance. Refer to [https://github.com/getlago/lago](https://github.com/getlago/lago) or use Lago Cloud. + +Follow these brief steps to configure Lago: + +1. 
Get the Lago API Key (also known as `token`), from the __Developer__ page of the Lago dashboard. +2. Next, create a billable metric used by APISIX, assuming its code is `test`. Set the `Aggregation type` to `Count`; and add a filter with a key of `tier` whose value contains `expensive` to allow us to distinguish between API values, which will be demonstrated later. +3. Create a plan and add the created metric to it. Its code can be configured however you like. In the __Usage-based charges__ section, add the billable metric created previously as a `Metered charge` item. Specify the default price as `$1`. Add a filter, use `tier: expensive` to perform the filtering, and specify its price as `$10`. +4. Select an existing consumer or create a new one to assign the plan you just created. You need to specify a `Subscription external ID` (or you can have Lago generate it), which will be used as the APISIX consumer username. + +Next we need to configure APISIX for demonstrations. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Report API call usage + +The following example demonstrates how you can configure the `lago` Plugin on a Route to measuring API call usage. 
+ +Create a Route with the `lago`, `request-id`, `key-auth` Plugins as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "lago-route-1", + "uri": "/get", + "plugins": { + "request-id": { + "include_in_response": true + }, + "key-auth": {}, + "lago": { + "endpoint_addrs": ["http://12.0.0.1:3000"], + "token": "", + "event_transaction_id": "${http_x_request_id}", + "event_subscription_id": "${http_x_consumer_username}", + "event_code": "test" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Create a second route with the `lago`, `request-id`, `key-auth` Plugin as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "lago-route-2", + "uri": "/anything", + "plugins": { + "request-id": { + "include_in_response": true + }, + "key-auth": {}, + "lago": { + "endpoint_addrs": ["http://12.0.0.1:3000"], + "token": "", + "event_transaction_id": "${http_x_request_id}", + "event_subscription_id": "${http_x_consumer_username}", + "event_code": "test", + "event_properties": { + "tier": "expensive" + } + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Create a Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "", + "plugins": { + "key-auth": { + "key": "demo" + } + } + }' +``` + +Send three requests to the two routes respectively: + +```shell +curl "http://127.0.0.1:9080/get" +curl "http://127.0.0.1:9080/get" +curl "http://127.0.0.1:9080/get" +curl "http://127.0.0.1:9080/anything" +curl "http://127.0.0.1:9080/anything" +curl "http://127.0.0.1:9080/anything" +``` + +You should receive `HTTP/1.1 200 OK` responses for all requests. + +Wait a few seconds, then navigate to the __Developer__ page in the Lago dashboard. 
Under __Events__, you should see 6 event entries sent by APISIX. + +If the self-hosted instance's event worker is configured correctly (or if you're using Lago Cloud), you can also see the total amount consumed in real time in the consumer's subscription usage, which should be `3 * $1 + 3 * $10 = $33` according to our demo use case. + +## FAQ + +### Purpose of the Plugin + +When you make an effort to monetize your API, it's hard to find a ready-made, low-cost solution, so you may have to build your own billing stack, which is complicated. + +This plugin allows you to use APISIX to handle API proxies and use Lago as a billing stack through direct integration with Lago, and both the APISIX open source project and Lago will be part of your portfolio, which is a huge time saver. + +Every API call results in a Lago event, which allows you to bill users for real usage, i.e. pay-as-you-go, and thanks to our built-in transaction ID (request ID) support, you can simply implement API call logging and troubleshooting for your customers. + +In addition to typical API monetization scenarios, APISIX can also do AI tokens-based billing when it is acting as an AI gateway, where each Lago event generated by an API request includes exactly how many tokens were consumed, to allow you to charge the user for a fine-grained per-tokens usage. + +### Is it flexible? + +Of course, the fact that we make transaction ID, subscription ID as a configuration item and allow you to use APISIX and NGINX variables in it means that it's simple to integrate the plugin with any existing or your own authentication and internal services. + +- Use custom authentication: as long as the Lago subscription ID represented by the user ID is registered as an APISIX variable, it will be available from there, so custom authentication is completely possible! +- Integration with internal services: You might not need the APISIX built-in request-id plugin. That's OK. 
You can have your internal service (APISIX upstream) generate it and include it in the HTTP response header. Then you can access it via an NGINX variable in the transaction ID.
+
+Event properties are supported, allowing you to set special values for specific APIs. For example, if your service has 100 APIs, you can enable general billing for all of them while customizing a few with different pricing—just as demonstrated above.
+
+### Which Lago versions does it work with?
+
+When we first developed the Lago plugin, the latest Lago release was `1.17.0`, which we used for integration, so it works at least with `1.17.0`.
+
+Technically, we use the Lago batch event API to submit events in batches, and APISIX will only use this API, so as long as Lago doesn't make any disruptive changes to this API, APISIX will be able to integrate with it.
+
+Here's an [archive page](https://web.archive.org/web/20250516073803/https://getlago.com/docs/api-reference/events/batch) of the API documentation, which allows you to check the differences between the API at the time of our integration and the latest API.
+
+If the latest API changes, you can submit an issue to inform the APISIX maintainers that this may require some changes.
+
+### Why can't Lago receive events?
+
+Look at `error.log` for a log entry such as the following.
+
+```text
+2023/04/30 13:45:46 [error] 19381#19381: *1075673 [lua] batch-processor.lua:95: Batch Processor[lago logger] failed to process entries: lago api returned status: 400, body: , context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9080
+```
+
+The error can be diagnosed based on the status code in the `failed to process entries: lago api returned status: 400, body: ` log entry and the response body returned by the Lago server.
+
+### Reliability of reporting
+
+The plugin may encounter a network problem that prevents the node where the gateway is located from communicating with the Lago API, in which case APISIX will discard the batch according to the [batch processor](../batch-processor.md) configuration: the batch is discarded once the configured number of retries has been made and the data still cannot be sent.
+
+Discarded events are permanently lost, so it is recommended that you use this plugin in conjunction with other logging mechanisms and replay events after an outage in which Lago was unavailable and data was discarded, to ensure that all logs are correctly sent to Lago.
+
+### Will events be duplicated?
+
+While APISIX performs retries based on the [batch processor](../batch-processor.md) configuration, you don't need to worry about duplicate events being reported to Lago.
+
+The `event_transaction_id` and `timestamp` are generated and logged after the request is processed on the APISIX side, and Lago de-duplicates the event based on them.
+So even if a retry is triggered because a network issue prevents APISIX from receiving the `success` response sent by Lago, the event is still not duplicated on Lago.
+
+### Performance Impacts
+
+The plugin is logically simple and reliable; it simply builds a Lago event object for each request, buffers and sends them in bulk. The logic is not coupled to the request proxy path, so this does not cause latency to rise for requests going through the gateway.
+
+Technically, the logic is executed in the NGINX log phase and [batch processor](../batch-processor.md) timer, so this does not affect the request itself.
+
+### Resource overhead
+
+As explained earlier in the performance impact section, the plugin doesn't cause a significant increase in system resources. It only uses a small amount of memory to store events for batching.
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ldap-auth.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ldap-auth.md new file mode 100644 index 0000000..7e33fc9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ldap-auth.md @@ -0,0 +1,168 @@ +--- +title: ldap-auth +keywords: + - Apache APISIX + - API Gateway + - Plugin + - LDAP Authentication + - ldap-auth +description: This document contains information about the Apache APISIX ldap-auth Plugin. +--- + + + +## Description + +The `ldap-auth` Plugin can be used to add LDAP authentication to a Route or a Service. + +This Plugin works with the Consumer object and the consumers of the API can authenticate with an LDAP server using [basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication). + +This Plugin uses [lua-resty-ldap](https://github.com/api7/lua-resty-ldap) for connecting with an LDAP server. + +## Attributes + +For Consumer: + +| Name | Type | Required | Description | +| ------- | ------ | -------- | -------------------------------------------------------------------------------- | +| user_dn | string | True | User dn of the LDAP client. For example, `cn=user01,ou=users,dc=example,dc=org`. This field supports saving the value in Secret Manager using the [APISIX Secret](../terminology/secret.md) resource. | + +For Route: + +| Name | Type | Required | Default | Description | +|----------|---------|----------|---------|------------------------------------------------------------------------| +| base_dn | string | True | | Base dn of the LDAP server. For example, `ou=users,dc=example,dc=org`. | +| ldap_uri | string | True | | URI of the LDAP server. | +| use_tls | boolean | False | `false` | If set to `true` uses TLS. 
|
+| tls_verify | boolean | False | `false` | Whether to verify the server certificate when `use_tls` is enabled. If set to `true`, you must set `ssl_trusted_certificate` in `config.yaml`, and make sure the host of `ldap_uri` matches the host in the server certificate. |
+| uid | string | False | `cn` | uid attribute. |
+
+## Enable plugin
+
+First, you have to create a Consumer and enable the `ldap-auth` Plugin on it:
+
+:::note
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "username": "foo",
+    "plugins": {
+        "ldap-auth": {
+            "user_dn": "cn=user01,ou=users,dc=example,dc=org"
+        }
+    }
+}'
+```
+
+Now you can enable the Plugin on a specific Route or a Service as shown below:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "methods": ["GET"],
+    "uri": "/hello",
+    "plugins": {
+        "ldap-auth": {
+            "base_dn": "ou=users,dc=example,dc=org",
+            "ldap_uri": "localhost:1389",
+            "uid": "cn"
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+
+## Example usage
+
+After configuring the Plugin as mentioned above, clients can make requests with authorization to access the API:
+
+```shell
+curl -i -uuser01:password1 http://127.0.0.1:9080/hello
+```
+
+```shell
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+If an authorization header is missing or invalid, the request is denied:
+
+```shell
+curl -i http://127.0.0.1:9080/hello
+```
+
+```shell
+HTTP/1.1 401 Unauthorized
+...
+{"message":"Missing authorization in request"}
+```
+
+```shell
+curl -i -uuser:password1 http://127.0.0.1:9080/hello
+```
+
+```shell
+HTTP/1.1 401 Unauthorized
+...
+{"message":"Invalid user authorization"} +``` + +```shell +curl -i -uuser01:passwordfalse http://127.0.0.1:9080/hello +``` + +```shell +HTTP/1.1 401 Unauthorized +... +{"message":"Invalid user authorization"} +``` + +## Delete Plugin + +To remove the `ldap-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/limit-conn.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/limit-conn.md new file mode 100644 index 0000000..8b08ab1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/limit-conn.md @@ -0,0 +1,420 @@ +--- +title: limit-conn +keywords: + - Apache APISIX + - API Gateway + - Limit Connection +description: The limit-conn plugin restricts the rate of requests by managing concurrent connections. Requests exceeding the threshold may be delayed or rejected, ensuring controlled API usage and preventing overload. +--- + + + + + + + +## Description + +The `limit-conn` Plugin limits the rate of requests by the number of concurrent connections. Requests exceeding the threshold will be delayed or rejected based on the configuration, ensuring controlled resource usage and preventing overload. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|------------|---------|----------|-------------|-------------------|-----------------| +| conn | integer | True | | > 0 | The maximum number of concurrent requests allowed. Requests exceeding the configured limit and below `conn + burst` will be delayed. 
| +| burst | integer | True | | >= 0 | The number of excessive concurrent requests allowed to be delayed per second. Requests exceeding the limit will be rejected immediately. | +| default_conn_delay | number | True | | > 0 | Processing latency allowed in seconds for concurrent requests exceeding `conn + burst`, which can be dynamically adjusted based on `only_use_default_delay` setting. | +| only_use_default_delay | boolean | False | false | | If false, delay requests proportionally based on how much they exceed the `conn` limit. The delay grows larger as congestion increases. For instance, with `conn` being `5`, `burst` being `3`, and `default_conn_delay` being `1`, 6 concurrent requests would result in a 1-second delay, 7 requests a 2-second delay, 8 requests a 3-second delay, and so on, until the total limit of `conn + burst` is reached, beyond which requests are rejected. If true, use `default_conn_delay` to delay all excessive requests within the `burst` range. Requests beyond `conn + burst` are rejected immediately. For instance, with `conn` being `5`, `burst` being `3`, and `default_conn_delay` being `1`, 6, 7, or 8 concurrent requests are all delayed by exactly 1 second each. | +| key_type | string | False | var | ["var","var_combination"] | The type of key. If the `key_type` is `var`, the `key` is interpreted a variable. If the `key_type` is `var_combination`, the `key` is interpreted as a combination of variables. | +| key | string | False | remote_addr | | The key to count requests by. If the `key_type` is `var`, the `key` is interpreted a variable. The variable does not need to be prefixed by a dollar sign (`$`). If the `key_type` is `var_combination`, the `key` is interpreted as a combination of variables. All variables should be prefixed by dollar signs (`$`). For example, to configure the `key` to use a combination of two request headers `custom-a` and `custom-b`, the `key` should be configured as `$http_custom_a $http_custom_b`. 
| +| rejected_code | integer | False | 503 | [200,...,599] | The HTTP status code returned when a request is rejected for exceeding the threshold. | +| rejected_msg | string | False | | non-empty | The response body returned when a request is rejected for exceeding the threshold. | +| allow_degradation | boolean | False | false | | If true, allow APISIX to continue handling requests without the Plugin when the Plugin or its dependencies become unavailable. | +| policy | string | False | local | ["local","redis","redis-cluster"] | The policy for rate limiting counter. If it is `local`, the counter is stored in memory locally. If it is `redis`, the counter is stored on a Redis instance. If it is `redis-cluster`, the counter is stored in a Redis cluster. | +| redis_host | string | False | | | The address of the Redis node. Required when `policy` is `redis`. | +| redis_port | integer | False | 6379 | [1,...] | The port of the Redis node when `policy` is `redis`. | +| redis_username | string | False | | | The username for Redis if Redis ACL is used. If you use the legacy authentication method `requirepass`, configure only the `redis_password`. Used when `policy` is `redis`. | +| redis_password | string | False | | | The password of the Redis node when `policy` is `redis` or `redis-cluster`. | +| redis_ssl | boolean | False | false | | If true, use SSL to connect to Redis cluster when `policy` is `redis`. | +| redis_ssl_verify | boolean | False | false | | If true, verify the server SSL certificate when `policy` is `redis`. | +| redis_database | integer | False | 0 | >= 0 | The database number in Redis when `policy` is `redis`. | +| redis_timeout | integer | False | 1000 | [1,...] | The Redis timeout value in milliseconds when `policy` is `redis` or `redis-cluster`. | +| redis_cluster_nodes | array[string] | False | | | The list of the Redis cluster nodes with at least two addresses. Required when policy is redis-cluster. 
|
+| redis_cluster_name | string | False | | | The name of the Redis cluster. Required when `policy` is `redis-cluster`. |
+| redis_cluster_ssl | boolean | False | false | | If true, use SSL to connect to Redis cluster when `policy` is `redis-cluster`. |
+| redis_cluster_ssl_verify | boolean | False | false | | If true, verify the server SSL certificate when `policy` is `redis-cluster`. |
+
+## Examples
+
+The examples below demonstrate how you can configure `limit-conn` in different scenarios.
+
+:::note
+
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+### Apply Rate Limiting by Remote Address
+
+The following example demonstrates how to use `limit-conn` to rate limit requests by `remote_addr`, with example connection and burst thresholds.
+
+Create a Route with `limit-conn` Plugin to allow 2 concurrent requests and 1 excessive concurrent request. Additionally:
+
+* Configure the Plugin to allow 0.1 second of processing latency for concurrent requests exceeding `conn + burst`.
+* Set the key type to `var` to interpret `key` as a variable.
+* Calculate rate limiting count by request's `remote_addr`.
+* Set `policy` to `local` to use the local counter in memory.
+* Customize the `rejected_code` to `429`.
+ +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-conn-route", + "uri": "/get", + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key_type": "var", + "key": "remote_addr", + "policy": "local", + "rejected_code": 429 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send five concurrent requests to the route: + +```shell +seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get"' +``` + +You should see responses similar to the following, where excessive requests are rejected: + +```text +Response: 200 +Response: 200 +Response: 200 +Response: 429 +Response: 429 +``` + +### Apply Rate Limiting by Remote Address and Consumer Name + +The following example demonstrates how to use `limit-conn` to rate limit requests by a combination of variables, `remote_addr` and `consumer_name`. 
+ +Create a Consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +Create `key-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +Create a second Consumer `jane`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jane" + }' +``` + +Create `key-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jane/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jane-key-auth", + "plugins": { + "key-auth": { + "key": "jane-key" + } + } + }' +``` + +Create a Route with `key-auth` and `limit-conn` Plugins, and specify in the `limit-conn` Plugin to use a combination of variables as the rate limiting key: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-conn-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 429, + "key_type": "var_combination", + "key": "$remote_addr $consumer_name" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send five concurrent requests as the Consumer `john`: + +```shell +seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get" -H "apikey: john-key"' +``` + +You should see responses similar to the following, where excessive requests are rejected: + +```text +Response: 200 +Response: 200 +Response: 200 +Response: 429 +Response: 429 +``` + +Immediately send five concurrent requests as the Consumer 
`jane`: + +```shell +seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get" -H "apikey: jane-key"' +``` + +You should also see responses similar to the following, where excessive requests are rejected: + +```text +Response: 200 +Response: 200 +Response: 200 +Response: 429 +Response: 429 +``` + +### Rate Limit WebSocket Connections + +The following example demonstrates how you can use the `limit-conn` Plugin to limit the number of concurrent WebSocket connections. + +Start a [sample upstream WebSocket server](https://hub.docker.com/r/jmalloc/echo-server): + +```shell +docker run -d \ + -p 8080:8080 \ + --name websocket-server \ + --network=apisix-quickstart-net \ + jmalloc/echo-server +``` + +Create a Route to the server WebSocket endpoint and enable WebSocket for the route. Adjust the WebSocket server address accordingly. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "ws-route", + "uri": "/.ws", + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key_type": "var", + "key": "remote_addr", + "rejected_code": 429 + } + }, + "enable_websocket": true, + "upstream": { + "type": "roundrobin", + "nodes": { + "websocket-server:8080": 1 + } + } +}' +``` + +Install a WebSocket client, such as [websocat](https://github.com/vi/websocat), if you have not already. Establish connection with the WebSocket server through the route: + +```shell +websocat "ws://127.0.0.1:9080/.ws" +``` + +Send a "hello" message in the terminal, you should see the WebSocket server echoes back the same message: + +```text +Request served by 1cd244052136 +hello +hello +``` + +Open three more terminal sessions and run: + +```shell +websocat "ws://127.0.0.1:9080/.ws" +``` + +You should see the last terminal session prints `429 Too Many Requests` when you try to establish a WebSocket connection with the server, due to the rate limiting effect. 
+ +### Share Quota Among APISIX Nodes with a Redis Server + +The following example demonstrates the rate limiting of requests across multiple APISIX nodes with a Redis server, such that different APISIX nodes share the same rate limiting quota. + +On each APISIX instance, create a Route with the following configurations. Adjust the address of the Admin API, Redis host, port, password, and database accordingly. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-conn-route", + "uri": "/get", + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 429, + "key_type": "var", + "key": "remote_addr", + "policy": "redis", + "redis_host": "192.168.xxx.xxx", + "redis_port": 6379, + "redis_password": "p@ssw0rd", + "redis_database": 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send five concurrent requests to the route: + +```shell +seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get"' +``` + +You should see responses similar to the following, where excessive requests are rejected: + +```text +Response: 200 +Response: 200 +Response: 429 +Response: 429 +Response: 429 +``` + +This shows the two routes configured in different APISIX instances share the same quota. + +### Share Quota Among APISIX Nodes with a Redis Cluster + +You can also use a Redis cluster to apply the same quota across multiple APISIX nodes, such that different APISIX nodes share the same rate limiting quota. + +Ensure that your Redis instances are running in [cluster mode](https://redis.io/docs/management/scaling/#create-and-use-a-redis-cluster). A minimum of two nodes are required for the `limit-conn` Plugin configurations. + +On each APISIX instance, create a Route with the following configurations. 
Adjust the address of the Admin API, Redis cluster nodes, password, cluster name, and SSL verification accordingly.
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "limit-conn-route",
+    "uri": "/get",
+    "plugins": {
+      "limit-conn": {
+        "conn": 1,
+        "burst": 1,
+        "default_conn_delay": 0.1,
+        "rejected_code": 429,
+        "key_type": "var",
+        "key": "remote_addr",
+        "policy": "redis-cluster",
+        "redis_cluster_nodes": [
+          "192.168.xxx.xxx:6379",
+          "192.168.xxx.xxx:16379"
+        ],
+        "redis_password": "p@ssw0rd",
+        "redis_cluster_name": "redis-cluster-1",
+        "redis_cluster_ssl": true
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send five concurrent requests to the route:
+
+```shell
+seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get"'
+```
+
+You should see responses similar to the following, where excessive requests are rejected:
+
+```text
+Response: 200
+Response: 200
+Response: 429
+Response: 429
+Response: 429
+```
+
+This shows the two routes configured in different APISIX instances share the same quota.
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/limit-count.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/limit-count.md
new file mode 100644
index 0000000..a5edbc0
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/limit-count.md
@@ -0,0 +1,507 @@
+---
+title: limit-count
+keywords:
+  - Apache APISIX
+  - API Gateway
+  - Limit Count
+description: The limit-count plugin uses a fixed window algorithm to limit the rate of requests by the number of requests within a given time interval. Requests exceeding the configured quota will be rejected.
+--- + + + + + + + +## Description + +The `limit-count` plugin uses a fixed window algorithm to limit the rate of requests by the number of requests within a given time interval. Requests exceeding the configured quota will be rejected. + +You may see the following rate limiting headers in the response: + +* `X-RateLimit-Limit`: the total quota +* `X-RateLimit-Remaining`: the remaining quota +* `X-RateLimit-Reset`: number of seconds left for the counter to reset + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ----------------------- | ------- | ----------------------------------------- | ------------- | -------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| count | integer | True | | > 0 | The maximum number of requests allowed within a given time interval. | +| time_window | integer | True | | > 0 | The time interval corresponding to the rate limiting `count` in seconds. | +| key_type | string | False | var | ["var","var_combination","constant"] | The type of key. If the `key_type` is `var`, the `key` is interpreted a variable. If the `key_type` is `var_combination`, the `key` is interpreted as a combination of variables. If the `key_type` is `constant`, the `key` is interpreted as a constant. | +| key | string | False | remote_addr | | The key to count requests by. If the `key_type` is `var`, the `key` is interpreted a variable. The variable does not need to be prefixed by a dollar sign (`$`). 
If the `key_type` is `var_combination`, the `key` is interpreted as a combination of variables. All variables should be prefixed by dollar signs (`$`). For example, to configure the `key` to use a combination of two request headers `custom-a` and `custom-b`, the `key` should be configured as `$http_custom_a $http_custom_b`. If the `key_type` is `constant`, the `key` is interpreted as a constant value. | +| rejected_code | integer | False | 503 | [200,...,599] | The HTTP status code returned when a request is rejected for exceeding the threshold. | +| rejected_msg | string | False | | non-empty | The response body returned when a request is rejected for exceeding the threshold. | +| policy | string | False | local | ["local","redis","redis-cluster"] | The policy for rate limiting counter. If it is `local`, the counter is stored in memory locally. If it is `redis`, the counter is stored on a Redis instance. If it is `redis-cluster`, the counter is stored in a Redis cluster. | +| allow_degradation | boolean | False | false | | If true, allow APISIX to continue handling requests without the plugin when the plugin or its dependencies become unavailable. | +| show_limit_quota_header | boolean | False | true | | If true, include `X-RateLimit-Limit` to show the total quota and `X-RateLimit-Remaining` to show the remaining quota in the response header. | +| group | string | False | | non-empty | The `group` ID for the plugin, such that routes of the same `group` can share the same rate limiting counter. | +| redis_host | string | False | | | The address of the Redis node. Required when `policy` is `redis`. | +| redis_port | integer | False | 6379 | [1,...] | The port of the Redis node when `policy` is `redis`. | +| redis_username | string | False | | | The username for Redis if Redis ACL is used. If you use the legacy authentication method `requirepass`, configure only the `redis_password`. Used when `policy` is `redis`. 
| +| redis_password | string | False | | | The password of the Redis node when `policy` is `redis` or `redis-cluster`. | +| redis_ssl | boolean | False | false | | If true, use SSL to connect to Redis cluster when `policy` is `redis`. | +| redis_ssl_verify | boolean | False | false | | If true, verify the server SSL certificate when `policy` is `redis`. | +| redis_database | integer | False | 0 | >= 0 | The database number in Redis when `policy` is `redis`. | +| redis_timeout | integer | False | 1000 | [1,...] | The Redis timeout value in milliseconds when `policy` is `redis` or `redis-cluster`. | +| redis_cluster_nodes | array[string] | False | | | The list of the Redis cluster nodes with at least two addresses. Required when policy is redis-cluster. | +| redis_cluster_name | string | False | | | The name of the Redis cluster. Required when `policy` is `redis-cluster`. | +| redis_cluster_ssl | boolean | False | false | | If true, use SSL to connect to Redis cluster when `policy` is `redis-cluster`. | +| redis_cluster_ssl_verify | boolean | False | false | | If true, verify the server SSL certificate when `policy` is `redis-cluster`. | + +## Examples + +The examples below demonstrate how you can configure `limit-count` in different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Apply Rate Limiting by Remote Address + +The following example demonstrates the rate limiting of requests by a single variable, `remote_addr`. 
+ +Create a Route with `limit-count` plugin that allows for a quota of 1 within a 30-second window per remote address: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route", + "uri": "/get", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to verify: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +You should see an `HTTP/1.1 200 OK` response. + +The request has consumed all the quota allowed for the time window. If you send the request again within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response, indicating the request surpasses the quota threshold. + +### Apply Rate Limiting by Remote Address and Consumer Name + +The following example demonstrates the rate limiting of requests by a combination of variables, `remote_addr` and `consumer_name`. It allows for a quota of 1 within a 30-second window per remote address and for each consumer. 
+ +Create a Consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +Create `key-auth` Credential for the consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +Create a second Consumer `jane`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jane" + }' +``` + +Create `key-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jane/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jane-key-auth", + "plugins": { + "key-auth": { + "key": "jane-key" + } + } + }' +``` + +Create a Route with `key-auth` and `limit-count` plugins, and specify in the `limit-count` plugin to use a combination of variables as the rate limiting key: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429, + "key_type": "var_combination", + "key": "$remote_addr $consumer_name" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request as the Consumer `jane`: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: jane-key' +``` + +You should see an `HTTP/1.1 200 OK` response with the corresponding response body. + +This request has consumed all the quota set for the time window. 
If you send the same request as the Consumer `jane` within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response, indicating the request surpasses the quota threshold. + +Send the same request as the Consumer `john` within the same 30-second time interval: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: john-key' +``` + +You should see an `HTTP/1.1 200 OK` response with the corresponding response body, indicating the request is not rate limited. + +Send the same request as the Consumer `john` again within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response. + +This verifies the plugin rate limits by the combination of variables, `remote_addr` and `consumer_name`. + +### Share Quota among Routes + +The following example demonstrates the sharing of rate limiting quota among multiple routes by configuring the `group` of the `limit-count` plugin. + +Note that the configurations of the `limit-count` plugin of the same `group` should be identical. To avoid update anomalies and repetitive configurations, you can create a Service with `limit-count` plugin and Upstream for routes to connect to. 
+ +Create a service: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/services" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-service", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429, + "group": "srv1" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Create two Routes and configure their `service_id` to be `limit-count-service`, so that they share the same configurations for the Plugin and Upstream: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route-1", + "service_id": "limit-count-service", + "uri": "/get1", + "plugins": { + "proxy-rewrite": { + "uri": "/get" + } + } + }' +``` + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route-2", + "service_id": "limit-count-service", + "uri": "/get2", + "plugins": { + "proxy-rewrite": { + "uri": "/get" + } + } + }' +``` + +:::note + +The [`proxy-rewrite`](./proxy-rewrite.md) plugin is used to rewrite the URI to `/get` so that requests are forwarded to the correct endpoint. + +::: + +Send a request to Route `/get1`: + +```shell +curl -i "http://127.0.0.1:9080/get1" +``` + +You should see an `HTTP/1.1 200 OK` response with the corresponding response body. + +Send the same request to Route `/get2` within the same 30-second time interval: + +```shell +curl -i "http://127.0.0.1:9080/get2" +``` + +You should receive an `HTTP/1.1 429 Too Many Requests` response, which verifies the two routes share the same rate limiting quota. + +### Share Quota Among APISIX Nodes with a Redis Server + +The following example demonstrates the rate limiting of requests across multiple APISIX nodes with a Redis server, such that different APISIX nodes share the same rate limiting quota. 
+
+On each APISIX instance, create a Route with the following configurations. Adjust the address of the Admin API, Redis host, port, password, and database accordingly.
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "limit-count-route",
+    "uri": "/get",
+    "plugins": {
+      "limit-count": {
+        "count": 1,
+        "time_window": 30,
+        "rejected_code": 429,
+        "key": "remote_addr",
+        "policy": "redis",
+        "redis_host": "192.168.xxx.xxx",
+        "redis_port": 6379,
+        "redis_password": "p@ssw0rd",
+        "redis_database": 1
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to an APISIX instance:
+
+```shell
+curl -i "http://127.0.0.1:9080/get"
+```
+
+You should see an `HTTP/1.1 200 OK` response with the corresponding response body.
+
+Send the same request to a different APISIX instance within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response, verifying routes configured in different APISIX nodes share the same quota.
+
+### Share Quota Among APISIX Nodes with a Redis Cluster
+
+You can also use a Redis cluster to apply the same quota across multiple APISIX nodes, such that different APISIX nodes share the same rate limiting quota.
+
+Ensure that your Redis instances are running in [cluster mode](https://redis.io/docs/management/scaling/#create-and-use-a-redis-cluster). A minimum of two nodes are required for the `limit-count` plugin configurations.
+
+On each APISIX instance, create a Route with the following configurations. Adjust the address of the Admin API, Redis cluster nodes, password, cluster name, and SSL verification accordingly.
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "limit-count-route",
+    "uri": "/get",
+    "plugins": {
+      "limit-count": {
+        "count": 1,
+        "time_window": 30,
+        "rejected_code": 429,
+        "key": "remote_addr",
+        "policy": "redis-cluster",
+        "redis_cluster_nodes": [
+          "192.168.xxx.xxx:6379",
+          "192.168.xxx.xxx:16379"
+        ],
+        "redis_password": "p@ssw0rd",
+        "redis_cluster_name": "redis-cluster-1",
+        "redis_cluster_ssl": true
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to an APISIX instance:
+
+```shell
+curl -i "http://127.0.0.1:9080/get"
+```
+
+You should see an `HTTP/1.1 200 OK` response with the corresponding response body.
+
+Send the same request to a different APISIX instance within the same 30-second time interval, you should receive an `HTTP/1.1 429 Too Many Requests` response, verifying routes configured in different APISIX nodes share the same quota.
+
+### Rate Limit with Anonymous Consumer
+
+The following example demonstrates how you can configure different rate limiting policies for regular and anonymous Consumers, where the anonymous Consumer does not need to authenticate and has fewer quotas. While this example uses [`key-auth`](./key-auth.md) for authentication, the anonymous Consumer can also be configured with [`basic-auth`](./basic-auth.md), [`jwt-auth`](./jwt-auth.md), and [`hmac-auth`](./hmac-auth.md).
+ +Create a regular Consumer `john` and configure the `limit-count` plugin to allow for a quota of 3 within a 30-second window: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john", + "plugins": { + "limit-count": { + "count": 3, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +Create the `key-auth` Credential for the Consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +Create an anonymous user `anonymous` and configure the `limit-count` Plugin to allow for a quota of 1 within a 30-second window: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +Create a Route and configure the `key-auth` Plugin to accept anonymous Consumer `anonymous` from bypassing the authentication: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To verify, send five consecutive requests with `john`'s key: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H 'apikey: john-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that out of the 5 requests, 3 requests were successful (status code 200) 
while the others were rejected (status code 429). + +```text +200: 3, 429: 2 +``` + +Send five anonymous requests: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that only one request was successful: + +```text +200: 1, 429: 4 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/limit-req.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/limit-req.md new file mode 100644 index 0000000..132f6fa --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/limit-req.md @@ -0,0 +1,284 @@ +--- +title: limit-req +keywords: + - Apache APISIX + - API Gateway + - Limit Request + - limit-req +description: The limit-req Plugin uses the leaky bucket algorithm to rate limit the number of the requests and allow for throttling. +--- + + + + + + + +## Description + +The `limit-req` Plugin uses the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) algorithm to rate limit the number of the requests and allow for throttling. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-------------------|---------|----------|---------|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| rate | integer | True | | > 0 | The maximum number of requests allowed per second. Requests exceeding the rate and below burst will be delayed. 
| burst | integer | True | | >= 0 | The number of requests allowed to be delayed per second for throttling. Requests exceeding the rate and burst will get rejected. |
+| key_type | string | False | var | ["var", "var_combination"] | The type of key. If the `key_type` is `var`, the `key` is interpreted as a variable. If the `key_type` is `var_combination`, the `key` is interpreted as a combination of variables. |
+| key | string | True | remote_addr | | The key to count requests by. If the `key_type` is `var`, the `key` is interpreted as a variable. The variable does not need to be prefixed by a dollar sign (`$`). If the `key_type` is `var_combination`, the `key` is interpreted as a combination of variables. All variables should be prefixed by dollar signs (`$`). For example, to configure the `key` to use a combination of two request headers `custom-a` and `custom-b`, the `key` should be configured as `$http_custom_a $http_custom_b`. |
+| rejected_code | integer | False | 503 | [200,...,599] | The HTTP status code returned when a request is rejected for exceeding the threshold. |
+| rejected_msg | string | False | | non-empty | The response body returned when a request is rejected for exceeding the threshold. |
+| nodelay | boolean | False | false | | If true, do not delay requests within the burst threshold. |
+| allow_degradation | boolean | False | false | | If true, allow APISIX to continue handling requests without the Plugin when the Plugin or its dependencies become unavailable. |
+| policy | string | False | local | ["local", "redis", "redis-cluster"] | The policy for rate limiting counter. If it is `local`, the counter is stored in memory locally. If it is `redis`, the counter is stored on a Redis instance. If it is `redis-cluster`, the counter is stored in a Redis cluster. |
+| redis_host | string | False | | | The address of the Redis node. Required when `policy` is `redis`. |
+| redis_port | integer | False | 6379 | [1,...] | The port of the Redis node when `policy` is `redis`. |
+| redis_username | string | False | | | The username for Redis if Redis ACL is used. If you use the legacy authentication method `requirepass`, configure only the `redis_password`. Used when `policy` is `redis`. |
+| redis_password | string | False | | | The password of the Redis node when `policy` is `redis` or `redis-cluster`. |
+| redis_ssl | boolean | False | false | | If true, use SSL to connect to Redis when `policy` is `redis`. |
+| redis_ssl_verify | boolean | False | false | | If true, verify the server SSL certificate when `policy` is `redis`. |
+| redis_database | integer | False | 0 | >= 0 | The database number in Redis when `policy` is `redis`. |
+| redis_timeout | integer | False | 1000 | [1,...] | The Redis timeout value in milliseconds when `policy` is `redis` or `redis-cluster`. |
+| redis_cluster_nodes | array[string] | False | | | The list of the Redis cluster nodes with at least two addresses. Required when policy is redis-cluster. |
+| redis_cluster_name | string | False | | | The name of the Redis cluster. Required when `policy` is `redis-cluster`. |
+| redis_cluster_ssl | boolean | False | false | | If true, use SSL to connect to Redis cluster when `policy` is `redis-cluster`. |
+| redis_cluster_ssl_verify | boolean | False | false | | If true, verify the server SSL certificate when `policy` is `redis-cluster`. |
+
+## Examples
+
+The examples below demonstrate how you can configure `limit-req` in different scenarios.
+
+:::note
+
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+### Apply Rate Limiting by Remote Address
+
+The following example demonstrates the rate limiting of HTTP requests by a single variable, `remote_addr`.
+ +Create a Route with `limit-req` Plugin that allows for 1 QPS per remote address: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d ' + { + "id": "limit-req-route", + "uri": "/get", + "plugins": { + "limit-req": { + "rate": 1, + "burst": 0, + "key": "remote_addr", + "key_type": "var", + "rejected_code": 429, + "nodelay": true + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to verify: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +You should see an `HTTP/1.1 200 OK` response. + +The request has consumed all the quota allowed for the time window. If you send the request again within the same second, you should receive an `HTTP/1.1 429 Too Many Requests` response, indicating the request surpasses the quota threshold. + +### Implement API Throttling + +The following example demonstrates how to configure `burst` to allow overrun of the rate limiting threshold by the configured value and achieve request throttling. You will also see a comparison against when throttling is not implemented. 
+ +Create a Route with `limit-req` Plugin that allows for 1 QPS per remote address, with a `burst` of 1 to allow for 1 request exceeding the `rate` to be delayed for processing: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-req-route", + "uri": "/get", + "plugins": { + "limit-req": { + "rate": 1, + "burst": 1, + "key": "remote_addr", + "rejected_code": 429 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Generate three requests to the Route: + +```shell +resp=$(seq 3 | xargs -I{} curl -i "http://127.0.0.1:9080/get" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200 responses: $count_200 ; 429 responses: $count_429" +``` + +You are likely to see that all three requests are successful: + +```text +200 responses: 3 ; 429 responses: 0 +``` + +To see the effect without `burst`, update `burst` to 0 or set `nodelay` to `true` as follows: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/limit-req-route" -X PATCH \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "plugins": { + "limit-req": { + "nodelay": true + } + } + }' +``` + +Generate three requests to the Route again: + +```shell +resp=$(seq 3 | xargs -I{} curl -i "http://127.0.0.1:9080/get" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200 responses: $count_200 ; 429 responses: $count_429" +``` + +You should see a response similar to the following, showing requests surpassing the rate have been rejected: + +```text +200 responses: 1 ; 429 responses: 2 +``` + +### Apply Rate Limiting by Remote Address and Consumer Name + +The following example demonstrates the rate limiting of requests by a combination of variables, `remote_addr` and `consumer_name`. 
+ +Create a Route with `limit-req` Plugin that allows for 1 QPS per remote address and for each Consumer. + +Create a Consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +Create `key-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +Create a second Consumer `jane`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jane" + }' +``` + +Create `key-auth` Credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jane/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jane-key-auth", + "plugins": { + "key-auth": { + "key": "jane-key" + } + } + }' +``` + +Create a Route with `key-auth` and `limit-req` Plugins, and specify in the `limit-req` Plugin to use a combination of variables as the rate-limiting key: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-req-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "limit-req": { + "rate": 1, + "burst": 0, + "key": "$remote_addr $consumer_name", + "key_type": "var_combination", + "rejected_code": 429 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send two requests simultaneously, each for one Consumer: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: jane-key' & \ +curl -i "http://127.0.0.1:9080/get" -H 'apikey: john-key' & +``` + +You should receive `HTTP/1.1 200 OK` for both requests, indicating the request has not exceeded the threshold for each Consumer. 
+ +If you send more requests as either Consumer within the same second, you should receive an `HTTP/1.1 429 Too Many Requests` response. + +This verifies the Plugin rate limits by the combination of variables, `remote_addr` and `consumer_name`. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/log-rotate.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/log-rotate.md new file mode 100644 index 0000000..4f3fd82 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/log-rotate.md @@ -0,0 +1,118 @@ +--- +title: log-rotate +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Log rotate +description: This document contains information about the Apache APISIX log-rotate Plugin. +--- + + + +## Description + +The `log-rotate` Plugin is used to keep rotating access and error log files in the log directory at regular intervals. + +You can configure how often the logs are rotated and how many logs to keep. When the number of logs exceeds, older logs are automatically deleted. + +## Attributes + +| Name | Type | Required | Default | Description | +|--------------------|---------|----------|---------|------------------------------------------------------------------------------------------------| +| interval | integer | True | 60 * 60 | Time in seconds specifying how often to rotate the logs. | +| max_kept | integer | True | 24 * 7 | Maximum number of historical logs to keep. If this number is exceeded, older logs are deleted. | +| max_size | integer | False | -1 | Max size(Bytes) of log files to be rotated, size check would be skipped with a value less than 0 or time is up specified by interval. | +| enable_compression | boolean | False | false | When set to `true`, compresses the log file (gzip). Requires `tar` to be installed. 
| + +## Enable Plugin + +To enable the Plugin, add it in your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + - log-rotate + +plugin_attr: + log-rotate: + interval: 3600 # rotate interval (unit: second) + max_kept: 168 # max number of log files will be kept + max_size: -1 # max size of log files will be kept + enable_compression: false # enable log file compression(gzip) or not, default false +``` + +## Example usage + +Once you enable the Plugin as shown above, the logs will be stored and rotated based on your configuration. + +In the example below the `interval` is set to `10` and `max_kept` is set to `10`. This will create logs as shown: + +```shell +ll logs +``` + +```shell +total 44K +-rw-r--r--. 1 resty resty 0 Mar 20 20:32 2020-03-20_20-32-40_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:32 2020-03-20_20-32-40_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:32 2020-03-20_20-32-50_access.log +-rw-r--r--. 1 resty resty 2.8K Mar 20 20:32 2020-03-20_20-32-50_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:32 2020-03-20_20-33-00_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:33 2020-03-20_20-33-00_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:33 2020-03-20_20-33-10_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:33 2020-03-20_20-33-10_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:33 2020-03-20_20-33-20_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:33 2020-03-20_20-33-20_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:33 2020-03-20_20-33-30_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:33 2020-03-20_20-33-30_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:33 2020-03-20_20-33-40_access.log +-rw-r--r--. 1 resty resty 2.8K Mar 20 20:33 2020-03-20_20-33-40_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:33 2020-03-20_20-33-50_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:33 2020-03-20_20-33-50_error.log +-rw-r--r--. 
1 resty resty 0 Mar 20 20:33 2020-03-20_20-34-00_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:34 2020-03-20_20-34-00_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:34 2020-03-20_20-34-10_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:34 2020-03-20_20-34-10_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:34 access.log +-rw-r--r--. 1 resty resty 1.5K Mar 20 21:31 error.log +``` + +If you have enabled compression, the logs will be as shown below: + +```shell +total 10.5K +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:33 2020-03-20_20-33-50_access.log.tar.gz +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:33 2020-03-20_20-33-50_error.log.tar.gz +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:33 2020-03-20_20-34-00_access.log.tar.gz +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:34 2020-03-20_20-34-00_error.log.tar.gz +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:34 2020-03-20_20-34-10_access.log.tar.gz +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:34 2020-03-20_20-34-10_error.log.tar.gz +-rw-r--r--. 1 resty resty 0 Mar 20 20:34 access.log +-rw-r--r--. 1 resty resty 1.5K Mar 20 21:31 error.log +``` + +## Delete Plugin + +To remove the `log-rotate` Plugin, you can remove it from your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + # - log-rotate +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/loggly.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/loggly.md new file mode 100644 index 0000000..a1838c2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/loggly.md @@ -0,0 +1,183 @@ +--- +title: loggly +keywords: + - Apache APISIX + - API Gateway + - Plugin + - SolarWinds Loggly +description: This document contains information about the Apache APISIX loggly Plugin. +--- + + + +## Description + +The `loggly` Plugin is used to forward logs to [SolarWinds Loggly](https://www.solarwinds.com/loggly) for analysis and storage. 
+
+When the Plugin is enabled, APISIX will serialize the request context information to [Loggly Syslog](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/streaming-syslog-without-using-files.htm?cshid=loggly_streaming-syslog-without-using-files) data format which is Syslog events with [RFC5424](https://datatracker.ietf.org/doc/html/rfc5424) compliant headers.
+
+When the maximum batch size is exceeded, the data in the queue is pushed to Loggly enterprise syslog endpoint. See [batch processor](../batch-processor.md) for more details.
+
+## Attributes
+
+| Name | Type | Required | Default | Description |
+|------------------------|---------------|----------|---------|-------------|
+| customer_token | string | True | | Unique identifier used when sending logs to Loggly to ensure that they are sent to the right organisation account. |
+| severity | string (enum) | False | INFO | Syslog log event severity level. Choose between: `DEBUG`, `INFO`, `NOTICE`, `WARNING`, `ERR`, `CRIT`, `ALERT`, and `EMERG`. |
+| severity_map | object | False | nil | A way to map upstream HTTP response codes to Syslog severity. Key-value pairs where keys are the HTTP response codes and the values are the Syslog severity levels. For example `{"410": "CRIT"}`. |
+| tags | array | False | | Metadata to be included with any event log to aid in segmentation and filtering. |
+| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. 
| +| include_req_body | boolean | False | false | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | +| include_req_body_expr | array | False | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | +| include_resp_body | boolean | False | false | When set to `true` includes the response body in the log. | +| include_resp_body_expr | array | False | | When the `include_resp_body` attribute is set to `true`, use this to filter based on [lua-resty-expr](https://github.com/api7/lua-resty-expr). If present, only logs the response if the expression evaluates to `true`. | + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +To generate a Customer token, go to `/loggly.com/tokens` or navigate to Logs > Source setup > Customer tokens. 
+ +### Example of default log format + +```text +<10>1 2024-01-06T06:50:51.739Z 127.0.0.1 apisix 58525 - [token-1@41058 tag="apisix"] {"service_id":"","server":{"version":"3.7.0","hostname":"localhost"},"apisix_latency":100.99985313416,"request":{"url":"http://127.0.0.1:1984/opentracing","headers":{"content-type":"application/x-www-form-urlencoded","user-agent":"lua-resty-http/0.16.1 (Lua) ngx_lua/10025","host":"127.0.0.1:1984"},"querystring":{},"uri":"/opentracing","size":155,"method":"GET"},"response":{"headers":{"content-type":"text/plain","server":"APISIX/3.7.0","transfer-encoding":"chunked","connection":"close"},"size":141,"status":200},"route_id":"1","latency":103.99985313416,"upstream_latency":3,"client_ip":"127.0.0.1","upstream":"127.0.0.1:1982","start_time":1704523851634} +``` + +## Metadata + +You can also configure the Plugin through Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Valid values | Description | +|------------|---------|----------|----------------------|--------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| host | string | False | "logs-01.loggly.com" | | Endpoint of the host where the logs are being sent. | +| port | integer | False | 514 | | Loggly port to connect to. Only used for `syslog` protocol. | +| timeout | integer | False | 5000 | | Loggly send data request timeout in milliseconds. | +| protocol | string | False | "syslog" | [ "syslog" , "http", "https" ] | Protocol in which the logs are sent to Loggly. | +| log_format | object | False | nil | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. 
| + +We support [Syslog](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/streaming-syslog-without-using-files.htm), [HTTP/S](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/http-bulk-endpoint.htm) (bulk endpoint) protocols to send log events to Loggly. By default, in APISIX side, the protocol is set to "syslog". It lets you send RFC5424 compliant syslog events with some fine-grained control (log severity mapping based on upstream HTTP response code). But HTTP/S bulk endpoint is great to send larger batches of log events with faster transmission speed. If you wish to update it, just update the metadata. + +:::note + +APISIX supports [Syslog](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/streaming-syslog-without-using-files.htm) and [HTTP/S](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/http-bulk-endpoint.htm) protocols to send data to Loggly. Syslog lets you send RFC5424 compliant syslog events with fine-grained control. But, HTTP/S bulk endpoint is better while sending large batches of logs at a fast transmission speed. 
You can configure the metadata to update the protocol as shown below:
+
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/loggly -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "protocol": "http"
+}'
+```
+
+:::
+
+## Enable Plugin
+
+### Full configuration
+
+The example below shows a complete configuration of the Plugin on a specific Route:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "plugins":{
+        "loggly":{
+            "customer_token":"0e6fe4bf-376e-40f4-b25f-1d55cb29f5a2",
+            "tags":["apisix", "testroute"],
+            "severity":"info",
+            "severity_map":{
+                "503": "err",
+                "410": "alert"
+            },
+            "buffer_duration":60,
+            "max_retry_count":0,
+            "retry_delay":1,
+            "inactive_timeout":2,
+            "batch_max_size":10
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:80":1
+        }
+    },
+    "uri":"/index.html"
+}'
+```
+
+### Minimal configuration
+
+The example below shows a bare minimum configuration of the Plugin on a Route:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "plugins":{
+        "loggly":{
+            "customer_token":"0e6fe4bf-376e-40f4-b25f-1d55cb29f5a2"
+        }
+    },
+    "upstream":{
+        "type":"roundrobin",
+        "nodes":{
+            "127.0.0.1:80":1
+        }
+    },
+    "uri":"/index.html"
+}'
+```
+
+## Example usage
+
+Now, if you make a request to APISIX, it will be logged in Loggly:
+
+```shell
+curl -i http://127.0.0.1:9080/index.html
+```
+
+You can then view the logs on your Loggly Dashboard:
+
+![Loggly Dashboard](../../../assets/images/plugin/loggly-dashboard.png)
+
+## Delete Plugin
+
+To remove the `loggly` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/loki-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/loki-logger.md new file mode 100644 index 0000000..65a0a86 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/loki-logger.md @@ -0,0 +1,403 @@ +--- +title: loki-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Loki-logger + - Grafana Loki +description: The loki-logger Plugin pushes request and response logs in batches to Grafana Loki, via the Loki HTTP API /loki/api/v1/push. The Plugin also supports the customization of log formats. +--- + + + + + + + +## Description + +The `loki-logger` Plugin pushes request and response logs in batches to [Grafana Loki](https://grafana.com/oss/loki/), via the [Loki HTTP API](https://grafana.com/docs/loki/latest/reference/loki-http-api/#loki-http-api) `/loki/api/v1/push`. The Plugin also supports the customization of log formats. + +When enabled, the Plugin will serialize the request context information to [JSON objects](https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki) and add them to the queue, before they are pushed to Loki. See [batch processor](../batch-processor.md) for more details. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|---|---|---|---|---|---| +| endpoint_addrs | array[string] | True | | | Loki API base URLs, such as `http://127.0.0.1:3100`. If multiple endpoints are configured, the log will be pushed to a randomly determined endpoint from the list. 
| +| endpoint_uri | string | False | /loki/api/v1/push | | URI path to the Loki ingest endpoint. | +| tenant_id | string | False | fake | | Loki tenant ID. According to Loki's [multi-tenancy documentation](https://grafana.com/docs/loki/latest/operations/multi-tenancy/#multi-tenancy), the default value is set to `fake` under single-tenancy. | +| headers | object | False | | | Key-value pairs of request headers (settings for `X-Scope-OrgID` and `Content-Type` will be ignored). | +| log_labels | object | False | {job = "apisix"} | | Loki log label. Support [NGINX variables](https://nginx.org/en/docs/varindex.html) and constant strings in values. Variables should be prefixed with a `$` sign. For example, the label can be `{"origin" = "apisix"}` or `{"origin" = "$remote_addr"}`. | +| ssl_verify | boolean | False | true | | If true, verify Loki's SSL certificates. | +| timeout | integer | False | 3000 | [1, 60000] | Timeout for the Loki service HTTP call in milliseconds. | +| keepalive | boolean | False | true | | If true, keep the connection alive for multiple requests. | +| keepalive_timeout | integer | False | 60000 | >=1000 | Keepalive timeout in milliseconds. | +| keepalive_pool | integer | False | 5 | >=1 | Maximum number of connections in the connection pool. | +| log_format | object | False | | | Custom log format in key-value pairs in JSON format. Support [APISIX variables](../apisix-variable.md) and [NGINX variables](http://nginx.org/en/docs/varindex.html) in values. | +| name | string | False | loki-logger | | Unique identifier of the Plugin for the batch processor. If you use [Prometheus](./prometheus.md) to monitor APISIX metrics, the name is exported in `apisix_batch_process_entries`. | +| include_req_body | boolean | False | false | | If true, include the request body in the log. Note that if the request body is too big to be kept in the memory, it can not be logged due to NGINX's limitations. 
| +| include_req_body_expr | array[array] | False | | | An array of one or more conditions in the form of [lua-resty-expr](https://github.com/api7/lua-resty-expr). Used when the `include_req_body` is true. Request body would only be logged when the expressions configured here evaluate to true. | +| include_resp_body | boolean | False | false | | If true, include the response body in the log. | +| include_resp_body_expr | array[array] | False | | | An array of one or more conditions in the form of [lua-resty-expr](https://github.com/api7/lua-resty-expr). Used when the `include_resp_body` is true. Response body would only be logged when the expressions configured here evaluate to true. | + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +## Plugin Metadata + +You can also configure log format on a global scale using the [Plugin Metadata](../terminology/plugin-metadata.md), which configures the log format for all `loki-logger` Plugin instances. If the log format configured on the individual Plugin instance differs from the log format configured on Plugin metadata, the log format configured on the individual Plugin instance takes precedence. + +| Name | Type | Required | Default | Description | +|------|------|----------|---------|-------------| +| log_format | object | False | | Custom log format in key-value pairs in JSON format. Support [APISIX variables](../apisix-variable.md) and [NGINX variables](http://nginx.org/en/docs/varindex.html) in values. | + +## Examples + +The examples below demonstrate how you can configure `loki-logger` Plugin for different scenarios. 
+ +To follow along the examples, start a sample Loki instance in Docker: + +```shell +wget https://raw.githubusercontent.com/grafana/loki/v3.0.0/cmd/loki/loki-local-config.yaml -O loki-config.yaml +docker run --name loki -d -v $(pwd):/mnt/config -p 3100:3100 grafana/loki:3.2.1 -config.file=/mnt/config/loki-config.yaml +``` + +Additionally, start a Grafana instance to view and visualize the logs: + +```shell +docker run -d --name=apisix-quickstart-grafana \ + -p 3000:3000 \ + grafana/grafana-oss +``` + +To connect Loki and Grafana, visit Grafana at [`http://localhost:3000`](http://localhost:3000). Under __Connections > Data sources__, add a new data source and select Loki. Your connection URL should follow the format of `http://{your_ip_address}:3100`. When saving the new data source, Grafana should also test the connection, and you are expected to see Grafana notifying the data source is successfully connected. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Log Requests and Responses in Default Log Format + +The following example demonstrates how you can configure the `loki-logger` Plugin on a Route to log requests and responses going through the route. + +Create a Route with the `loki-logger` Plugin and configure the address of Loki: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "loki-logger-route", + "uri": "/anything", + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://192.168.1.5:3100"] + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Send a few requests to the Route to generate log entries: + +```shell +curl "http://127.0.0.1:9080/anything" +``` + +You should receive `HTTP/1.1 200 OK` responses for all requests. 
+ +Navigate to the [Grafana explore view](http://localhost:3000/explore) and run a query `job = apisix`. You should see a number of logs corresponding to your requests, such as the following: + +```json +{ + "route_id": "loki-logger-route", + "response": { + "status": 200, + "headers": { + "date": "Fri, 03 Jan 2025 03:54:26 GMT", + "server": "APISIX/3.11.0", + "access-control-allow-credentials": "true", + "content-length": "391", + "access-control-allow-origin": "*", + "content-type": "application/json", + "connection": "close" + }, + "size": 619 + }, + "start_time": 1735876466, + "client_ip": "192.168.65.1", + "service_id": "", + "apisix_latency": 5.0000038146973, + "upstream": "34.197.122.172:80", + "upstream_latency": 666, + "server": { + "hostname": "0b9a772e68f8", + "version": "3.11.0" + }, + "request": { + "headers": { + "user-agent": "curl/8.6.0", + "accept": "*/*", + "host": "127.0.0.1:9080" + }, + "size": 85, + "method": "GET", + "url": "http://127.0.0.1:9080/anything", + "querystring": {}, + "uri": "/anything" + }, + "latency": 671.0000038147 +} +``` + +This verifies that Loki has been receiving logs from APISIX. You may also create dashboards in Grafana to further visualize and analyze the logs. + +### Customize Log Format with Plugin Metadata + +The following example demonstrates how you can customize log format using [Plugin Metadata](../terminology/plugin-metadata.md). 
 + +Create a Route with the `loki-logger` plugin: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "loki-logger-route", + "uri": "/anything", + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://192.168.1.5:3100"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +Configure Plugin metadata for `loki-logger`, which will update the log format for all routes of which requests would be logged: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/plugin_metadata/loki-logger" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "log_format": { + "host": "$host", + "client_ip": "$remote_addr", + "route_id": "$route_id", + "@timestamp": "$time_iso8601" + } + }' +``` + +Send a request to the Route to generate a new log entry: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response. + +Navigate to the [Grafana explore view](http://localhost:3000/explore) and run a query `job = apisix`. You should see a log entry corresponding to your request, similar to the following: + +```json +{ + "@timestamp":"2025-01-03T21:11:34+00:00", + "client_ip":"192.168.65.1", + "route_id":"loki-logger-route", + "host":"127.0.0.1" +} +``` + +If the Plugin on a Route specifies a specific log format, it will take precedence over the log format specified in the Plugin metadata. For instance, update the Plugin on the previous Route as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/loki-logger-route" -X PATCH \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "plugins": { + "loki-logger": { + "log_format": { + "route_id": "$route_id", + "client_ip": "$remote_addr", + "@timestamp": "$time_iso8601" + } + } + } + }' +``` + +Send a request to the Route to generate a new log entry: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response. 
+ +Navigate to the [Grafana explore view](http://localhost:3000/explore) and re-run the query `job = apisix`. You should see a log entry corresponding to your request, consistent with the format configured on the route, similar to the following: + +```json +{ + "client_ip":"192.168.65.1", + "route_id":"loki-logger-route", + "@timestamp":"2025-01-03T21:19:45+00:00" +} +``` + +### Log Request Bodies Conditionally + +The following example demonstrates how you can conditionally log request body. + +Create a Route with `loki-logger` to only log request body if the URL query string `log_body` is `yes`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "loki-logger-route", + "uri": "/anything", + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://192.168.1.5:3100"], + "include_req_body": true, + "include_req_body_expr": [["arg_log_body", "==", "yes"]] + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Send a request to the Route with a URL query string satisfying the condition: + +```shell +curl -i "http://127.0.0.1:9080/anything?log_body=yes" -X POST -d '{"env": "dev"}' +``` + +Navigate to the [Grafana explore view](http://localhost:3000/explore) and run the query `job = apisix`. You should see a log entry corresponding to your request, where the request body is logged: + +```json +{ + "route_id": "loki-logger-route", + ..., + "request": { + "headers": { + ... 
+ }, + "body": "{\"env\": \"dev\"}", + "size": 182, + "method": "POST", + "url": "http://127.0.0.1:9080/anything?log_body=yes", + "querystring": { + "log_body": "yes" + }, + "uri": "/anything?log_body=yes" + }, + "latency": 809.99994277954 +} +``` + +Send a request to the Route without any URL query string: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST -d '{"env": "dev"}' +``` + +Navigate to the [Grafana explore view](http://localhost:3000/explore) and run the query `job = apisix`. You should see a log entry corresponding to your request, where the request body is not logged: + +```json +{ + "route_id": "loki-logger-route", + ..., + "request": { + "headers": { + ... + }, + "size": 169, + "method": "POST", + "url": "http://127.0.0.1:9080/anything", + "querystring": {}, + "uri": "/anything" + }, + "latency": 557.00016021729 +} +``` + +:::info + +If you have customized the `log_format` in addition to setting `include_req_body` or `include_resp_body` to `true`, the Plugin would not include the bodies in the logs. + +As a workaround, you may be able to use the NGINX variable `$request_body` in the log format, such as: + +```json +{ + "kafka-logger": { + ..., + "log_format": {"body": "$request_body"} + } +} +``` + +::: + +## FAQ + +### Logs are not pushed properly + +Look at `error.log` for such a log. + +```text +2023/04/30 13:45:46 [error] 19381#19381: *1075673 [lua] batch-processor.lua:95: Batch Processor[loki logger] failed to process entries: loki server returned status: 401, body: no org id, context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9081 +``` + +The error can be diagnosed based on the error code in the `failed to process entries: loki server returned status: 401, body: no org id` and the response body of the loki server. + +### Getting errors when RPS is high? + +- Make sure to `keepalive` related configuration is set properly. See [Attributes](#attributes) for more information. +- Check the logs in `error.log`, look for such a log. 
+ + ```text + 2023/04/30 13:49:34 [error] 19381#19381: *1082680 [lua] batch-processor.lua:95: Batch Processor[loki logger] failed to process entries: loki server returned status: 429, body: Ingestion rate limit exceeded for user tenant_1 (limit: 4194304 bytes/sec) while attempting to ingest '1000' lines totaling '616307' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased, context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9081 + ``` + + - The logs usually associated with high QPS look like the above. The error is: `Ingestion rate limit exceeded for user tenant_1 (limit: 4194304 bytes/sec) while attempting to ingest '1000' lines totaling '616307' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased`. + - Refer to [Loki documentation](https://grafana.com/docs/loki/latest/configuration/#limits_config) to add limits on the amount of default and burst logs, such as `ingestion_rate_mb` and `ingestion_burst_size_mb`. + + As the test during development, setting the `ingestion_burst_size_mb` to 100 allows APISIX to push the logs correctly at least at 10000 RPS. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/mocking.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/mocking.md new file mode 100644 index 0000000..71324a1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/mocking.md @@ -0,0 +1,250 @@ +--- +title: mocking +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Mocking +description: This document contains information about the Apache APISIX mocking Plugin. +--- + + + +## Description + +The `mocking` Plugin is used for mocking an API. When executed, it returns random mock data in the format specified and the request is not forwarded to the Upstream. 
+ +## Attributes + +| Name | Type | Required | Default | Description | +|------------------|---------|----------|------------------|----------------------------------------------------------------------------------------| +| delay | integer | False | | Response delay in seconds. | +| response_status | integer | False | 200 | HTTP status code of the response. | +| content_type | string | False | application/json | Header `Content-Type` of the response. | +| response_example | string | False | | Body of the response, support use variables, like `$remote_addr $consumer_name`. | +| response_schema | object | False | | The JSON schema object for the response. Works when `response_example` is unspecified. | +| with_mock_header | boolean | False | true | When set to `true`, adds a response header `x-mock-by: APISIX/{version}`. | +| response_headers | object | false | | Headers to be added in the mocked response. Example: `{"X-Foo": "bar", "X-Few": "baz"}`| + +The JSON schema supports the following types in their fields: + +- `string` +- `number` +- `integer` +- `boolean` +- `object` +- `array` + +Here is a JSON schema example: + +```json +{ + "properties":{ + "field0":{ + "example":"abcd", + "type":"string" + }, + "field1":{ + "example":123.12, + "type":"number" + }, + "field3":{ + "properties":{ + "field3_1":{ + "type":"string" + }, + "field3_2":{ + "properties":{ + "field3_2_1":{ + "example":true, + "type":"boolean" + }, + "field3_2_2":{ + "items":{ + "example":155.55, + "type":"integer" + }, + "type":"array" + } + }, + "type":"object" + } + }, + "type":"object" + }, + "field2":{ + "items":{ + "type":"string" + }, + "type":"array" + } + }, + "type":"object" +} +``` + +This is the response generated by the Plugin from this JSON schema: + +```json +{ + "field1": 123.12, + "field3": { + "field3_1": "LCFE0", + "field3_2": { + "field3_2_1": true, + "field3_2_2": [ + 155, + 155 + ] + } + }, + "field0": "abcd", + "field2": [ + "sC" + ] +} +``` + +## Enable Plugin + +The 
example below configures the `mocking` Plugin for a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "plugins": { + "mocking": { + "delay": 1, + "content_type": "application/json", + "response_status": 200, + "response_schema": { + "properties":{ + "field0":{ + "example":"abcd", + "type":"string" + }, + "field1":{ + "example":123.12, + "type":"number" + }, + "field3":{ + "properties":{ + "field3_1":{ + "type":"string" + }, + "field3_2":{ + "properties":{ + "field3_2_1":{ + "example":true, + "type":"boolean" + }, + "field3_2_2":{ + "items":{ + "example":155.55, + "type":"integer" + }, + "type":"array" + } + }, + "type":"object" + } + }, + "type":"object" + }, + "field2":{ + "items":{ + "type":"string" + }, + "type":"array" + } + }, + "type":"object" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +Once you have configured the Plugin as mentioned above, you can test the Route. + +The example used here uses this mocked response: + +```json +{ + "delay":0, + "content_type":"", + "with_mock_header":true, + "response_status":201, + "response_example":"{\"a\":1,\"b\":2}" +} +``` + +Now to test the Route: + +```shell +curl http://127.0.0.1:9080/test-mock -i +``` + +``` +HTTP/1.1 201 Created +... +Content-Type: application/json;charset=utf8 +x-mock-by: APISIX/2.10.0 +... + +{"a":1,"b":2} +``` + +## Delete Plugin + +To remove the `mocking` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/mqtt-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/mqtt-proxy.md new file mode 100644 index 0000000..07668d1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/mqtt-proxy.md @@ -0,0 +1,169 @@ +--- +title: mqtt-proxy +keywords: + - Apache APISIX + - API Gateway + - Plugin + - MQTT Proxy +description: This document contains information about the Apache APISIX mqtt-proxy Plugin. The `mqtt-proxy` Plugin is used for dynamic load balancing with `client_id` of MQTT. +--- + + + +## Description + +The `mqtt-proxy` Plugin is used for dynamic load balancing with `client_id` of MQTT. It only works in stream model. + +This Plugin supports both the protocols [3.1.*](http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html) and [5.0](https://docs.oasis-open.org/mqtt/mqtt/v5.0/mqtt-v5.0.html). + +## Attributes + +| Name | Type | Required | Description | +|----------------|---------|------------|-----------------------------------------------------------------------------------| +| protocol_name | string | True | Name of the protocol. Generally `MQTT`. | +| protocol_level | integer | True | Level of the protocol. It should be `4` for MQTT `3.1.*` and `5` for MQTT `5.0`. | + +## Enable Plugin + +To enable the Plugin, you need to first enable the `stream_proxy` configuration in your configuration file (`conf/config.yaml`). The below configuration represents listening on the `9100` TCP port: + +```yaml title="conf/config.yaml" + ... + router: + http: 'radixtree_uri' + ssl: 'radixtree_sni' + stream_proxy: # TCP/UDP proxy + tcp: # TCP proxy port list + - 9100 + dns_resolver: + ... 
+``` + +You can now send the MQTT request to port `9100`. + +You can now create a stream Route and enable the `mqtt-proxy` Plugin: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": [{ + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + }] + } +}' +``` + +:::note + +If you are using Docker in macOS, then `host.docker.internal` is the right parameter for the `host` attribute. + +::: + +This Plugin exposes a variable `mqtt_client_id` which can be used for load balancing as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + }, + { + "host": "127.0.0.2", + "port": 1995, + "weight": 1 + } + ] + } +}' +``` + +MQTT connections with different client ID will be forwarded to different nodes based on the consistent hash algorithm. If client ID is missing, client IP is used instead for load balancing. + +## Enabling mTLS with mqtt-proxy plugin + +Stream proxies use TCP connections and can accept TLS. Follow the guide about [how to accept tls over tcp connections](../stream-proxy.md/#accept-tls-over-tcp-connection) to open a stream proxy with enabled TLS. + +The `mqtt-proxy` plugin is enabled through TCP communications on the specified port for the stream proxy, and will also require clients to authenticate via TLS if `tls` is set to `true`. 
+ +Configure `ssl` providing the CA certificate and the server certificate, together with a list of SNIs. Steps to protect `stream_routes` with `ssl` are equivalent to the ones to [protect Routes](../mtls.md/#protect-route). + +### Create a stream_route using mqtt-proxy plugin and mTLS + +Here is an example of how create a stream_route which is using the `mqtt-proxy` plugin, providing the CA certificate, the client certificate and the client key (for self-signed certificates which are not trusted by your host, use the `-k` flag): + +```shell +curl 127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "sni": "${your_sni_name}", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } +}' +``` + +The `sni` name must match one or more of the SNIs provided to the SSL object that you created with the CA and server certificates. + +## Delete Plugin + +To remove the `mqtt-proxy` Plugin you can remove the corresponding configuration as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X DELETE +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/multi-auth.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/multi-auth.md new file mode 100644 index 0000000..824259f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/multi-auth.md @@ -0,0 +1,164 @@ +--- +title: multi-auth +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Multi Auth + - multi-auth +description: This document contains information about the Apache APISIX multi-auth Plugin. +--- + + + +## Description + +The `multi-auth` Plugin is used to add multiple authentication methods to a Route or a Service. It supports plugins of type 'auth'. You can combine different authentication methods using `multi-auth` plugin. 
+ +This plugin provides a flexible authentication mechanism by iterating through the list of authentication plugins specified in the `auth_plugins` attribute. It allows multiple consumers to share the same route while using different authentication methods. For example, one consumer can authenticate using basic authentication, while another consumer can authenticate using JWT. + +## Attributes + +For Route: + +| Name | Type | Required | Default | Description | +|--------------|-------|----------|---------|-----------------------------------------------------------------------| +| auth_plugins | array | True | - | Add supporting auth plugins configuration. expects at least 2 plugins | + +## Enable Plugin + +To enable the Plugin, you have to create two or more Consumer objects with different authentication configurations: + +First create a Consumer using basic authentication: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "foo1", + "plugins": { + "basic-auth": { + "username": "foo1", + "password": "bar1" + } + } +}' +``` + +Then create a Consumer using key authentication: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "foo2", + "plugins": { + "key-auth": { + "key": "auth-one" + } + } +}' +``` + +Once you have created Consumer objects, you can then configure a Route or a Service to authenticate requests: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": { + "multi-auth":{ + "auth_plugins":[ + { + "basic-auth":{ } + }, + { + "key-auth":{ + "query":"apikey", + "hide_credentials":true, + 
"header":"apikey" + } + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +After you have configured the Plugin as mentioned above, you can make a request to the Route as shown below: + +Send a request with `basic-auth` credentials: + +```shell +curl -i -ufoo1:bar1 http://127.0.0.1:9080/hello +``` + +Send a request with `key-auth` credentials: + +```shell +curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -i +``` + +``` +HTTP/1.1 200 OK +... +hello, world +``` + +If the request is not authorized, an `401 Unauthorized` error will be thrown: + +```json +{"message":"Authorization Failed"} +``` + +## Delete Plugin + +To remove the `multi-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/node-status.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/node-status.md new file mode 100644 index 0000000..91e184c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/node-status.md @@ -0,0 +1,128 @@ +--- +title: node-status +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Node status +description: This document contains information about the Apache APISIX node-status Plugin. +--- + + +## Description + +The `node-status` Plugin can be used get the status of requests to APISIX by exposing an API endpoint. + +## Attributes + +None. + +## API + +This Plugin will add the endpoint `/apisix/status` to expose the status of APISIX. 
 + +You may need to use the [public-api](public-api.md) Plugin to expose the endpoint. + +## Enable Plugin + +To configure the `node-status` Plugin, you have to first enable it in your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + - example-plugin + - limit-req + - jwt-auth + - zipkin + - node-status + ...... +``` + +You have to set up the Route for the status API and expose it using the [public-api](public-api.md) Plugin. + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/ns -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/apisix/status", + "plugins": { + "public-api": {} + } +}' +``` + +## Example usage + +Once you have configured the Plugin, you can make a request to the `apisix/status` endpoint to get the status: + +```shell +curl http://127.0.0.1:9080/apisix/status -i +``` + +```shell +HTTP/1.1 200 OK +Date: Tue, 03 Nov 2020 11:12:55 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server + +{"status":{"total":"23","waiting":"0","accepted":"22","writing":"1","handled":"22","active":"1","reading":"0"},"id":"6790a064-8f61-44ba-a6d3-5df42f2b1bb3"} +``` + +The parameters in the response are described below: + +| Parameter | Description | +|-----------|------------------------------------------------------------------------------------------------------------------------| +| status | Status of APISIX. | +| total | Total number of client requests. | +| waiting | Number of idle client connections waiting for a request. | +| accepted | Number of accepted client connections. | +| writing | Number of connections to which APISIX is writing back a response. | +| handled | Number of handled connections. 
Generally, this value is the same as `accepted` unless a resource limit is reached.
+ - ocsp-stapling +``` + +After modifying the config file, reload APISIX or send an hot-loaded HTTP request through the Admin API to take effect: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H "X-API-KEY: $admin_key" -X PUT +``` + +## Attributes + +The attributes of this plugin are stored in specific field `ocsp_stapling` within SSL Resource. + +| Name | Type | Required | Default | Valid values | Description | +|----------------|----------------------|----------|---------------|--------------|-----------------------------------------------------------------------------------------------| +| enabled | boolean | False | false | | Like the `ssl_stapling` directive, enables or disables OCSP stapling feature. | +| skip_verify | boolean | False | false | | Like the `ssl_stapling_verify` directive, enables or disables verification of OCSP responses. | +| cache_ttl | integer | False | 3600 | >= 60 | Specifies the expired time of OCSP response cache. | + +## Example usage + +You should create an SSL Resource first, and the certificate of the server certificate issuer should be known. Normally the fullchain certificate works fine. + +Create an SSL Resource as such: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ocsp_stapling": { + "enabled": true + } +}' +``` + +Next, establish a secure connection to the server, request the SSL/TLS session status, and display the output from the server: + +```shell +echo -n "Q" | openssl s_client -status -connect localhost:9443 -servername test.com 2>&1 | cat +``` + +``` +... 
+CONNECTED(00000003) +OCSP response: +====================================== +OCSP Response Data: + OCSP Response Status: successful (0x0) +... +``` + +To disable OCSP stapling feature, you can make a request as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ocsp_stapling": { + "enabled": false + } +}' +``` + +## Delete Plugin + +Make sure all your SSL Resource doesn't contains `ocsp_stapling` field anymore. To remove this field, you can make a request as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PATCH -d ' +{ + "ocsp_stapling": null +}' +``` + +Modify the config file `./conf/config.yaml` to disable the plugin: + +```yaml title="./conf/config.yaml" +plugins: + - ... + # - ocsp-stapling +``` + +After modifying the config file, reload APISIX or send an hot-loaded HTTP request through the Admin API to take effect: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H "X-API-KEY: $admin_key" -X PUT +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/opa.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/opa.md new file mode 100644 index 0000000..12d79a2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/opa.md @@ -0,0 +1,327 @@ +--- +title: opa +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Open Policy Agent + - opa +description: This document contains information about the Apache APISIX opa Plugin. +--- + + + +## Description + +The `opa` Plugin can be used to integrate with [Open Policy Agent (OPA)](https://www.openpolicyagent.org). 
OPA is a policy engine that helps define and enforce authorization policies, which determine whether a user or application has the necessary permissions to perform a particular action or access a particular resource. Using OPA with APISIX decouples authorization logic from APISIX.
| + +## Data definition + +### APISIX to OPA service + +The JSON below shows the data sent to the OPA service by APISIX: + +```json +{ + "type": "http", + "request": { + "scheme": "http", + "path": "\/get", + "headers": { + "user-agent": "curl\/7.68.0", + "accept": "*\/*", + "host": "127.0.0.1:9080" + }, + "query": {}, + "port": 9080, + "method": "GET", + "host": "127.0.0.1" + }, + "var": { + "timestamp": 1701234567, + "server_addr": "127.0.0.1", + "server_port": "9080", + "remote_port": "port", + "remote_addr": "ip address" + }, + "route": {}, + "service": {}, + "consumer": {} +} +``` + +Each of these keys are explained below: + +- `type` indicates the request type (`http` or `stream`). +- `request` is used when the `type` is `http` and contains the basic request information (URL, headers etc). +- `var` contains the basic information about the requested connection (IP, port, request timestamp etc). +- `route`, `service` and `consumer` contains the same data as stored in APISIX and are only sent if the `opa` Plugin is configured on these objects. + +### OPA service to APISIX + +The JSON below shows the response from the OPA service to APISIX: + +```json +{ + "result": { + "allow": true, + "reason": "test", + "headers": { + "an": "header" + }, + "status_code": 401 + } +} +``` + +The keys in the response are explained below: + +- `allow` is indispensable and indicates whether the request is allowed to be forwarded through APISIX. +- `reason`, `headers`, and `status_code` are optional and are only returned when you configure a custom response. See the next section use cases for this. 
+ +## Example usage + +First, you need to launch the Open Policy Agent environment: + +```shell +docker run -d --name opa -p 8181:8181 openpolicyagent/opa:0.35.0 run -s +``` + +### Basic usage + +Once you have the OPA service running, you can create a basic policy: + +```shell +curl -X PUT '127.0.0.1:8181/v1/policies/example1' \ + -H 'Content-Type: text/plain' \ + -d 'package example1 + +import input.request + +default allow = false + +allow { + # HTTP method must GET + request.method == "GET" +}' +``` + +Then, you can configure the `opa` Plugin on a specific Route: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r1' \ + -H 'X-API-KEY: ' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/*", + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "example1" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } +}' +``` + +Now, to test it out: + +```shell +curl -i -X GET 127.0.0.1:9080/get +``` + +```shell +HTTP/1.1 200 OK +``` + +Now if we try to make a request to a different endpoint the request will fail: + +``` +curl -i -X POST 127.0.0.1:9080/post +``` + +```shell +HTTP/1.1 403 FORBIDDEN +``` + +### Using custom response + +You can also configure custom responses for more complex scenarios: + +```shell +curl -X PUT '127.0.0.1:8181/v1/policies/example2' \ + -H 'Content-Type: text/plain' \ + -d 'package example2 + +import input.request + +default allow = false + +allow { + request.method == "GET" +} + +# custom response body (Accepts a string or an object, the object will respond as JSON format) +reason = "test" { + not allow +} + +# custom response header (The data of the object can be written in this way) +headers = { + "Location": "http://example.com/auth" +} { + not allow +} + +# custom response status code +status_code = 302 { + not allow +}' +``` + +Now you can test it out by changing the `opa` Plugin's policy parameter to `example2` and then making a request: + +```shell 
+curl -i -X GET 127.0.0.1:9080/get +``` + +``` +HTTP/1.1 200 OK +``` + +Now if you make a failing request, you will see the custom response from the OPA service: + +``` +curl -i -X POST 127.0.0.1:9080/post +``` + +``` +HTTP/1.1 302 FOUND +Location: http://example.com/auth + +test +``` + +### Sending APISIX data + +Let's think about another scenario, when your decision needs to use some APISIX data, such as `route`, `consumer`, etc., how should we do it? + +If your OPA service needs to make decisions based on APISIX data like Route and Consumer details, you can configure the Plugin to do so. + +The example below shows a simple `echo` policy which will return the data sent by APISIX as it is: + +```shell +curl -X PUT '127.0.0.1:8181/v1/policies/echo' \ + -H 'Content-Type: text/plain' \ + -d 'package echo + +allow = false +reason = input' +``` + +Now we can configure the Plugin on the Route to send APISIX data: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r1' \ + -H 'X-API-KEY: ' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/*", + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "echo", + "with_route": true + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } +}' +``` + +Now if you make a request, you can see the data from the Route through the custom response: + +```shell +curl -X GET 127.0.0.1:9080/get +{ + "type": "http", + "request": { + xxx + }, + "var": { + xxx + }, + "route": { + xxx + } +} +``` + +## Delete Plugin + +To remove the `opa` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/openfunction.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/openfunction.md new file mode 100644 index 0000000..12340ee --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/openfunction.md @@ -0,0 +1,170 @@ +--- +title: openfunction +keywords: + - Apache APISIX + - API Gateway + - Plugin + - OpenFunction +description: This document contains information about the Apache APISIX openfunction Plugin. +--- + + + +## Description + +The `openfunction` Plugin is used to integrate APISIX with [CNCF OpenFunction](https://openfunction.dev/) serverless platform. + +This Plugin can be configured on a Route and requests will be sent to the configured OpenFunction API endpoint as the upstream. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| --------------------------- | ------- | -------- | ------- | ------------ | ---------------------------------------------------------------------------------------------------------- | +| function_uri | string | True | | | function uri. For example, `https://localhost:30858/default/function-sample`. | +| ssl_verify | boolean | False | true | | When set to `true` verifies the SSL certificate. | +| authorization | object | False | | | Authorization credentials to access functions of OpenFunction. 
| +| authorization.service_token | string | False | | | The token format is 'xx:xx' which supports basic auth for function entry points. | +| timeout | integer | False | 3000 ms | [100, ...] ms| OpenFunction action and HTTP call timeout in ms. | +| keepalive | boolean | False | true | | When set to `true` keeps the connection alive for reuse. | +| keepalive_timeout | integer | False | 60000 ms| [1000,...] ms| Time is ms for connection to remain idle without closing. | +| keepalive_pool | integer | False | 5 | [1,...] | Maximum number of requests that can be sent on this connection before closing it. | + +:::note + +The `timeout` attribute sets the time taken by the OpenFunction to execute, and the timeout for the HTTP client in APISIX. OpenFunction calls may take time to pull the runtime image and start the container. So, if the value is set too small, it may cause a large number of requests to fail. + +::: + +## Prerequisites + +Before configuring the plugin, you need to have OpenFunction running. +Installation of OpenFunction requires a certain version Kubernetes cluster. +For details, please refer to [Installation](https://openfunction.dev/docs/getting-started/installation/). + +### Create and Push a Function + +You can then create a function following the [sample](https://github.com/OpenFunction/samples) + +You'll need to push your function container image to a container registry like Docker Hub or Quay.io when building a function. To do that, you'll need to generate a secret for your container registry first. 
+ +```shell +REGISTRY_SERVER=https://index.docker.io/v1/ REGISTRY_USER= ${your_registry_user} REGISTRY_PASSWORD= ${your_registry_password} +kubectl create secret docker-registry push-secret \ + --docker-server=$REGISTRY_SERVER \ + --docker-username=$REGISTRY_USER \ + --docker-password=$REGISTRY_PASSWORD +``` + +## Enable the Plugin + +You can now configure the Plugin on a specific Route and point to this running OpenFunction service: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample/test", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +## Example usage + +Once you have configured the plugin, you can send a request to the Route and it will invoke the configured function: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +This will give back the response from the function: + +``` +hello, test! +``` + +### Configure Path Transforming + +The `OpenFunction` Plugin also supports transforming the URL path while proxying requests to the OpenFunction API endpoints. Extensions to the base request path get appended to the `function_uri` specified in the Plugin configuration. + +:::info IMPORTANT + +The `uri` configured on a Route must end with `*` for this feature to work properly. APISIX Routes are matched strictly and the `*` implies that any subpath to this URI would be matched to the same Route. 
+ +::: + +The example below configures this feature: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello/*", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +Now, any requests to the path `hello/123` will invoke the OpenFunction, and the added path is forwarded: + +```shell +curl http://127.0.0.1:9080/hello/123 +``` + +```shell +Hello, 123! +``` + +## Delete Plugin + +To remove the `openfunction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/openid-connect.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/openid-connect.md new file mode 100644 index 0000000..6d5b7b7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/openid-connect.md @@ -0,0 +1,257 @@ +--- +title: openid-connect +keywords: + - Apache APISIX + - API Gateway + - OpenID Connect + - OIDC +description: The openid-connect Plugin supports the integration with OpenID Connect (OIDC) identity providers, such as Keycloak, Auth0, Microsoft Entra ID, Google, Okta, and more. It allows APISIX to authenticate clients and obtain their information from the identity provider before allowing or denying their access to upstream protected resources. 
+--- + + + + + + + +## Description + +The `openid-connect` Plugin supports the integration with [OpenID Connect (OIDC)](https://openid.net/connect/) identity providers, such as Keycloak, Auth0, Microsoft Entra ID, Google, Okta, and more. It allows APISIX to authenticate clients and obtain their information from the identity provider before allowing or denying their access to upstream protected resources. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|--------------------------------------|----------|----------|-----------------------|--------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| client_id | string | True | | | OAuth client ID. | +| client_secret | string | True | | | OAuth client secret. | +| discovery | string | True | | | URL to the well-known discovery document of the OpenID provider, which contains a list of OP API endpoints. The Plugin can directly utilize the endpoints from the discovery document. You can also configure these endpoints individually, which takes precedence over the endpoints supplied in the discovery document. | +| scope | string | False | openid | | OIDC scope that corresponds to information that should be returned about the authenticated user, also known as [claims](https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims). This is used to authorize users with proper permission. The default value is `openid`, the required scope for OIDC to return a `sub` claim that uniquely identifies the authenticated user. Additional scopes can be appended and delimited by spaces, such as `openid email profile`. | +| required_scopes | array[string] | False | | | Scopes required to be present in the access token. Used in conjunction with the introspection endpoint when `bearer_only` is `true`. 
If any required scope is missing, the Plugin rejects the request with a 403 forbidden error. | +| realm | string | False | apisix | | Realm in [`WWW-Authenticate`](https://www.rfc-editor.org/rfc/rfc6750#section-3) response header accompanying a 401 unauthorized request due to invalid bearer token. | +| bearer_only | boolean | False | false | | If true, strictly require bearer access token in requests for authentication. | +| logout_path | string | False | /logout | | Path to activate the logout. | +| post_logout_redirect_uri | string | False | | | URL to redirect users to after the `logout_path` receive a request to log out. | +| redirect_uri | string | False | | | URI to redirect to after authentication with the OpenID provider. Note that the redirect URI should not be the same as the request URI, but a sub-path of the request URI. For example, if the `uri` of the Route is `/api/v1/*`, `redirect_uri` can be configured as `/api/v1/redirect`. If `redirect_uri` is not configured, APISIX will append `/.apisix/redirect` to the request URI to determine the value for `redirect_uri`. | +| timeout | integer | False | 3 | [1,...] | Request timeout time in seconds. | +| ssl_verify | boolean | False | false | | If true, verify the OpenID provider 's SSL certificates. | +| introspection_endpoint | string | False | | | URL of the [token introspection](https://datatracker.ietf.org/doc/html/rfc7662) endpoint for the OpenID provider used to introspect access tokens. If this is unset, the introspection endpoint presented in the well-known discovery document is used [as a fallback](https://github.com/zmartzone/lua-resty-openidc/commit/cdaf824996d2b499de4c72852c91733872137c9c). | +| introspection_endpoint_auth_method | string | False | client_secret_basic | | Authentication method for the token introspection endpoint. 
The value should be one of the authentication methods specified in the `introspection_endpoint_auth_methods_supported` [authorization server metadata](https://www.rfc-editor.org/rfc/rfc8414.html) as seen in the well-known discovery document, such as `client_secret_basic`, `client_secret_post`, `private_key_jwt`, and `client_secret_jwt`. | +| token_endpoint_auth_method | string | False | client_secret_basic | | Authentication method for the token endpoint. The value should be one of the authentication methods specified in the `token_endpoint_auth_methods_supported` [authorization server metadata](https://www.rfc-editor.org/rfc/rfc8414.html) as seen in the well-known discovery document, such as `client_secret_basic`, `client_secret_post`, `private_key_jwt`, and `client_secret_jwt`. If the configured method is not supported, fall back to the first method in the `token_endpoint_auth_methods_supported` array. | +| public_key | string | False | | | Public key used to verify JWT signature id asymmetric algorithm is used. Providing this value to perform token verification will skip token introspection in client credentials flow. You can pass the public key in `-----BEGIN PUBLIC KEY-----\\n……\\n-----END PUBLIC KEY-----` format. | +| use_jwks | boolean | False | false | | If true and if `public_key` is not set, use the JWKS to verify JWT signature and skip token introspection in client credentials flow. The JWKS endpoint is parsed from the discovery document. | +| use_pkce | boolean | False | false | | If true, use the Proof Key for Code Exchange (PKCE) for Authorization Code Flow as defined in [RFC 7636](https://datatracker.ietf.org/doc/html/rfc7636). | +| token_signing_alg_values_expected | string | False | | | Algorithm used for signing JWT, such as `RS256`. | +| set_access_token_header | boolean | False | true | | If true, set the access token in a request header. By default, the `X-Access-Token` header is used. 
| +| access_token_in_authorization_header | boolean | False | false | | If true and if `set_access_token_header` is also true, set the access token in the `Authorization` header. | +| set_id_token_header | boolean | False | true | | If true and if the ID token is available, set the value in the `X-ID-Token` request header. | +| set_userinfo_header | boolean | False | true | | If true and if user info data is available, set the value in the `X-Userinfo` request header. | +| set_refresh_token_header | boolean | False | false | | If true and if the refresh token is available, set the value in the `X-Refresh-Token` request header. | +| session | object | False | | | Session configuration used when `bearer_only` is `false` and the Plugin uses Authorization Code flow. | +| session.secret | string | True | | 16 or more characters | Key used for session encryption and HMAC operation when `bearer_only` is `false`. It is automatically generated and saved to etcd if not configured. When using APISIX in the standalone mode where etcd is no longer the configuration center, the `secret` should be configured. | +| session.cookie | object | False | | | Cookie configurations. | +| session.cookie.lifetime | integer | False | 3600 | | Cookie lifetime in seconds. | +| session_contents | object | False | | | Session content configurations. If unconfigured, all data will be stored in the session. | +| session_contents.access_token | boolean | False | | | If true, store the access token in the session. | +| session_contents.id_token | boolean | False | | | If true, store the ID token in the session. | +| session_contents.enc_id_token | boolean | False | | | If true, store the encrypted ID token in the session. | +| session_contents.user | boolean | False | | | If true, store the user info in the session. | +| unauth_action | string | False | auth | ["auth","deny","pass"] | Action for unauthenticated requests. 
When set to `auth`, redirect to the authentication endpoint of the OpenID provider. When set to `pass`, allow the request without authentication. When set to `deny`, return 401 unauthenticated responses rather than start the authorization code grant flow. | +| proxy_opts | object | False | | | Configurations for the proxy server that the OpenID provider is behind. | +| proxy_opts.http_proxy | string | False | | | Proxy server address for HTTP requests, such as `http://:`. | +| proxy_opts.https_proxy | string | False | | | Proxy server address for HTTPS requests, such as `http://:`. | +| proxy_opts.http_proxy_authorization | string | False | | Basic [base64 username:password] | Default `Proxy-Authorization` header value to be used with `http_proxy`. Can be overridden with custom `Proxy-Authorization` request header. | +| proxy_opts.https_proxy_authorization | string | False | | Basic [base64 username:password] | Default `Proxy-Authorization` header value to be used with `https_proxy`. Cannot be overridden with custom `Proxy-Authorization` request header since with HTTPS, the authorization is completed when connecting. | +| proxy_opts.no_proxy | string | False | | | Comma separated list of hosts that should not be proxied. | +| authorization_params | object | False | | | Additional parameters to send in the request to the authorization endpoint. | +| client_rsa_private_key | string | False | | | Client RSA private key used to sign JWT for authentication to the OP. Required when `token_endpoint_auth_method` is `private_key_jwt`. | +| client_rsa_private_key_id | string | False | | | Client RSA private key ID used to compute a signed JWT. Optional when `token_endpoint_auth_method` is `private_key_jwt`. | +| client_jwt_assertion_expires_in | integer | False | 60 | | Life duration of the signed JWT for authentication to the OP, in seconds. Used when `token_endpoint_auth_method` is `private_key_jwt` or `client_secret_jwt`. 
| +| renew_access_token_on_expiry | boolean | False | true | | If true, attempt to silently renew the access token when it expires or if a refresh token is available. If the token fails to renew, redirect user for re-authentication. | +| access_token_expires_in | integer | False | | | Lifetime of the access token in seconds if no `expires_in` attribute is present in the token endpoint response. | +| refresh_session_interval | integer | False | | | Time interval to refresh user ID token without requiring re-authentication. When not set, it will not check the expiration time of the session issued to the client by the gateway. If set to 900, it means refreshing the user's id_token (or session in the browser) after 900 seconds without requiring re-authentication. | +| iat_slack | integer | False | 120 | | Tolerance of clock skew in seconds with the `iat` claim in an ID token. | +| accept_none_alg | boolean | False | false | | Set to true if the OpenID provider does not sign its ID token, such as when the signature algorithm is set to `none`. | +| accept_unsupported_alg | boolean | False | true | | If true, ignore ID token signature to accept unsupported signature algorithm. | +| access_token_expires_leeway | integer | False | 0 | | Expiration leeway in seconds for access token renewal. When set to a value greater than 0, token renewal will take place the set amount of time before token expiration. This avoids errors in case the access token just expires when arriving to the resource server. | +| force_reauthorize | boolean | False | false | | If true, execute the authorization flow even when a token has been cached. | +| use_nonce | boolean | False | false | | If true, enable nonce parameter in authorization request. | +| revoke_tokens_on_logout | boolean | False | false | | If true, notify the authorization server a previously obtained refresh or access token is no longer needed at the revocation endpoint. 
| +| jwk_expires_in | integer | False | 86400 | | Expiration time for JWK cache in seconds. | +| jwt_verification_cache_ignore | boolean | False | false | | If true, force re-verification for a bearer token and ignore any existing cached verification results. | +| cache_segment | string | False | | | Optional name of a cache segment, used to separate and differentiate caches used by token introspection or JWT verification. | +| introspection_interval | integer | False | 0 | | TTL of the cached and introspected access token in seconds. The default value is 0, which means this option is not used and the Plugin defaults to use the TTL passed by expiry claim defined in `introspection_expiry_claim`. If `introspection_interval` is larger than 0 and less than the TTL passed by expiry claim defined in `introspection_expiry_claim`, use `introspection_interval`. | +| introspection_expiry_claim | string | False | exp | | Name of the expiry claim, which controls the TTL of the cached and introspected access token. | +| introspection_addon_headers | array[string] | False | | | Used to append additional header values to the introspection HTTP request. If the specified header does not exist in origin request, value will not be appended. | +| claim_validator.issuer.valid_issuers | string[] | False | | | Whitelist the vetted issuers of the jwt. When not passed by the user, the issuer returned by discovery endpoint will be used. In case both are missing, the issuer will not be validated. | + +NOTE: `encrypt_fields = {"client_secret"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). + +## Examples + +The examples below demonstrate how you can configure the `openid-connect` Plugin for different scenarios. 
+
+:::note
+
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+### Authorization Code Flow
+
+The authorization code flow is defined in [RFC 6749, Section 4.1](https://datatracker.ietf.org/doc/html/rfc6749#section-4.1). It involves exchanging a temporary authorization code for an access token, and is typically used by confidential and public clients.
+
+The following diagram illustrates the interaction between different entities when you implement the authorization code flow:
+
+![Authorization code flow diagram](https://static.api7.ai/uploads/2023/11/27/Ga2402sb_oidc-code-auth-flow-revised.png)
+
+When an incoming request does not contain an access token in its header nor in an appropriate session cookie, the Plugin acts as a relying party and redirects to the authorization server to continue the authorization code flow.
+
+After successful authentication, the Plugin keeps the token in the session cookie, and subsequent requests will use the token stored in the cookie.
+
+See [Implement Authorization Code Grant](../tutorials/keycloak-oidc.md#implement-authorization-code-grant) for an example to use the `openid-connect` Plugin to integrate with Keycloak using the authorization code flow.
+
+### Proof Key for Code Exchange (PKCE)
+
+The Proof Key for Code Exchange (PKCE) is defined in [RFC 7636](https://datatracker.ietf.org/doc/html/rfc7636). PKCE enhances the authorization code flow by adding a code challenge and verifier to prevent authorization code interception attacks. 
+ +The following diagram illustrates the interaction between different entities when you implement the authorization code flow with PKCE: + +![Authorization code flow with PKCE diagram](https://static.api7.ai/uploads/2024/11/04/aJ2ZVuTC_auth-code-with-pkce.png) + +See [Implement Authorization Code Grant](../tutorials/keycloak-oidc.md#implement-authorization-code-grant) for an example to use the `openid-connect` Plugin to integrate with Keycloak using the authorization code flow with PKCE. + +### Client Credential Flow + +The client credential flow is defined in [RFC 6749, Section 4.4](https://datatracker.ietf.org/doc/html/rfc6749#section-4.4). It involves clients requesting an access token with its own credentials to access protected resources, typically used in machine to machine authentication and is not on behalf of a specific user. + +The following diagram illustrates the interaction between different entities when you implement the client credential flow: + +
+Client credential flow diagram +
+
+ +See [Implement Client Credentials Grant](../tutorials/keycloak-oidc.md#implement-client-credentials-grant) for an example to use the `openid-connect` Plugin to integrate with Keycloak using the client credentials flow. + +### Introspection Flow + +The introspection flow is defined in [RFC 7662](https://datatracker.ietf.org/doc/html/rfc7662). It involves verifying the validity and details of an access token by querying an authorization server’s introspection endpoint. + +In this flow, when a client presents an access token to the resource server, the resource server sends a request to the authorization server’s introspection endpoint, which responds with token details if the token is active, including information like token expiration, associated scopes, and the user or client it belongs to. + +The following diagram illustrates the interaction between different entities when you implement the authorization code flow with token introspection: + +
+
+Client credential with introspection diagram +
+
+ +See [Implement Client Credentials Grant](../tutorials/keycloak-oidc.md#implement-client-credentials-grant) for an example to use the `openid-connect` Plugin to integrate with Keycloak using the client credentials flow with token introspection. + +### Password Flow + +The password flow is defined in [RFC 6749, Section 4.3](https://datatracker.ietf.org/doc/html/rfc6749#section-4.3). It is designed for trusted applications, allowing them to obtain an access token directly using a user’s username and password. In this grant type, the client app sends the user’s credentials along with its own client ID and secret to the authorization server, which then authenticates the user and, if valid, issues an access token. + +Though efficient, this flow is intended for highly trusted, first-party applications only, as it requires the app to handle sensitive user credentials directly, posing significant security risks if used in third-party contexts. + +The following diagram illustrates the interaction between different entities when you implement the password flow: + +
+Password flow diagram +
+
+ +See [Implement Password Grant](../tutorials/keycloak-oidc.md#implement-password-grant) for an example to use the `openid-connect` Plugin to integrate with Keycloak using the password flow. + +### Refresh Token Grant + +The refresh token grant is defined in [RFC 6749, Section 6](https://datatracker.ietf.org/doc/html/rfc6749#section-6). It enables clients to request a new access token without requiring the user to re-authenticate, using a previously issued refresh token. This flow is typically used when an access token expires, allowing the client to maintain continuous access to resources without user intervention. Refresh tokens are issued along with access tokens in certain OAuth flows and their lifespan and security requirements depend on the authorization server’s configuration. + +The following diagram illustrates the interaction between different entities when implementing password flow with refresh token flow: + +
+Password grant with refresh token flow diagram +
+
+ +See [Refresh Token](../tutorials/keycloak-oidc.md#refresh-token) for an example to use the `openid-connect` Plugin to integrate with Keycloak using the password flow with token refreshes. + +## Troubleshooting + +This section covers a few commonly seen issues when working with this Plugin to help you troubleshoot. + +### APISIX Cannot Connect to OpenID provider + +If APISIX fails to resolve or cannot connect to the OpenID provider, double check the DNS settings in your configuration file `config.yaml` and modify as needed. + +### No Session State Found + +If you encounter a `500 internal server error` with the following message in the log when working with [authorization code flow](#authorization-code-flow), there could be a number of reasons. + +```text +the error request to the redirect_uri path, but there's no session state found +``` + +#### 1. Misconfigured Redirection URI + +A common misconfiguration is to configure the `redirect_uri` the same as the URI of the route. When a user initiates a request to visit the protected resource, the request directly hits the redirection URI with no session cookie in the request, which leads to the no session state found error. + +To properly configure the redirection URI, make sure that the `redirect_uri` matches the Route where the Plugin is configured, without being fully identical. For instance, a correct configuration would be to configure `uri` of the Route to `/api/v1/*` and the path portion of the `redirect_uri` to `/api/v1/redirect`. + +You should also ensure that the `redirect_uri` include the scheme, such as `http` or `https`. + +#### 2. Missing Session Secret + +If you deploy APISIX in the [standalone mode](/apisix/production/deployment-modes#standalone-mode), make sure that `session.secret` is configured. + +User sessions are stored in browser as cookies and encrypted with session secret. 
The secret is automatically generated and saved to etcd if no secret is configured through the `session.secret` attribute. However, in standalone mode, etcd is no longer the configuration center. Therefore, you should explicitly configure `session.secret` for this Plugin in the YAML configuration center `apisix.yaml`. + +#### 3. Cookie Not Sent or Absent + +Check if the [`SameSite`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#samesitesamesite-value) cookie attribute is properly set (i.e. if your application needs to send the cookie cross sites) to see if this could be a factor that prevents the cookie being saved to the browser's cookie jar or being sent from the browser. + +#### 4. Upstream Sent Too Big Header + +If you have NGINX sitting in front of APISIX to proxy client traffic, see if you observe the following error in NGINX's `error.log`: + +```text +upstream sent too big header while reading response header from upstream +``` + +If so, try adjusting `proxy_buffers`, `proxy_buffer_size`, and `proxy_busy_buffers_size` to larger values. + +Another option is to configure the `session_content` attribute to adjust which data to store in session. For instance, you can set `session_content.access_token` to `true`. + +#### 5. Invalid Client Secret + +Verify if `client_secret` is valid and correct. An invalid `client_secret` would lead to an authentication failure and no token shall be returned and stored in session. + +#### 6. PKCE IdP Configuration + +If you are enabling PKCE with the authorization code flow, make sure you have configured the IdP client to use PKCE. For example, in Keycloak, you should configure the PKCE challenge method in the client's advanced settings: + +
+PKCE keycloak configuration +
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/opentelemetry.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/opentelemetry.md new file mode 100644 index 0000000..54d930c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/opentelemetry.md @@ -0,0 +1,220 @@ +--- +title: opentelemetry +keywords: + - Apache APISIX + - API Gateway + - Plugin + - OpenTelemetry +description: The opentelemetry Plugin instruments APISIX and sends traces to OpenTelemetry collector based on the OpenTelemetry specification, in binary-encoded OLTP over HTTP. +--- + + + + + + +## Description + +The `opentelemetry` Plugin can be used to report tracing data according to the [OpenTelemetry Specification](https://opentelemetry.io/docs/reference/specification/). + +The Plugin only supports binary-encoded [OLTP over HTTP](https://opentelemetry.io/docs/reference/specification/protocol/otlp/#otlphttp). + +## Configurations + +By default, configurations of the Service name, tenant ID, collector, and batch span processor are pre-configured in [default configuration](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua). 
+
+You can change this configuration of the Plugin through the endpoint `apisix/admin/plugin_metadata/opentelemetry`. For example:
+
+:::note
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/opentelemetry -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "trace_id_source": "x-request-id",
+    "resource": {
+        "service.name": "APISIX"
+    },
+    "collector": {
+        "address": "127.0.0.1:4318",
+        "request_timeout": 3,
+        "request_headers": {
+            "Authorization": "token"
+        }
+    },
+    "batch_span_processor": {
+        "drop_on_queue_full": false,
+        "max_queue_size": 1024,
+        "batch_timeout": 2,
+        "inactive_timeout": 1,
+        "max_export_batch_size": 16
+    },
+    "set_ngx_var": false
+}'
+```
+
+## Attributes
+
+| Name | Type | Required | Default | Valid Values | Description |
+|---------------------------------------|---------------|----------|--------------|--------------|-------------|
+| sampler | object | False | - | - | Sampling configuration. |
+| sampler.name | string | False | `always_off` | ["always_on", "always_off", "trace_id_ratio", "parent_base"] | Sampling strategy.
To always sample, use `always_on`.
To never sample, use `always_off`.
To randomly sample based on a given ratio, use `trace_id_ratio`.
To use the sampling decision of the span's parent, use `parent_base`. If there is no parent, use the root sampler. |
+| sampler.options | object | False | - | - | Parameters for sampling strategy. |
+| sampler.options.fraction | number | False | 0 | [0, 1] | Sampling ratio when the sampling strategy is `trace_id_ratio`. |
+| sampler.options.root | object | False | - | - | Root sampler when the sampling strategy is `parent_base` strategy. |
+| sampler.options.root.name | string | False | - | ["always_on", "always_off", "trace_id_ratio"] | Root sampling strategy. |
+| sampler.options.root.options | object | False | - | - | Root sampling strategy parameters. |
+| sampler.options.root.options.fraction | number | False | 0 | [0, 1] | Root sampling ratio when the sampling strategy is `trace_id_ratio`. |
+| additional_attributes | array[string] | False | - | - | Additional attributes appended to the trace span. Support [built-in variables](https://apisix.apache.org/docs/apisix/apisix-variable/) in values. |
+| additional_header_prefix_attributes | array[string] | False | - | - | Headers or header prefixes appended to the trace span's attributes. For example, use `x-my-header` or `x-my-headers-*` to include all headers with the prefix `x-my-headers-`. |
+
+## Examples
+
+The examples below demonstrate how you can work with the `opentelemetry` Plugin for different scenarios.
+
+### Enable `opentelemetry` Plugin
+
+By default, the `opentelemetry` Plugin is disabled in APISIX. To enable, add the Plugin to your configuration file as such:
+
+```yaml title="config.yaml"
+plugins:
+  - ...
+  - opentelemetry
+```
+
+Reload APISIX for changes to take effect.
+
+See [static configurations](#configurations) for other available options you can configure in `config.yaml`.
+
+### Send Traces to OpenTelemetry
+
+The following example demonstrates how to trace requests to a Route and send traces to OpenTelemetry. 
+ +Start an OpenTelemetry collector instance in Docker: + +```shell +docker run -d --name otel-collector -p 4318:4318 otel/opentelemetry-collector-contrib +``` + +Create a Route with `opentelemetry` Plugin: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "otel-tracing-route", + "uri": "/anything", + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response. + +In OpenTelemetry collector's log, you should see information similar to the following: + +```text +2024-02-18T17:14:03.825Z info ResourceSpans #0 +Resource SchemaURL: +Resource attributes: + -> telemetry.sdk.language: Str(lua) + -> telemetry.sdk.name: Str(opentelemetry-lua) + -> telemetry.sdk.version: Str(0.1.1) + -> hostname: Str(e34673e24631) + -> service.name: Str(APISIX) +ScopeSpans #0 +ScopeSpans SchemaURL: +InstrumentationScope opentelemetry-lua +Span #0 + Trace ID : fbd0a38d4ea4a128ff1a688197bc58b0 + Parent ID : + ID : af3dc7642104748a + Name : GET /anything + Kind : Server + Start time : 2024-02-18 17:14:03.763244032 +0000 UTC + End time : 2024-02-18 17:14:03.920229888 +0000 UTC + Status code : Unset + Status message : +Attributes: + -> net.host.name: Str(127.0.0.1) + -> http.method: Str(GET) + -> http.scheme: Str(http) + -> http.target: Str(/anything) + -> http.user_agent: Str(curl/7.64.1) + -> apisix.route_id: Str(otel-tracing-route) + -> apisix.route_name: Empty() + -> http.route: Str(/anything) + -> http.status_code: Int(200) +{"kind": "exporter", "data_type": "traces", "name": "debug"} +``` + +To visualize these traces, you can export your telemetry to backend Services, such as Zipkin and Prometheus. 
See [exporters](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter) for more details. + +### Using Trace Variables in Logging + +The following example demonstrates how to configure the `opentelemetry` Plugin to set the following built-in variables, which can be used in logger Plugins or access logs: + +- `opentelemetry_context_traceparent`: [trace parent](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format) ID +- `opentelemetry_trace_id`: trace ID of the current span +- `opentelemetry_span_id`: span ID of the current span + +Update the configuration file as below. You should customize the access log format to use the `opentelemetry` Plugin variables, and set `opentelemetry` variables in the `set_ngx_var` field. + +```yaml title="conf/config.yaml" +nginx_config: + http: + enable_access_log: true + access_log_format: '{"time": "$time_iso8601","opentelemetry_context_traceparent": "$opentelemetry_context_traceparent","opentelemetry_trace_id": "$opentelemetry_trace_id","opentelemetry_span_id": "$opentelemetry_span_id","remote_addr": "$remote_addr"}' + access_log_format_escape: json +plugin_attr: + opentelemetry: + set_ngx_var: true +``` + +Reload APISIX for configuration changes to take effect. 
+ +You should see access log entries similar to the following when you generate requests: + +```text +{"time": "18/Feb/2024:15:09:00 +0000","opentelemetry_context_traceparent": "00-fbd0a38d4ea4a128ff1a688197bc58b0-8f4b9d9970a02629-01","opentelemetry_trace_id": "fbd0a38d4ea4a128ff1a688197bc58b0","opentelemetry_span_id": "af3dc7642104748a","remote_addr": "172.10.0.1"} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/openwhisk.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/openwhisk.md new file mode 100644 index 0000000..2d923e3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/openwhisk.md @@ -0,0 +1,139 @@ +--- +title: openwhisk +keywords: + - Apache APISIX + - API Gateway + - Plugin + - OpenWhisk +description: This document contains information about the Apache openwhisk Plugin. +--- + + + +## Description + +The `openwhisk` Plugin is used to integrate APISIX with [Apache OpenWhisk](https://openwhisk.apache.org) serverless platform. + +This Plugin can be configured on a Route and requests will be send to the configured OpenWhisk API endpoint as the upstream. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ----------------- | ------- | -------- | ------- | ------------ | ---------------------------------------------------------------------------------------------------------- | +| api_host | string | True | | | OpenWhisk API host address. For example, `https://localhost:3233`. | +| ssl_verify | boolean | False | true | | When set to `true` verifies the SSL certificate. | +| service_token | string | True | | | OpenWhisk service token. The format is `xxx:xxx` and it is passed through basic auth when calling the API. | +| namespace | string | True | | | OpenWhisk namespace. For example `guest`. | +| action | string | True | | | OpenWhisk action. For example `hello`. 
|
+| result | boolean | False | true | | When set to `true` gets the action metadata (executes the function and gets response). |
+| timeout | integer | False | 60000ms | [1, 60000]ms | OpenWhisk action and HTTP call timeout in ms. |
+| keepalive | boolean | False | true | | When set to `true` keeps the connection alive for reuse. |
+| keepalive_timeout | integer | False | 60000ms | [1000,...]ms | Time in ms for connection to remain idle without closing. |
+| keepalive_pool | integer | False | 5 | [1,...] | Maximum number of requests that can be sent on this connection before closing it. |
+
+:::note
+
+The `timeout` attribute sets the time taken by the OpenWhisk action to execute, and the timeout for the HTTP client in APISIX. OpenWhisk action calls may take time to pull the runtime image and start the container. So, if the value is set too small, it may cause a large number of requests to fail.
+
+OpenWhisk supports timeouts in the range 1ms to 60000ms and it is recommended to set it to at least 1000ms.
+
+:::
+
+## Enable Plugin
+
+Before configuring the Plugin, you need to have OpenWhisk running. The example below shows OpenWhisk in standalone mode:
+
+```shell
+docker run --rm -d \
+  -h openwhisk --name openwhisk \
+  -p 3233:3233 -p 3232:3232 \
+  -v /var/run/docker.sock:/var/run/docker.sock \
+  openwhisk/standalone:nightly
+docker exec openwhisk waitready
+```
+
+Install the [openwhisk-cli](https://github.com/apache/openwhisk-cli) utility.
+
+You can download the released executable binaries wsk for Linux systems from the [openwhisk-cli](https://github.com/apache/openwhisk-cli) repository. 
+ +You can then create an action to test: + +```shell +wsk property set --apihost "http://localhost:3233" --auth "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP" +wsk action update test <(echo 'function main(){return {"ready":true}}') --kind nodejs:14 +``` + +You can now configure the Plugin on a specific Route and point to this running OpenWhisk service: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "openwhisk": { + "api_host": "http://localhost:3233", + "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", + "namespace": "guest", + "action": "test" + } + } +}' +``` + +## Example usage + +Once you have configured the Plugin, you can send a request to the Route and it will invoke the configured action: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +This will give back the response from the action: + +```json +{ "ready": true } +``` + +## Delete Plugin + +To remove the `openwhisk` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/prometheus.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/prometheus.md new file mode 100644 index 0000000..41344ec --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/prometheus.md @@ -0,0 +1,476 @@ +--- +title: prometheus +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Prometheus +description: The prometheus Plugin provides the capability to integrate APISIX with Prometheus for metric collection and continuous monitoring. +--- + + + + + + + +## Description + +The `prometheus` Plugin provides the capability to integrate APISIX with [Prometheus](https://prometheus.io). + +After enabling the Plugin, APISIX will start collecting relevant metrics, such as API requests and latencies, and exporting them in a [text-based exposition format](https://prometheus.io/docs/instrumenting/exposition_formats/#exposition-formats) to Prometheus. You can then create event monitoring and alerting in Prometheus to monitor the health of your API gateway and APIs. + +## Static Configurations + +By default, `prometheus` configurations are pre-configured in the [default configuration](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua). + +To customize these values, add the corresponding configurations to `config.yaml`. For example: + +```yaml +plugin_attr: + prometheus: # Plugin: prometheus attributes + export_uri: /apisix/prometheus/metrics # Set the URI for the Prometheus metrics endpoint. + metric_prefix: apisix_ # Set the prefix for Prometheus metrics generated by APISIX. + enable_export_server: true # Enable the Prometheus export server. 
+ export_addr: # Set the address for the Prometheus export server. + ip: 127.0.0.1 # Set the IP. + port: 9091 # Set the port. + # metrics: # Create extra labels for metrics. + # http_status: # These metrics will be prefixed with `apisix_`. + # extra_labels: # Set the extra labels for http_status metrics. + # - upstream_addr: $upstream_addr + # - status: $upstream_status + # expire: 0 # The expiration time of metrics in seconds. + # 0 means the metrics will not expire. + # http_latency: + # extra_labels: # Set the extra labels for http_latency metrics. + # - upstream_addr: $upstream_addr + # expire: 0 # The expiration time of metrics in seconds. + # 0 means the metrics will not expire. + # bandwidth: + # extra_labels: # Set the extra labels for bandwidth metrics. + # - upstream_addr: $upstream_addr + # expire: 0 # The expiration time of metrics in seconds. + # 0 means the metrics will not expire. + # default_buckets: # Set the default buckets for the `http_latency` metrics histogram. + # - 10 + # - 50 + # - 100 + # - 200 + # - 500 + # - 1000 + # - 2000 + # - 5000 + # - 10000 + # - 30000 + # - 60000 + # - 500 +``` + +You can use the [Nginx variable](https://nginx.org/en/docs/http/ngx_http_core_module.html) to create `extra_labels`. See [add extra labels](#add-extra-labels-for-metrics). + +Reload APISIX for changes to take effect. + +## Attribute + +| Name | Type | Required | Default | Valid values | Description | +| ------------- | ------- | -------- | ------- | ------------ | ------------------------------------------ | +| prefer_name | boolean | | False | | If true, export Route/Service name instead of their ID in Prometheus metrics. | + +## Metrics + +There are different types of metrics in Prometheus. To understand their differences, see [metrics types](https://prometheus.io/docs/concepts/metric_types/). + +The following metrics are exported by the `prometheus` Plugin by default. See [get APISIX metrics](#get-apisix-metrics) for an example. 
Note that some metrics, such as `apisix_batch_process_entries`, are not readily visible if there are no data. + +| Name | Type | Description | +| ------------------------------ | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| apisix_bandwidth | counter | Total amount of traffic flowing through APISIX in bytes. | +| apisix_etcd_modify_indexes | gauge | Number of changes to etcd by APISIX keys. | +| apisix_batch_process_entries | gauge | Number of remaining entries in a batch when sending data in batches, such as with `http logger`, and other logging Plugins. | +| apisix_etcd_reachable | gauge | Whether APISIX can reach etcd. A value of `1` represents reachable and `0` represents unreachable. | +| apisix_http_status | counter | HTTP status codes returned from upstream Services. | +| apisix_http_requests_total | gauge | Number of HTTP requests from clients. | +| apisix_nginx_http_current_connections | gauge | Number of current connections with clients. | +| apisix_nginx_metric_errors_total | counter | Total number of `nginx-lua-prometheus` errors. | +| apisix_http_latency | histogram | HTTP request latency in milliseconds. | +| apisix_node_info | gauge | Information of the APISIX node, such as host name and the current APISIX version. | +| apisix_shared_dict_capacity_bytes | gauge | The total capacity of an [NGINX shared dictionary](https://github.com/openresty/lua-nginx-module#ngxshareddict). | +| apisix_shared_dict_free_space_bytes | gauge | The remaining space in an [NGINX shared dictionary](https://github.com/openresty/lua-nginx-module#ngxshareddict). | +| apisix_upstream_status | gauge | Health check status of upstream nodes, available if health checks are configured on the upstream. A value of `1` represents healthy and `0` represents unhealthy. 
|
+| apisix_stream_connection_total | counter | Total number of connections handled per Stream Route. |
+
+## Labels
+
+[Labels](https://prometheus.io/docs/practices/naming/#labels) are attributes of metrics that are used to differentiate metrics.
+
+For example, the `apisix_http_status` metric can be labeled with `route` information to identify which Route the HTTP status originates from.
+
+The following are labels for a non-exhaustive list of APISIX metrics and their descriptions.
+
+### Labels for `apisix_http_status`
+
+The following labels are used to differentiate `apisix_http_status` metrics.
+
+| Name | Description |
+| ------------ | ----------------------------------------------------------------------------------------------------------------------------- |
+| code | HTTP response code returned by the upstream node. |
+| route | ID of the Route that the HTTP status originates from when `prefer_name` is `false` (default), and name of the Route when `prefer_name` is `true`. Default to an empty string if a request does not match any Route. |
+| matched_uri | URI of the Route that matches the request. Default to an empty string if a request does not match any Route. |
+| matched_host | Host of the Route that matches the request. Default to an empty string if a request does not match any Route, or host is not configured on the Route. |
+| service | ID of the Service that the HTTP status originates from when `prefer_name` is `false` (default), and name of the Service when `prefer_name` is `true`. Default to the configured value of host on the Route if the matched Route does not belong to any Service. |
+| consumer | Name of the Consumer associated with a request. Default to an empty string if no Consumer is associated with the request. |
+| node | IP address of the upstream node. |
+
+### Labels for `apisix_bandwidth`
+
+The following labels are used to differentiate `apisix_bandwidth` metrics. 
+
+| Name | Description |
+| ---------- | ----------------------------------------------------------------------------------------------------------------------------- |
+| type | Type of traffic, `egress` or `ingress`. |
+| route | ID of the Route that bandwidth corresponds to when `prefer_name` is `false` (default), and name of the Route when `prefer_name` is `true`. Default to an empty string if a request does not match any Route. |
+| service | ID of the Service that bandwidth corresponds to when `prefer_name` is `false` (default), and name of the Service when `prefer_name` is `true`. Default to the configured value of host on the Route if the matched Route does not belong to any Service. |
+| consumer | Name of the Consumer associated with a request. Default to an empty string if no Consumer is associated with the request. |
+| node | IP address of the upstream node. |
+
+### Labels for `apisix_http_latency`
+
+The following labels are used to differentiate `apisix_http_latency` metrics.
+
+| Name | Description |
+| ---------- | ----------------------------------------------------------------------------------------------------------------------------------- |
+| type | Type of latencies. See [latency types](#latency-types) for details. |
+| route | ID of the Route that latencies correspond to when `prefer_name` is `false` (default), and name of the Route when `prefer_name` is `true`. Default to an empty string if a request does not match any Route. |
+| service | ID of the Service that latencies correspond to when `prefer_name` is `false` (default), and name of the Service when `prefer_name` is `true`. Default to the configured value of host on the Route if the matched Route does not belong to any Service. |
+| consumer | Name of the Consumer associated with latencies. Default to an empty string if no Consumer is associated with the request. |
+| node | IP address of the upstream node associated with latencies. 
|
+
+#### Latency Types
+
+`apisix_http_latency` can be labeled with one of the three types:
+
+* `request` represents the time elapsed between the first byte was read from the client and the log write after the last byte was sent to the client.
+
+* `upstream` represents the time elapsed waiting on responses from the upstream Service.
+
+* `apisix` represents the difference between the `request` latency and `upstream` latency.
+
+In other words, the APISIX latency is not only attributed to the Lua processing. It should be understood as follows:
+
+```text
+APISIX latency
+  = downstream request time - upstream response time
+  = downstream traffic latency + NGINX latency
+```
+
+### Labels for `apisix_upstream_status`
+
+The following labels are used to differentiate `apisix_upstream_status` metrics.
+
+| Name | Description |
+| ---------- | --------------------------------------------------------------------------------------------------- |
+| name | Resource ID corresponding to the upstream configured with health checks, such as `/apisix/routes/1` and `/apisix/upstreams/1`. |
+| ip | IP address of the upstream node. |
+| port | Port number of the node. |
+
+## Examples
+
+The examples below demonstrate how you can work with the `prometheus` Plugin for different scenarios.
+
+### Get APISIX Metrics
+
+The following example demonstrates how you can get metrics from APISIX.
+
+The default Prometheus metrics endpoint and other Prometheus related configurations can be found in the [static configuration](#static-configurations). If you would like to customize these configurations, update `config.yaml` and reload APISIX. 
+ +If you deploy APISIX in a containerized environment and would like to access the Prometheus metrics endpoint externally, update the configuration file as follows and reload APISIX: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: + export_addr: + ip: 0.0.0.0 +``` + +Send a request to the APISIX Prometheus metrics endpoint: + +```shell +curl "http://127.0.0.1:9091/apisix/prometheus/metrics" +``` + +You should see an output similar to the following: + +```text +# HELP apisix_bandwidth Total bandwidth in bytes consumed per Service in Apisix +# TYPE apisix_bandwidth counter +apisix_bandwidth{type="egress",route="",service="",consumer="",node=""} 8417 +apisix_bandwidth{type="egress",route="1",service="",consumer="",node="127.0.0.1"} 1420 +apisix_bandwidth{type="egress",route="2",service="",consumer="",node="127.0.0.1"} 1420 +apisix_bandwidth{type="ingress",route="",service="",consumer="",node=""} 189 +apisix_bandwidth{type="ingress",route="1",service="",consumer="",node="127.0.0.1"} 332 +apisix_bandwidth{type="ingress",route="2",service="",consumer="",node="127.0.0.1"} 332 +# HELP apisix_etcd_modify_indexes Etcd modify index for APISIX keys +# TYPE apisix_etcd_modify_indexes gauge +apisix_etcd_modify_indexes{key="consumers"} 0 +apisix_etcd_modify_indexes{key="global_rules"} 0 +... +``` + +### Expose APISIX Metrics on Public API Endpoint + +The following example demonstrates how you can disable the Prometheus export server that, by default, exposes an endpoint on port `9091`, and expose APISIX Prometheus metrics on a new public API endpoint on port `9080`, which APISIX uses to listen to other client requests. 
+ +Disable the Prometheus export server in the configuration file and reload APISIX for changes to take effect: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: + enable_export_server: false +``` + +Next, create a Route with [`public-api`](../../../en/latest/plugins/public-api.md) Plugin and expose a public API endpoint for APISIX metrics: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/prometheus-metrics" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "uri": "/apisix/prometheus/metrics", + "plugins": { + "public-api": {} + } + }' +``` + +Send a request to the new metrics endpoint to verify: + +```shell +curl "http://127.0.0.1:9080/apisix/prometheus/metrics" +``` + +You should see an output similar to the following: + +```text +# HELP apisix_http_requests_total The total number of client requests since APISIX started +# TYPE apisix_http_requests_total gauge +apisix_http_requests_total 1 +# HELP apisix_nginx_http_current_connections Number of HTTP connections +# TYPE apisix_nginx_http_current_connections gauge +apisix_nginx_http_current_connections{state="accepted"} 1 +apisix_nginx_http_current_connections{state="active"} 1 +apisix_nginx_http_current_connections{state="handled"} 1 +apisix_nginx_http_current_connections{state="reading"} 0 +apisix_nginx_http_current_connections{state="waiting"} 0 +apisix_nginx_http_current_connections{state="writing"} 1 +... +``` + +### Monitor Upstream Health Statuses + +The following example demonstrates how to monitor the health status of upstream nodes. 
+ +Create a Route with the `prometheus` Plugin and configure upstream active health checks: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "prometheus-route", + "uri": "/get", + "plugins": { + "prometheus": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1, + "127.0.0.1:20001": 1 + }, + "checks": { + "active": { + "timeout": 5, + "http_path": "/status", + "healthy": { + "interval": 2, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + }, + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [500], + "http_failures": 3, + "tcp_failures": 3 + } + } + } + } + }' +``` + +Send a request to the APISIX Prometheus metrics endpoint: + +```shell +curl "http://127.0.0.1:9091/apisix/prometheus/metrics" +``` + +You should see an output similar to the following: + +```text +# HELP apisix_upstream_status upstream status from health check +# TYPE apisix_upstream_status gauge +apisix_upstream_status{name="/apisix/routes/1",ip="54.237.103.220",port="80"} 1 +apisix_upstream_status{name="/apisix/routes/1",ip="127.0.0.1",port="20001"} 0 +``` + +This shows that the upstream node `httpbin.org:80` is healthy and the upstream node `127.0.0.1:20001` is unhealthy. + +### Add Extra Labels for Metrics + +The following example demonstrates how to add additional labels to metrics and use the [Nginx variable](https://nginx.org/en/docs/http/ngx_http_core_module.html) in label values. + +Currently, only the following metrics support extra labels: + +* apisix_http_status +* apisix_http_latency +* apisix_bandwidth + +Include the following configurations in the configuration file to add labels for metrics and reload APISIX for changes to take effect: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: # Plugin: prometheus + metrics: # Create extra labels from the NGINX variables. 
+      http_status:
+        extra_labels:                      # Set the extra labels for http_status metrics.
+          - upstream_addr: $upstream_addr  # Add an extra upstream_addr label with value being the NGINX variable $upstream_addr.
+          - route_name: $route_name        # Add an extra route_name label with value being the APISIX variable $route_name.
+```
+
+Note that if you define a variable in the label value but it does not correspond to any existing [APISIX variables](https://apisix.apache.org/docs/apisix/apisix-variable/) and [Nginx variable](https://nginx.org/en/docs/http/ngx_http_core_module.html), the label value will default to an empty string.
+
+Create a Route with the `prometheus` Plugin:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "prometheus-route",
+    "uri": "/get",
+    "name": "extra-label",
+    "plugins": {
+      "prometheus": {}
+    },
+    "upstream": {
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to the Route to verify:
+
+```shell
+curl -i "http://127.0.0.1:9080/get"
+```
+
+You should see an `HTTP/1.1 200 OK` response.
+
+Send a request to the APISIX Prometheus metrics endpoint:
+
+```shell
+curl "http://127.0.0.1:9091/apisix/prometheus/metrics"
+```
+
+You should see an output similar to the following:
+
+```text
+# HELP apisix_http_status HTTP status codes per Service in APISIX
+# TYPE apisix_http_status counter
+apisix_http_status{code="200",route="1",matched_uri="/get",matched_host="",service="",consumer="",node="54.237.103.220",upstream_addr="54.237.103.220:80",route_name="extra-label"} 1
+```
+
+### Monitor TCP/UDP Traffic with Prometheus
+
+The following example demonstrates how to collect TCP/UDP traffic metrics in APISIX.
+
+Include the following configurations in `config.yaml` to enable stream proxy and `prometheus` Plugin for stream proxy. 
Reload APISIX for changes to take effect:
+
+```yaml title="conf/config.yaml"
+apisix:
+  proxy_mode: http&stream  # Enable both L4 & L7 proxies
+  stream_proxy:            # Configure L4 proxy
+    tcp:
+      - 9100               # Set TCP proxy listening port
+    udp:
+      - 9200               # Set UDP proxy listening port
+
+stream_plugins:
+  - prometheus             # Enable prometheus for stream proxy
+```
+
+Create a Stream Route with the `prometheus` Plugin:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/stream_routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "plugins": {
+      "prometheus":{}
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to the Stream Route to verify:
+
+```shell
+curl -i "http://127.0.0.1:9100"
+```
+
+You should see an `HTTP/1.1 200 OK` response.
+
+Send a request to the APISIX Prometheus metrics endpoint:
+
+```shell
+curl "http://127.0.0.1:9091/apisix/prometheus/metrics"
+```
+
+You should see an output similar to the following:
+
+```text
+# HELP apisix_stream_connection_total Total number of connections handled per Stream Route in APISIX
+# TYPE apisix_stream_connection_total counter
+apisix_stream_connection_total{route="1"} 1
+```
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-cache.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-cache.md
new file mode 100644
index 0000000..f55cdf0
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-cache.md
@@ -0,0 +1,379 @@
+---
+title: proxy-cache
+keywords:
+  - Apache APISIX
+  - API Gateway
+  - Proxy Cache
+description: The proxy-cache Plugin caches responses based on keys, supporting disk and memory caching for GET, POST, and HEAD requests, enhancing API performance. 
+--- + + + + + + + +## Description + +The `proxy-cache` Plugin provides the capability to cache responses based on a cache key. The Plugin supports both disk-based and memory-based caching options to cache for [GET](https://anything.org/learn/serving-over-http/#get-request), [POST](https://anything.org/learn/serving-over-http/#post-request), and [HEAD](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/HEAD) requests. + +Responses can be conditionally cached based on request HTTP methods, response status codes, request header values, and more. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|--------------------|----------------|----------|---------------------------|-------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| cache_strategy | string | False | disk | ["disk","memory"] | Caching strategy. Cache on disk or in memory. | +| cache_zone | string | False | disk_cache_one | | Cache zone used with the caching strategy. The value should match one of the cache zones defined in the [configuration files](#static-configurations) and should correspond to the caching strategy. For example, when using the in-memory caching strategy, you should use an in-memory cache zone. | +| cache_key | array[string] | False | ["$host", "$request_uri"] | | Key to use for caching. Support [NGINX variables](https://nginx.org/en/docs/varindex.html) and constant strings in values. Variables should be prefixed with a `$` sign. | +| cache_bypass | array[string] | False | | | One or more parameters to parse value from, such that if any of the values is not empty and is not equal to `0`, response will not be retrieved from cache. 
Support [NGINX variables](https://nginx.org/en/docs/varindex.html) and constant strings in values. Variables should be prefixed with a `$` sign. | +| cache_method | array[string] | False | ["GET", "HEAD"] | ["GET", "POST", "HEAD"] | Request methods of which the response should be cached. | +| cache_http_status | array[integer] | False | [200, 301, 404] | [200, 599] | Response HTTP status codes of which the response should be cached. | +| hide_cache_headers | boolean | False | false | | If true, hide `Expires` and `Cache-Control` response headers. | +| cache_control | boolean | False | false | | If true, comply with `Cache-Control` behavior in the HTTP specification. Only valid for in-memory strategy. | +| no_cache | array[string] | False | | | One or more parameters to parse value from, such that if any of the values is not empty and is not equal to `0`, response will not be cached. Support [NGINX variables](https://nginx.org/en/docs/varindex.html) and constant strings in values. Variables should be prefixed with a `$` sign. | +| cache_ttl | integer | False | 300 | >=1 | Cache time to live (TTL) in seconds when caching in memory. To adjust the TTL when caching on disk, update `cache_ttl` in the [configuration files](#static-configurations). The TTL value is evaluated in conjunction with the values in the response headers [`Cache-Control`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control) and [`Expires`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expires) received from the Upstream service. | + +## Static Configurations + +By default, values such as `cache_ttl` when caching on disk and cache `zones` are pre-configured in the [default configuration](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua). + +To customize these values, add the corresponding configurations to `config.yaml`. 
For example:
+
+```yaml
+apisix:
+  proxy_cache:
+    cache_ttl: 10s  # default cache TTL used when caching on disk, only if none of the `Expires`
+                    # and `Cache-Control` response headers is present, or if APISIX returns
+                    # `502 Bad Gateway` or `504 Gateway Timeout` due to unavailable upstreams
+    zones:
+      - name: disk_cache_one
+        memory_size: 50m
+        disk_size: 1G
+        disk_path: /tmp/disk_cache_one
+        cache_levels: 1:2
+      # - name: disk_cache_two
+      #   memory_size: 50m
+      #   disk_size: 1G
+      #   disk_path: "/tmp/disk_cache_two"
+      #   cache_levels: "1:2"
+      - name: memory_cache
+        memory_size: 50m
+```
+
+Reload APISIX for changes to take effect.
+
+## Examples
+
+The examples below demonstrate how you can configure `proxy-cache` for different scenarios.
+
+:::note
+
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+### Cache Data on Disk
+
+On-disk caching strategy offers the advantages of data persistency when system restarts and having larger storage capacity compared to in-memory cache. It is suitable for applications that prioritize durability and can tolerate slightly larger cache access latency.
+
+The following example demonstrates how you can use `proxy-cache` Plugin on a Route to cache data on disk.
+
+When using the on-disk caching strategy, the cache TTL is determined by the value of the response header `Expires` or `Cache-Control`. If none of these headers is present or if APISIX returns `502 Bad Gateway` or `504 Gateway Timeout` due to unavailable Upstreams, the cache TTL defaults to the value configured in the [configuration files](#static-configurations). 
+ +Create a Route with the `proxy-cache` Plugin to cache data on disk: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": "/anything", + "plugins": { + "proxy-cache": { + "cache_strategy": "disk" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should see an `HTTP/1.1 200 OK` response with the following header, showing the Plugin is successfully enabled: + +```text +Apisix-Cache-Status: MISS +``` + +As there is no cache available before the first response, `Apisix-Cache-Status: MISS` is shown. + +Send the same request again within the cache TTL window. You should see an `HTTP/1.1 200 OK` response with the following headers, showing the cache is hit: + +```text +Apisix-Cache-Status: HIT +``` + +Wait for the cache to expire after the TTL and send the same request again. You should see an `HTTP/1.1 200 OK` response with the following headers, showing the cache has expired: + +```text +Apisix-Cache-Status: EXPIRED +``` + +### Cache Data in Memory + +In-memory caching strategy offers the advantage of low-latency access to the cached data, as retrieving data from RAM is faster than retrieving data from disk storage. It also works well for storing temporary data that does not need to be persisted long-term, allowing for efficient caching of frequently changing data. + +The following example demonstrates how you can use `proxy-cache` Plugin on a Route to cache data in memory. 
+ +Create a Route with `proxy-cache` and configure it to use memory-based caching: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": "/anything", + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_zone": "memory_cache", + "cache_ttl": 10 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should see an `HTTP/1.1 200 OK` response with the following header, showing the Plugin is successfully enabled: + +```text +Apisix-Cache-Status: MISS +``` + +As there is no cache available before the first response, `Apisix-Cache-Status: MISS` is shown. + +Send the same request again within the cache TTL window. You should see an `HTTP/1.1 200 OK` response with the following headers, showing the cache is hit: + +```text +Apisix-Cache-Status: HIT +``` + +### Cache Responses Conditionally + +The following example demonstrates how you can configure the `proxy-cache` Plugin to conditionally cache responses. 
+ +Create a Route with the `proxy-cache` Plugin and configure the `no_cache` attribute, such that if at least one of the values of the URL parameter `no_cache` and header `no_cache` is not empty and is not equal to `0`, the response will not be cached: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": "/anything", + "plugins": { + "proxy-cache": { + "no_cache": ["$arg_no_cache", "$http_no_cache"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +Send a few requests to the Route with the URL parameter `no_cache` value indicating cache bypass: + +```shell +curl -i "http://127.0.0.1:9080/anything?no_cache=1" +``` + +You should receive `HTTP/1.1 200 OK` responses for all requests and observe the following header every time: + +```text +Apisix-Cache-Status: EXPIRED +``` + +Send a few other requests to the Route with the URL parameter `no_cache` value being zero: + +```shell +curl -i "http://127.0.0.1:9080/anything?no_cache=0" +``` + +You should receive `HTTP/1.1 200 OK` responses for all requests and start seeing the cache being hit: + +```text +Apisix-Cache-Status: HIT +``` + +You can also specify the value in the `no_cache` header as such: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H "no_cache: 1" +``` + +The response should not be cached: + +```text +Apisix-Cache-Status: EXPIRED +``` + +### Retrieve Responses from Cache Conditionally + +The following example demonstrates how you can configure the `proxy-cache` Plugin to conditionally retrieve responses from cache. 
+ +Create a Route with the `proxy-cache` Plugin and configure the `cache_bypass` attribute, such that if at least one of the values of the URL parameter `bypass` and header `bypass` is not empty and is not equal to `0`, the response will not be retrieved from the cache: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": "/anything", + "plugins": { + "proxy-cache": { + "cache_bypass": ["$arg_bypass", "$http_bypass"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +Send a request to the Route with the URL parameter `bypass` value indicating cache bypass: + +```shell +curl -i "http://127.0.0.1:9080/anything?bypass=1" +``` + +You should see an `HTTP/1.1 200 OK` response with the following header: + +```text +Apisix-Cache-Status: BYPASS +``` + +Send another request to the Route with the URL parameter `bypass` value being zero: + +```shell +curl -i "http://127.0.0.1:9080/anything?bypass=0" +``` + +You should see an `HTTP/1.1 200 OK` response with the following header: + +```text +Apisix-Cache-Status: MISS +``` + +You can also specify the value in the `bypass` header as such: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H "bypass: 1" +``` + +The cache should be bypassed: + +```text +Apisix-Cache-Status: BYPASS +``` + +### Cache for 502 and 504 Error Response Code + +When the Upstream services return server errors in the 500 range, `proxy-cache` Plugin will cache the responses if and only if the returned status is `502 Bad Gateway` or `504 Gateway Timeout`. + +The following example demonstrates the behavior of `proxy-cache` Plugin when the Upstream service returns `504 Gateway Timeout`. 
+ +Create a Route with the `proxy-cache` Plugin and configure a dummy Upstream service: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": "/timeout", + "plugins": { + "proxy-cache": { } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "12.34.56.78": 1 + } + } + }' +``` + +Generate a few requests to the Route: + +```shell +seq 4 | xargs -I{} curl -I "http://127.0.0.1:9080/timeout" +``` + +You should see a response similar to the following: + +```text +HTTP/1.1 504 Gateway Time-out +... +Apisix-Cache-Status: MISS + +HTTP/1.1 504 Gateway Time-out +... +Apisix-Cache-Status: HIT + +HTTP/1.1 504 Gateway Time-out +... +Apisix-Cache-Status: HIT + +HTTP/1.1 504 Gateway Time-out +... +Apisix-Cache-Status: HIT +``` + +However, if the Upstream services returns `503 Service Temporarily Unavailable`, the response will not be cached. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-control.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-control.md new file mode 100644 index 0000000..b0f1cb9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-control.md @@ -0,0 +1,103 @@ +--- +title: proxy-control +keywords: + - Apache APISIX + - API Gateway + - Proxy Control +description: This document contains information about the Apache APISIX proxy-control Plugin, you can use it to control the behavior of the NGINX proxy dynamically. +--- + + + +## Description + +The proxy-control Plugin dynamically controls the behavior of the NGINX proxy. + +:::info IMPORTANT + +This Plugin requires APISIX to run on [APISIX-Runtime](../FAQ.md#how-do-i-build-the-apisix-runtime-environment). See [apisix-build-tools](https://github.com/api7/apisix-build-tools) for more info. 
+ +::: + +## Attributes + +| Name | Type | Required | Default | Description | +| ----------------- | ------- | -------- | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| request_buffering | boolean | False | true | When set to `true`, the Plugin dynamically sets the [`proxy_request_buffering`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering) directive. | + +## Enable Plugin + +The example below enables the Plugin on a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/upload", + "plugins": { + "proxy-control": { + "request_buffering": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +The example below shows the use case of uploading a big file: + +```shell +curl -i http://127.0.0.1:9080/upload -d @very_big_file +``` + +It's expected to not find a message "a client request body is buffered to a temporary file" in the error log. + +## Delete Plugin + +To remove the `proxy-control` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/upload", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-mirror.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-mirror.md new file mode 100644 index 0000000..fc943f2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-mirror.md @@ -0,0 +1,145 @@ +--- +title: proxy-mirror +keywords: + - Apache APISIX + - API Gateway + - Proxy Mirror +description: The proxy-mirror Plugin duplicates ingress traffic to APISIX and forwards them to a designated Upstream without interrupting the regular services. +--- + + + + + + + +## Description + +The `proxy-mirror` Plugin duplicates ingress traffic to APISIX and forwards them to a designated upstream, without interrupting the regular services. You can configure the Plugin to mirror all traffic or only a portion. The mechanism benefits a few use cases, including troubleshooting, security inspection, analytics, and more. + +Note that APISIX ignores any response from the Upstream host receiving mirrored traffic. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|--------------|--------|----------|---------|--------------|---------------------------------------------------------------------------------------------------------------------------| +| host | string | True | | | Address of the host to forward the mirrored traffic to. The address should contain the scheme but without the path, such as `http://127.0.0.1:8081`. | +| path | string | False | | | Path of the host to forward the mirrored traffic to. If unspecified, default to the current URI path of the Route. Not applicable if the Plugin is mirroring gRPC traffic. 
| +| path_concat_mode | string | False | replace | ["replace", "prefix"] | Concatenation mode when `path` is specified. When set to `replace`, the configured `path` would be directly used as the path of the host to forward the mirrored traffic to. When set to `prefix`, the path to forward to would be the configured `path`, appended by the requested URI path of the Route. Not applicable if the Plugin is mirroring gRPC traffic. | +| sample_ratio | number | False | 1 | [0.00001, 1] | Ratio of the requests that will be mirrored. By default, all traffic are mirrored. | + +## Static Configurations + +By default, timeout values for the Plugin are pre-configured in the [default configuration](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua). + +To customize these values, add the corresponding configurations to `config.yaml`. For example: + +```yaml +plugin_attr: + proxy-mirror: + timeout: + connect: 60s + read: 60s + send: 60s +``` + +Reload APISIX for changes to take effect. + +## Examples + +The examples below demonstrate how to configure `proxy-mirror` for different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Mirror Partial Traffic + +The following example demonstrates how you can configure `proxy-mirror` to mirror 50% of the traffic to a Route and forward them to another Upstream service. + +Start a sample NGINX server for receiving mirrored traffic: + +```shell +docker run -p 8081:80 --name nginx nginx +``` + +You should see NGINX access log and error log on the terminal session. 
+
+Open a new terminal session and create a Route with `proxy-mirror` to mirror 50% of the traffic:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "traffic-mirror-route",
+    "uri": "/get",
+    "plugins": {
+      "proxy-mirror": {
+        "host": "http://127.0.0.1:8081",
+        "sample_ratio": 0.5
+      }
+    },
+    "upstream": {
+      "nodes": {
+        "httpbin.org": 1
+      },
+      "type": "roundrobin"
+    }
+  }'
+```
+
+Generate a few requests to the Route:
+
+```shell
+curl -i "http://127.0.0.1:9080/get"
+```
+
+You should receive `HTTP/1.1 200 OK` responses for all requests.
+
+Navigating back to the NGINX terminal session, you should see a number of access log entries, roughly half the number of requests generated:
+
+```text
+172.17.0.1 - - [29/Jan/2024:23:11:01 +0000] "GET /get HTTP/1.1" 404 153 "-" "curl/7.64.1" "-"
+```
+
+This suggests APISIX has mirrored the request to the NGINX server. Here, the HTTP response status is `404` since the sample NGINX server does not implement the Route.
+
+### Configure Mirroring Timeouts
+
+The following example demonstrates how you can update the default connect, read, and send timeouts for the Plugin. This could be useful when mirroring traffic to a very slow backend service.
+
+As the request mirroring was implemented as sub-requests, excessive delays in the sub-requests could lead to the blocking of the original requests. By default, the connect, read, and send timeouts are set to 60 seconds. To update these values, you can configure them in the `plugin_attr` section of the configuration file as such:
+
+```yaml title="conf/config.yaml"
+plugin_attr:
+  proxy-mirror:
+    timeout:
+      connect: 2000ms
+      read: 2000ms
+      send: 2000ms
+```
+
+Reload APISIX for changes to take effect. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-rewrite.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-rewrite.md new file mode 100644 index 0000000..8ba6e82 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/proxy-rewrite.md @@ -0,0 +1,509 @@ +--- +title: proxy-rewrite +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Proxy Rewrite + - proxy-rewrite +description: The proxy-rewrite Plugin offers options to rewrite requests that APISIX forwards to Upstream services. With this plugin, you can modify the HTTP methods, request destination Upstream addresses, request headers, and more. +--- + + + + + + + +## Description + +The `proxy-rewrite` Plugin offers options to rewrite requests that APISIX forwards to Upstream services. With this plugin, you can modify the HTTP methods, request destination Upstream addresses, request headers, and more. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-----------------------------|---------------|----------|---------|----------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| uri | string | False | | | New Upstream URI path. Value supports [NGINX variables](https://nginx.org/en/docs/http/ngx_http_core_module.html). For example, `$arg_name`. 
|
+| method | string | False | | ["GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS","MKCOL", "COPY", "MOVE", "PROPFIND", "PROPFIND","LOCK", "UNLOCK", "PATCH", "TRACE"] | HTTP method to rewrite requests to use. |
+| regex_uri | array[string] | False | | | Regular expressions used to match the URI path from client requests and compose a new Upstream URI path. When both `uri` and `regex_uri` are configured, `uri` has a higher priority. The array should contain one or more **key-value pairs**, with the key being the regular expression to match URI against and value being the new Upstream URI path. For example, with `["^/iresty/(.*)/(.*)", "/$1-$2", "^/theothers/*", "/theothers"]`, if a request is originally sent to `/iresty/hello/world`, the Plugin will rewrite the Upstream URI path to `/iresty/hello-world`; if a request is originally sent to `/theothers/hello/world`, the Plugin will rewrite the Upstream URI path to `/theothers`. |
+| host | string | False | | | Set [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) request header. |
+| headers | object | False | | | Header actions to be executed. Can be set to objects of action verbs `add`, `remove`, and/or `set`; or an object consisting of headers to be `set`. When multiple action verbs are configured, actions are executed in the order of `add`, `remove`, and `set`. |
+| headers.add | object | False | | | Headers to append to requests. If a header is already present in the request, the header value will be appended. Header value could be set to a constant, one or more [NGINX variables](https://nginx.org/en/docs/http/ngx_http_core_module.html), or the matched result of `regex_uri` using variables such as `$1-$2-$3`. |
+| headers.set | object | False | | | Headers to set to requests. If a header is already present in the request, the header value will be overwritten. 
Header value could be set to a constant, one or more [NGINX variables](https://nginx.org/en/docs/http/ngx_http_core_module.html), or the matched result of `regex_uri` using variables such as `$1-$2-$3`. Should not be used to set `Host`. | +| headers.remove | array[string] | False | | | Headers to remove from requests. +| use_real_request_uri_unsafe | boolean | False | false | | If true, bypass URI normalization and allow for the full original request URI. Enabling this option is considered unsafe. | + +## Examples + +The examples below demonstrate how you can configure `proxy-rewrite` on a Route in different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Rewrite Host Header + +The following example demonstrates how you can modify the `Host` header in a request. Note that you should not use `headers.set` to set the `Host` header. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/headers", + "plugins": { + "proxy-rewrite": { + "host": "myapisix.demo" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to `/headers` to check all the request headers sent to upstream: + +```shell +curl "http://127.0.0.1:9080/headers" +``` + +You should see a response similar to the following: + +```text +{ + "headers": { + "Accept": "*/*", + "Host": "myapisix.demo", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id": "Root=1-64fef198-29da0970383150175bd2d76d", + "X-Forwarded-Host": "127.0.0.1" + } +} +``` + +### Rewrite URI And Set Headers + +The following example demonstrates how you can rewrite the request Upstream URI and set additional header values. 
If the same headers are present in the client request, the corresponding header values set in the Plugin will overwrite the values present in the client request.
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "proxy-rewrite-route",
+    "methods": ["GET"],
+    "uri": "/",
+    "plugins": {
+      "proxy-rewrite": {
+        "uri": "/anything",
+        "headers": {
+          "set": {
+            "X-Api-Version": "v1",
+            "X-Api-Engine": "apisix"
+          }
+        }
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to verify:
+
+```shell
+curl "http://127.0.0.1:9080/" -H 'X-Api-Version: v2'
+```
+
+You should see a response similar to the following:
+
+```text
+{
+  "args": {},
+  "data": "",
+  "files": {},
+  "form": {},
+  "headers": {
+    "Accept": "*/*",
+    "Host": "httpbin.org",
+    "User-Agent": "curl/8.2.1",
+    "X-Amzn-Trace-Id": "Root=1-64fed73a-59cd3bd640d76ab16c97f1f1",
+    "X-Api-Engine": "apisix",
+    "X-Api-Version": "v1",
+    "X-Forwarded-Host": "127.0.0.1"
+  },
+  "json": null,
+  "method": "GET",
+  "origin": "::1, 103.248.35.179",
+  "url": "http://localhost/anything"
+}
+```
+
+Note that both headers are present, and the header value of `X-Api-Version` configured in the Plugin overwrites the header value passed in the request.
+
+### Rewrite URI And Append Headers
+
+The following example demonstrates how you can rewrite the request Upstream URI and append additional header values. If the same headers are present in the client request, their header values will be appended to the configured header values in the plugin.
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "proxy-rewrite-route",
+    "methods": ["GET"],
+    "uri": "/",
+    "plugins": {
+      "proxy-rewrite": {
+        "uri": "/headers",
+        "headers": {
+          "add": {
+            "X-Api-Version": "v1",
+            "X-Api-Engine": "apisix"
+          }
+        }
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to verify:
+
+```shell
+curl "http://127.0.0.1:9080/" -H 'X-Api-Version: v2'
+```
+
+You should see a response similar to the following:
+
+```text
+{
+  "headers": {
+    "Accept": "*/*",
+    "Host": "httpbin.org",
+    "User-Agent": "curl/8.2.1",
+    "X-Amzn-Trace-Id": "Root=1-64fed73a-59cd3bd640d76ab16c97f1f1",
+    "X-Api-Engine": "apisix",
+    "X-Api-Version": "v1,v2",
+    "X-Forwarded-Host": "127.0.0.1"
+  }
+}
+```
+
+Note that both headers are present, and the header value of `X-Api-Version` configured in the Plugin is appended with the header value passed in the request.
+
+### Remove Existing Header
+
+The following example demonstrates how you can remove an existing header `User-Agent`.
+ +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/headers", + "plugins": { + "proxy-rewrite": { + "headers": { + "remove":[ + "User-Agent" + ] + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to verify if the specified header is removed: + +```shell +curl "http://127.0.0.1:9080/headers" +``` + +You should see a response similar to the following, where the `User-Agent` header is not present: + +```text +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "X-Amzn-Trace-Id": "Root=1-64fef302-07f2b13e0eb006ba776ad91d", + "X-Forwarded-Host": "127.0.0.1" + } +} +``` + +### Rewrite URI Using RegEx + +The following example demonstrates how you can parse text from the original Upstream URI path and use them to compose a new Upstream URI path. In this example, APISIX is configured to forward all requests from `/test/user/agent` to `/user-agent`. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "uri": "/test/*", + "plugins": { + # highlight-start + "proxy-rewrite": { + "regex_uri": ["^/test/(.*)/(.*)", "/$1-$2"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to `/test/user/agent` to check if it is redirected to `/user-agent`: + +```shell +curl "http://127.0.0.1:9080/test/user/agent" +``` + +You should see a response similar to the following: + +```text +{ + "user-agent": "curl/8.2.1" +} +``` + +### Add URL Parameters + +The following example demonstrates how you can add URL parameters to the request. 
+ +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/get", + "plugins": { + "proxy-rewrite": { + "uri": "/get?arg1=apisix&arg2=plugin" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to verify if the URL parameters are also forwarded to upstream: + +```shell +curl "http://127.0.0.1:9080/get" +``` + +You should see a response similar to the following: + +```text +{ + "args": { + "arg1": "apisix", + "arg2": "plugin" + }, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id": "Root=1-64fef6dc-2b0e09591db7353a275cdae4", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "127.0.0.1, 103.248.35.148", + # highlight-next-line + "url": "http://127.0.0.1/get?arg1=apisix&arg2=plugin" +} +``` + +### Rewrite HTTP Method + +The following example demonstrates how you can rewrite a GET request into a POST request. 
+ +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/get", + "plugins": { + "proxy-rewrite": { + "uri": "/anything", + "method":"POST" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a GET request to `/get` to verify if it is transformed into a POST request to `/anything`: + +```shell +curl "http://127.0.0.1:9080/get" +``` + +You should see a response similar to the following: + +```text +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id": "Root=1-64fef7de-0c63387645353998196317f2", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "POST", + "origin": "::1, 103.248.35.179", + "url": "http://localhost/anything" +} +``` + +### Forward Consumer Names to Upstream + +The following example demonstrates how you can forward the name of consumers who authenticates successfully to Upstream services. As an example, you will be using `key-auth` as the authentication method. 
+ +Create a Consumer `JohnDoe`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "JohnDoe" + }' +``` + +Create `key-auth` credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/JohnDoe/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +Next, create a Route with key authentication enabled, configure `proxy-rewrite` to add Consumer name to the header, and remove the authentication key so that it is not visible to the Upstream service: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "consumer-restricted-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "proxy-rewrite": { + "headers": { + "set": { + "X-Apisix-Consumer": "$consumer_name" + }, + "remove": [ "Apikey" ] + } + } + }, + "upstream" : { + "nodes": { + "httpbin.org":1 + } + } + }' +``` + +Send a request to the Route as Consumer `JohnDoe`: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: john-key' +``` + +You should receive an `HTTP/1.1 200 OK` response with the following body: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.4.0", + "X-Amzn-Trace-Id": "Root=1-664b01a6-2163c0156ed4bff51d87d877", + "X-Apisix-Consumer": "JohnDoe", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "172.19.0.1, 203.12.12.12", + "url": "http://127.0.0.1/get" +} +``` + +Send another request to the Route without the valid credential: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +You should receive an `HTTP/1.1 403 Forbidden` response. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/public-api.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/public-api.md new file mode 100644 index 0000000..c9c62c5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/public-api.md @@ -0,0 +1,245 @@ +--- +title: public-api +keywords: + - Apache APISIX + - API Gateway + - Public API +description: The public-api plugin exposes an internal API endpoint, making it publicly accessible. One of the primary use cases of this plugin is to expose internal endpoints created by other plugins. +--- + + + + + + + +## Description + +The `public-api` Plugin exposes an internal API endpoint, making it publicly accessible. One of the primary use cases of this Plugin is to expose internal endpoints created by other Plugins. + +## Attributes + +| Name | Type | Required | Default | Valid Values | Description | +|---------|-----------|----------|---------|--------------|-------------| +| uri | string | False | | | Internal endpoint to expose. If not configured, expose the Route URI. | + +## Examples + +The examples below demonstrate how you can configure `public-api` in different scenarios. + +### Expose Prometheus Metrics at Custom Endpoint + +The following example demonstrates how you can disable the Prometheus export server that, by default, exposes an endpoint on port `9091`, and expose APISIX Prometheus metrics on a new public API endpoint on port `9080`, which APISIX uses to listen to other client requests. + +You will also configure the Route such that the internal endpoint `/apisix/prometheus/metrics` is exposed at a custom endpoint. + +:::caution + +If a large quantity of metrics is being collected, the Plugin could take up a significant amount of CPU resources for metric computations and negatively impact the processing of regular requests. 
+ +To address this issue, APISIX uses [privileged agent](https://github.com/openresty/lua-resty-core/blob/master/lib/ngx/process.md#enable_privileged_agent) and offloads the metric computations to a separate process. This optimization applies automatically if you use the metric endpoint configured under `plugin_attr.prometheus.export_addr` in the configuration file. If you expose the metric endpoint with the `public-api` Plugin, you will not benefit from this optimization. + +::: + +Disable the Prometheus export server in the configuration file and reload APISIX for changes to take effect: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: + enable_export_server: false +``` + +Next, create a Route with the `public-api` Plugin and expose a public API endpoint for APISIX metrics. You should set the Route `uri` to the custom endpoint path and set the Plugin `uri` to the internal endpoint to be exposed. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "prometheus-metrics", + "uri": "/prometheus_metrics", + "plugins": { + "public-api": { + "uri": "/apisix/prometheus/metrics" + } + } + }' +``` + +Send a request to the custom metrics endpoint: + +```shell +curl "http://127.0.0.1:9080/prometheus_metrics" +``` + +You should see an output similar to the following: + +```text +# HELP apisix_http_requests_total The total number of client requests since APISIX started +# TYPE apisix_http_requests_total gauge +apisix_http_requests_total 1 +# HELP apisix_nginx_http_current_connections Number of HTTP connections +# TYPE apisix_nginx_http_current_connections gauge +apisix_nginx_http_current_connections{state="accepted"} 1 +apisix_nginx_http_current_connections{state="active"} 1 +apisix_nginx_http_current_connections{state="handled"} 1 +apisix_nginx_http_current_connections{state="reading"} 0 +apisix_nginx_http_current_connections{state="waiting"} 0 
+apisix_nginx_http_current_connections{state="writing"} 1 +... +``` + +### Expose Batch Requests Endpoint + +The following example demonstrates how you can use the `public-api` Plugin to expose an endpoint for the `batch-requests` Plugin, which is used for assembling multiple requests into one single request before sending them to the gateway. + +[//]: + +Create a sample Route to httpbin's `/anything` endpoint for verification purpose: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "httpbin-anything", + "uri": "/anything", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Create a Route with `public-api` Plugin and set the Route `uri` to the internal endpoint to be exposed: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "batch-requests", + "uri": "/apisix/batch-requests", + "plugins": { + "public-api": {} + } + }' +``` + +Send a pipelined request consisting of a GET and a POST request to the exposed batch requests endpoint: + +```shell +curl "http://127.0.0.1:9080/apisix/batch-requests" -X POST -d ' +{ + "pipeline": [ + { + "method": "GET", + "path": "/anything" + }, + { + "method": "POST", + "path": "/anything", + "body": "a post request" + } + ] +}' +``` + +You should receive responses from both requests, similar to the following: + +```json +[ + { + "reason": "OK", + "body": "{\n \"args\": {}, \n \"data\": \"\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Accept\": \"*/*\", \n \"Host\": \"127.0.0.1\", \n \"User-Agent\": \"curl/8.6.0\", \n \"X-Amzn-Trace-Id\": \"Root=1-67b6e33b-5a30174f5534287928c54ca9\", \n \"X-Forwarded-Host\": \"127.0.0.1\"\n }, \n \"json\": null, \n \"method\": \"GET\", \n \"origin\": \"192.168.107.1, 43.252.208.84\", \n \"url\": \"http://127.0.0.1/anything\"\n}\n", + "headers": { + ... 
+ }, + "status": 200 + }, + { + "reason": "OK", + "body": "{\n \"args\": {}, \n \"data\": \"a post request\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Accept\": \"*/*\", \n \"Content-Length\": \"14\", \n \"Host\": \"127.0.0.1\", \n \"User-Agent\": \"curl/8.6.0\", \n \"X-Amzn-Trace-Id\": \"Root=1-67b6e33b-0eddcec07f154dac0d77876f\", \n \"X-Forwarded-Host\": \"127.0.0.1\"\n }, \n \"json\": null, \n \"method\": \"POST\", \n \"origin\": \"192.168.107.1, 43.252.208.84\", \n \"url\": \"http://127.0.0.1/anything\"\n}\n", + "headers": { + ... + }, + "status": 200 + } +] +``` + +If you would like to expose the batch requests endpoint at a custom endpoint, create a Route with `public-api` Plugin as such. You should set the Route `uri` to the custom endpoint path and set the plugin `uri` to the internal endpoint to be exposed. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "batch-requests", + "uri": "/batch-requests", + "plugins": { + "public-api": { + "uri": "/apisix/batch-requests" + } + } + }' +``` + +The batch requests endpoint should now be exposed as `/batch-requests`, instead of `/apisix/batch-requests`. 
+ +Send a pipelined request consisting of a GET and a POST request to the exposed batch requests endpoint: + +```shell +curl "http://127.0.0.1:9080/batch-requests" -X POST -d ' +{ + "pipeline": [ + { + "method": "GET", + "path": "/anything" + }, + { + "method": "POST", + "path": "/anything", + "body": "a post request" + } + ] +}' +``` + +You should receive responses from both requests, similar to the following: + +```json +[ + { + "reason": "OK", + "body": "{\n \"args\": {}, \n \"data\": \"\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Accept\": \"*/*\", \n \"Host\": \"127.0.0.1\", \n \"User-Agent\": \"curl/8.6.0\", \n \"X-Amzn-Trace-Id\": \"Root=1-67b6e33b-5a30174f5534287928c54ca9\", \n \"X-Forwarded-Host\": \"127.0.0.1\"\n }, \n \"json\": null, \n \"method\": \"GET\", \n \"origin\": \"192.168.107.1, 43.252.208.84\", \n \"url\": \"http://127.0.0.1/anything\"\n}\n", + "headers": { + ... + }, + "status": 200 + }, + { + "reason": "OK", + "body": "{\n \"args\": {}, \n \"data\": \"a post request\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Accept\": \"*/*\", \n \"Content-Length\": \"14\", \n \"Host\": \"127.0.0.1\", \n \"User-Agent\": \"curl/8.6.0\", \n \"X-Amzn-Trace-Id\": \"Root=1-67b6e33b-0eddcec07f154dac0d77876f\", \n \"X-Forwarded-Host\": \"127.0.0.1\"\n }, \n \"json\": null, \n \"method\": \"POST\", \n \"origin\": \"192.168.107.1, 43.252.208.84\", \n \"url\": \"http://127.0.0.1/anything\"\n}\n", + "headers": { + ... 
+ }, + "status": 200 + } +] +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/real-ip.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/real-ip.md new file mode 100644 index 0000000..c607046 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/real-ip.md @@ -0,0 +1,202 @@ +--- +title: real-ip +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Real IP +description: The real-ip plugin allows Apache APISIX to set the client's real IP by the IP address passed in the HTTP header or HTTP query string. +--- + + + + + + + +## Description + +The `real-ip` Plugin allows APISIX to set the client's real IP by the IP address passed in the HTTP header or HTTP query string. This is particularly useful when APISIX is behind a reverse proxy since the proxy could act as the request-originating client otherwise. + +The Plugin is functionally similar to NGINX's [ngx_http_realip_module](https://nginx.org/en/docs/http/ngx_http_realip_module.html) but offers more flexibility. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-----------|---------|----------|---------|----------------|---------------| +| source | string | True | | |A built-in [APISIX variable](https://apisix.apache.org/docs/apisix/apisix-variable/) or [NGINX variable](https://nginx.org/en/docs/varindex.html), such as `http_x_forwarded_for` or `arg_realip`. The variable value should be a valid IP address that represents the client's real IP address, with an optional port.| +| trusted_addresses | array[string] | False | | array of IPv4 or IPv6 addresses (CIDR notation acceptable) | Trusted addresses that are known to send correct replacement addresses. This configuration sets the [`set_real_ip_from`](https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from) directive. 
| +| recursive | boolean | False | False | | If false, replace the original client address that matches one of the trusted addresses by the last address sent in the configured `source`.
If true, replace the original client address that matches one of the trusted addresses by the last non-trusted address sent in the configured `source`. | + +:::note +If the address specified in `source` is missing or invalid, the Plugin would not change the client address. +::: + +## Examples + +The examples below demonstrate how you can configure `real-ip` in different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Obtain Real Client Address From URI Parameter + +The following example demonstrates how to update the client IP address with a URI parameter. + +Create a Route as follows. You should configure `source` to obtain value from the URL parameter `realip` using [APISIX variable](https://apisix.apache.org/docs/apisix/apisix-variable/) or [NGINX variable](https://nginx.org/en/docs/varindex.html). Use the `response-rewrite` Plugin to set response headers to verify if the client IP and port were actually updated. 
+ +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "real-ip-route", + "uri": "/get", + "plugins": { + "real-ip": { + "source": "arg_realip", + "trusted_addresses": ["127.0.0.0/24"] + }, + "response-rewrite": { + "headers": { + "remote_addr": "$remote_addr", + "remote_port": "$remote_port" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to the Route with real IP and port in the URL parameter: + +```shell +curl -i "http://127.0.0.1:9080/get?realip=1.2.3.4:9080" +``` + +You should see the response includes the following header: + +```text +remote-addr: 1.2.3.4 +remote-port: 9080 +``` + +### Obtain Real Client Address From Header + +The following example shows how to set the real client IP when APISIX is behind a reverse proxy, such as a load balancer when the proxy exposes the real client IP in the [`X-Forwarded-For`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For) header. + +Create a Route as follows. You should configure `source` to obtain value from the request header `X-Forwarded-For` using [APISIX variable](https://apisix.apache.org/docs/apisix/apisix-variable/) or [NGINX variable](https://nginx.org/en/docs/varindex.html). Use the `response-rewrite` Plugin to set a response header to verify if the client IP was actually updated. 
+ +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "real-ip-route", + "uri": "/get", + "plugins": { + "real-ip": { + "source": "http_x_forwarded_for", + "trusted_addresses": ["127.0.0.0/24"] + }, + "response-rewrite": { + "headers": { + "remote_addr": "$remote_addr" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +You should see a response including the following header: + +```text +remote-addr: 10.26.3.19 +``` + +The IP address should correspond to the IP address of the request-originating client. + +### Obtain Real Client Address Behind Multiple Proxies + +The following example shows how to get the real client IP when APISIX is behind multiple proxies, which causes [`X-Forwarded-For`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For) header to include a list of proxy IP addresses. + +Create a Route as follows. You should configure `source` to obtain value from the request header `X-Forwarded-For` using [APISIX variable](https://apisix.apache.org/docs/apisix/apisix-variable/) or [NGINX variable](https://nginx.org/en/docs/varindex.html). Set `recursive` to `true` so that the original client address that matches one of the trusted addresses is replaced by the last non-trusted address sent in the configured `source`. Then, use the `response-rewrite` Plugin to set a response header to verify if the client IP was actually updated. 
+ +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "real-ip-route", + "uri": "/get", + "plugins": { + "real-ip": { + "source": "http_x_forwarded_for", + "recursive": true, + "trusted_addresses": ["192.128.0.0/16", "127.0.0.0/24"] + }, + "response-rewrite": { + "headers": { + "remote_addr": "$remote_addr" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/get" \ + -H "X-Forwarded-For: 127.0.0.2, 192.128.1.1, 127.0.0.1" +``` + +You should see a response including the following header: + +```text +remote-addr: 127.0.0.2 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/redirect.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/redirect.md new file mode 100644 index 0000000..3ec556b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/redirect.md @@ -0,0 +1,172 @@ +--- +title: redirect +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Redirect +description: This document contains information about the Apache APISIX redirect Plugin. +--- + + + +## Description + +The `redirect` Plugin can be used to configure redirects. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|---------------------|---------------|----------|---------|--------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| http_to_https | boolean | False | false | | When set to `true` and the request is HTTP, it will be redirected to HTTPS with the same URI with a 301 status code. Note the querystring from the raw URI will also be contained in the Location header. | +| uri | string | False | | | URI to redirect to. Can contain Nginx variables. For example, `/test/index.html`, `$uri/index.html`, `${uri}/index.html`, `https://example.com/foo/bar`. If you refer to a variable name that doesn't exist, instead of throwing an error, it will treat it as an empty variable. | +| regex_uri | array[string] | False | | | Match the URL from client with a regular expression and redirect. If it doesn't match, the request will be forwarded to the Upstream. Only either of `uri` or `regex_uri` can be used at a time. For example, [" ^/iresty/(.*)/(.*)/(.*)", "/$1-$2-$3"], where the first element is the regular expression to match and the second element is the URI to redirect to. APISIX only support one `regex_uri` currently, so the length of the `regex_uri` array is `2`. | +| ret_code | integer | False | 302 | [200, ...] | HTTP response code. | +| encode_uri | boolean | False | false | | When set to `true` the URI in the `Location` header will be encoded as per [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986). 
| +| append_query_string | boolean | False | false | | When set to `true`, adds the query string from the original request to the `Location` header. If the configured `uri` or `regex_uri` already contains a query string, the query string from the request will be appended to it with an `&`. Do not use this if you have already handled the query string (for example, with an Nginx variable `$request_uri`) to avoid duplicates. | + +:::note + +* Only one of `http_to_https`, `uri` and `regex_uri` can be configured. +* Only one of `http_to_https` and `append_query_string` can be configured. +* When enabling `http_to_https`, the ports in the redirect URL will pick a value in the following order (in descending order of priority) + * Read `plugin_attr.redirect.https_port` from the configuration file (`conf/config.yaml`). + * If `apisix.ssl` is enabled, read `apisix.ssl.listen` and select a port randomly from it. + * Use 443 as the default https port. + +::: + +## Enable Plugin + +The example below shows how you can enable the `redirect` Plugin on a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/test/index.html", + "plugins": { + "redirect": { + "uri": "/test/default.html", + "ret_code": 301 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` + +You can also use any built-in Nginx variables in the new URI: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/test", + "plugins": { + "redirect": { + "uri": "$uri/index.html", + "ret_code": 301 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` + +## Example usage 
+ +First, we configure the Plugin as mentioned above. We can then make a request and it will be redirected as shown below: + +```shell +curl http://127.0.0.1:9080/test/index.html -i +``` + +```shell +HTTP/1.1 301 Moved Permanently +Date: Wed, 23 Oct 2019 13:48:23 GMT +Content-Type: text/html +Content-Length: 166 +Connection: keep-alive +Location: /test/default.html +... +``` + +The response shows the response code and the `Location` header implying that the Plugin is in effect. + +The example below shows how you can redirect HTTP to HTTPS: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "redirect": { + "http_to_https": true + } + } +}' +``` + +To test this: + +```shell +curl http://127.0.0.1:9080/hello -i +``` + +``` +HTTP/1.1 301 Moved Permanently +... +Location: https://127.0.0.1:9443/hello +... +``` + +## Delete Plugin + +To remove the `redirect` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/test/index.html", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/referer-restriction.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/referer-restriction.md new file mode 100644 index 0000000..24cb17b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/referer-restriction.md @@ -0,0 +1,135 @@ +--- +title: referer-restriction +keywords: + - Apache APISIX + - API Gateway + - Referer restriction +description: This document contains information about the Apache APISIX referer-restriction Plugin, which can be used to restrict access to a Service or a Route by whitelisting/blacklisting the Referer request header. +--- + + + +## Description + +The `referer-restriction` Plugin can be used to restrict access to a Service or a Route by whitelisting/blacklisting the `Referer` request header. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|----------------|---------------|----------|----------------------------------|--------------|---------------------------------------------------------------------------------------------------| +| whitelist | array[string] | False | | | List of hostnames to whitelist. A hostname can start with `*` for wildcard. | +| blacklist | array[string] | False | | | List of hostnames to blacklist. A hostname can start with `*` for wildcard. | +| message | string | False | "Your referer host is not allowed" | [1, 1024] | Message returned when access is not allowed. | +| bypass_missing | boolean | False | false | | When set to `true`, bypasses the check when the `Referer` request header is missing or malformed. | + +:::info IMPORTANT + +Only one of `whitelist` or `blacklist` attribute must be specified. 
They cannot work together. + +::: + +## Enable Plugin + +You can enable the Plugin on a specific Route or a Service as shown below: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "referer-restriction": { + "bypass_missing": true, + "whitelist": [ + "xx.com", + "*.xx.com" + ] + } + } +}' +``` + +## Example usage + +Once you have configured the Plugin as shown above, you can test it by setting `Referer: http://xx.com/x`: + +```shell +curl http://127.0.0.1:9080/index.html -H 'Referer: http://xx.com/x' +``` + +```shell +HTTP/1.1 200 OK +... +``` + +Now, if you make a request with `Referer: http://yy.com/x`, the request will be blocked: + +```shell +curl http://127.0.0.1:9080/index.html -H 'Referer: http://yy.com/x' +``` + +```shell +HTTP/1.1 403 Forbidden +... +{"message":"Your referer host is not allowed"} +``` + +Since we have set `bypass_missing` to `true`, a request without the `Referer` header will be successful as the check is skipped: + +```shell +curl http://127.0.0.1:9080/index.html +``` + +```shell +HTTP/1.1 200 OK +... +``` + +## Delete Plugin + +To remove the `referer-restriction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/request-id.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/request-id.md new file mode 100644 index 0000000..3f5fb39 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/request-id.md @@ -0,0 +1,296 @@ +--- +title: request-id +keywords: + - Apache APISIX + - API Gateway + - Request ID +description: The request-id Plugin adds a unique ID to each request proxied through APISIX, which can be used to track API requests. +--- + + + + + + + +## Description + +The `request-id` Plugin adds a unique ID to each request proxied through APISIX, which can be used to track API requests. If a request carries an ID in the header corresponding to `header_name`, the Plugin will use the header value as the unique ID and will not overwrite with the automatically generated ID. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ------------------- | ------- | -------- | -------------- | ------------------------------- | ---------------------------------------------------------------------- | +| header_name | string | False | "X-Request-Id" | | Name of the header that carries the request unique ID. Note that if a request carries an ID in the `header_name` header, the Plugin will use the header value as the unique ID and will not overwrite it with the generated ID. | +| include_in_response | boolean | False | true | | If true, include the generated request ID in the response header, where the name of the header is the `header_name` value. | +| algorithm | string | False | "uuid" | ["uuid","nanoid","range_id"] | Algorithm used for generating the unique ID. 
When set to `uuid` , the Plugin generates a universally unique identifier. When set to `nanoid`, the Plugin generates a compact, URL-safe ID. When set to `range_id`, the Plugin generates a sequential ID with specific parameters. | +| range_id | object | False | | | Configuration for generating a request ID using the `range_id` algorithm. | +| range_id.char_set | string | False | "abcdefghijklmnopqrstuvwxyzABCDEFGHIGKLMNOPQRSTUVWXYZ0123456789" | minimum length 6 | Character set used for the `range_id` algorithm. | +| range_id.length | integer | False | 16 | >=6 | Length of the generated ID for the `range_id` algorithm. | + +## Examples + +The examples below demonstrate how you can configure `request-id` in different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Attach Request ID to Default Response Header + +The following example demonstrates how to configure `request-id` on a Route which attaches a generated request ID to the default `X-Request-Id` response header, if the header value is not passed in the request. When the `X-Request-Id` header is set in the request, the Plugin will take the value in the request header as the request ID. 
+ +Create a Route with the `request-id` Plugin using its default configurations (explicitly defined): + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "request-id-route", + "uri": "/anything", + "plugins": { + "request-id": { + "header_name": "X-Request-Id", + "include_in_response": true, + "algorithm": "uuid" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response and see the response includes the `X-Request-Id` header with a generated ID: + +```text +X-Request-Id: b9b2c0d4-d058-46fa-bafc-dd91a0ccf441 +``` + +Send a request to the Route with a custom request ID in the header: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H 'X-Request-Id: some-custom-request-id' +``` + +You should receive an `HTTP/1.1 200 OK` response and see the response includes the `X-Request-Id` header with the custom request ID: + +```text +X-Request-Id: some-custom-request-id +``` + +### Attach Request ID to Custom Response Header + +The following example demonstrates how to configure `request-id` on a Route which attaches a generated request ID to a specified header. 
+
+Create a Route with the `request-id` Plugin to define a custom header that carries the request ID and include the request ID in the response header:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${ADMIN_API_KEY}" \
+  -d '{
+    "id": "request-id-route",
+    "uri": "/anything",
+    "plugins": {
+      "request-id": {
+        "header_name": "X-Req-Identifier",
+        "include_in_response": true
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to the route:
+
+```shell
+curl -i "http://127.0.0.1:9080/anything"
+```
+
+You should receive an `HTTP/1.1 200 OK` response and see the response includes the `X-Req-Identifier` header with a generated ID:
+
+```text
+X-Req-Identifier: 1c42ff59-ee4c-4103-a980-8359f4135b21
+```
+
+### Hide Request ID in Response Header
+
+The following example demonstrates how to configure `request-id` on a Route which attaches a generated request ID to a specified header. The header containing the request ID should be forwarded to the Upstream service but not returned in the response header.
+
+Create a Route with the `request-id` Plugin to define a custom header that carries the request ID and not include the request ID in the response header:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${ADMIN_API_KEY}" \
+  -d '{
+    "id": "request-id-route",
+    "uri": "/anything",
+    "plugins": {
+      "request-id": {
+        "header_name": "X-Req-Identifier",
+        "include_in_response": false
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to the Route:
+
+```shell
+curl -i "http://127.0.0.1:9080/anything"
+```
+
+You should receive an `HTTP/1.1 200 OK` response and should not see the `X-Req-Identifier` header among the response headers. 
In the response body, you should see:
+
+```json
+{
+  "args": {},
+  "data": "",
+  "files": {},
+  "form": {},
+  "headers": {
+    "Accept": "*/*",
+    "Host": "127.0.0.1",
+    "User-Agent": "curl/8.6.0",
+    "X-Amzn-Trace-Id": "Root=1-6752748c-7d364f48564508db1e8c9ea8",
+    "X-Forwarded-Host": "127.0.0.1",
+    "X-Req-Identifier": "268092bc-15e1-4461-b277-bf7775f2856f"
+  },
+  ...
+}
+```
+
+This shows the request ID is forwarded to the Upstream service but not returned in the response header.
+
+### Use `nanoid` Algorithm
+
+The following example demonstrates how to configure `request-id` on a Route and use the `nanoid` algorithm to generate the request ID.
+
+Create a Route with the `request-id` Plugin as such:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${ADMIN_API_KEY}" \
+  -d '{
+    "id": "request-id-route",
+    "uri": "/anything",
+    "plugins": {
+      "request-id": {
+        "algorithm": "nanoid"
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to the Route:
+
+```shell
+curl -i "http://127.0.0.1:9080/anything"
+```
+
+You should receive an `HTTP/1.1 200 OK` response and see the response includes the default `X-Request-Id` header with an ID generated using the `nanoid` algorithm:
+
+```text
+X-Request-Id: kepgHWCH2ycQ6JknQKrX2
+```
+
+### Attach Request ID Globally and on a Route
+
+The following example demonstrates how to configure `request-id` as a global Plugin and on a Route to attach two IDs. 
+ +Create a global rule for the `request-id` Plugin which adds request ID to a custom header: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/global_rules" -X PUT -d '{ + "id": "rule-for-request-id", + "plugins": { + "request-id": { + "header_name": "Global-Request-ID" + } + } +}' +``` + +Create a Route with the `request-id` Plugin which adds request ID to a different custom header: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "request-id-route", + "uri": "/anything", + "plugins": { + "request-id": { + "header_name": "Route-Request-ID" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response and see the response includes the following headers: + +```text +Global-Request-ID: 2e9b99c1-08ed-4a74-b347-49c0891b07ad +Route-Request-ID: d755666b-732c-4f0e-a30e-a7a71ace4e26 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/request-validation.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/request-validation.md new file mode 100644 index 0000000..ae539b6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/request-validation.md @@ -0,0 +1,528 @@ +--- +title: request-validation +keywords: + - Apache APISIX + - API Gateway + - Request Validation +description: The request-validation Plugin validates requests before forwarding them to Upstream services. This Plugin uses JSON Schema for validation and can validate headers and body of a request. +--- + + + + + + + +## Description + +The `request-validation` Plugin validates requests before forwarding them to Upstream services. This Plugin uses [JSON Schema](https://github.com/api7/jsonschema) for validation and can validate headers and body of a request. 
+ +See [JSON schema specification](https://json-schema.org/specification) to learn more about the syntax. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|---------------|---------|----------|---------|---------------|---------------------------------------------------| +| header_schema | object | False | | | Schema for the request header data. | +| body_schema | object | False | | | Schema for the request body data. | +| rejected_code | integer | False | 400 | [200,...,599] | Status code to return when rejecting requests. | +| rejected_msg | string | False | | | Message to return when rejecting requests. | + +:::note + +At least one of `header_schema` or `body_schema` should be filled in. + +::: + +## Examples + +The examples below demonstrate how you can configure `request-validation` for different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Validate Request Header + +The following example demonstrates how to validate request headers against a defined JSON schema, which requires two specific headers and the header value to conform to specified requirements. 
+ +Create a Route with `request-validation` Plugin as follows: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "request-validation-route", + "uri": "/get", + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "required": ["User-Agent", "Host"], + "properties": { + "User-Agent": { + "type": "string", + "pattern": "^curl\/" + }, + "Host": { + "type": "string", + "enum": ["httpbin.org", "httpbin"] + } + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +#### Verify with Request Conforming to the Schema + +Send a request with header `Host: httpbin`, which complies with the schema: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Host: httpbin" +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin", + "User-Agent": "curl/7.74.0", + "X-Amzn-Trace-Id": "Root=1-6509ae35-63d1e0fd3934e3f221a95dd8", + "X-Forwarded-Host": "httpbin" + }, + "origin": "127.0.0.1, 183.17.233.107", + "url": "http://httpbin/get" +} +``` + +#### Verify with Request Not Conforming to the Schema + +Send a request without any header: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +You should receive an `HTTP/1.1 400 Bad Request` response, showing that the request fails to pass validation: + +```text +property "Host" validation failed: matches none of the enum value +``` + +Send a request with the required headers but with non-conformant header value: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Host: httpbin" -H "User-Agent: cli-mock" +``` + +You should receive an `HTTP/1.1 400 Bad Request` response showing the `User-Agent` header value does not match the expected pattern: + +```text +property "User-Agent" validation failed: failed to match pattern "^curl/" with "cli-mock" +``` + +### Customize Rejection Message 
and Status Code + +The following example demonstrates how to customize response status and message when the validation fails. + +Configure the Route with `request-validation` as follows: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "request-validation-route", + "uri": "/get", + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "required": ["Host"], + "properties": { + "Host": { + "type": "string", + "enum": ["httpbin.org", "httpbin"] + } + } + }, + "rejected_code": 403, + "rejected_msg": "Request header validation failed." + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request with a misconfigured `Host` in the header: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Host: httpbin2" +``` + +You should receive an `HTTP/1.1 403 Forbidden` response with the custom message: + +```text +Request header validation failed. +``` + +### Validate Request Body + +The following example demonstrates how to validate request body against a defined JSON schema. 
+ +The `request-validation` Plugin supports validation of two types of media types: + +* `application/json` +* `application/x-www-form-urlencoded` + +#### Validate JSON Request Body + +Create a Route with `request-validation` Plugin as follows: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "request-validation-route", + "uri": "/post", + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "required": ["Content-Type"], + "properties": { + "Content-Type": { + "type": "string", + "pattern": "^application\/json$" + } + } + }, + "body_schema": { + "type": "object", + "required": ["required_payload"], + "properties": { + "required_payload": {"type": "string"}, + "boolean_payload": {"type": "boolean"}, + "array_payload": { + "type": "array", + "minItems": 1, + "items": { + "type": "integer", + "minimum": 200, + "maximum": 599 + }, + "uniqueItems": true, + "default": [200] + } + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request with JSON body that conforms to the schema to verify: + +```shell +curl -i "http://127.0.0.1:9080/post" -X POST \ + -H "Content-Type: application/json" \ + -d '{"required_payload":"hello", "array_payload":[301]}' +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "args": {}, + "data": "{\"array_payload\":[301],\"required_payload\":\"hello\"}", + "files": {}, + "form": {}, + "headers": { + ... 
+ }, + "json": { + "array_payload": [ + 301 + ], + "required_payload": "hello" + }, + "origin": "127.0.0.1, 183.17.233.107", + "url": "http://127.0.0.1/post" +} +``` + +If you send a request without specifying `Content-Type: application/json`: + +```shell +curl -i "http://127.0.0.1:9080/post" -X POST \ + -d '{"required_payload":"hello,world"}' +``` + +You should receive an `HTTP/1.1 400 Bad Request` response similar to the following: + +```text +property "Content-Type" validation failed: failed to match pattern "^application/json$" with "application/x-www-form-urlencoded" +``` + +Similarly, if you send a request without the required JSON field `required_payload`: + +```shell +curl -i "http://127.0.0.1:9080/post" -X POST \ + -H "Content-Type: application/json" \ + -d '{}' +``` + +You should receive an `HTTP/1.1 400 Bad Request` response: + +```text +property "required_payload" is required +``` + +#### Validate URL-Encoded Form Body + +Create a Route with `request-validation` Plugin as follows: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "request-validation-route", + "uri": "/post", + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "required": ["Content-Type"], + "properties": { + "Content-Type": { + "type": "string", + "pattern": "^application\/x-www-form-urlencoded$" + } + } + }, + "body_schema": { + "type": "object", + "required": ["required_payload","enum_payload"], + "properties": { + "required_payload": {"type": "string"}, + "enum_payload": { + "type": "string", + "enum": ["enum_string_1", "enum_string_2"], + "default": "enum_string_1" + } + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request with URL-encoded form data to verify: + +```shell +curl -i "http://127.0.0.1:9080/post" -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d 
"required_payload=hello&enum_payload=enum_string_1"
+```
+
+You should receive an `HTTP/1.1 200 OK` response similar to the following:
+
+```json
+{
+  "args": {},
+  "data": "",
+  "files": {},
+  "form": {
+    "enum_payload": "enum_string_1",
+    "required_payload": "hello"
+  },
+  "headers": {
+    ...
+  },
+  "json": null,
+  "origin": "127.0.0.1, 183.17.233.107",
+  "url": "http://127.0.0.1/post"
+}
+```
+
+Send a request without the URL-encoded field `enum_payload`:
+
+```shell
+curl -i "http://127.0.0.1:9080/post" -X POST \
+  -H "Content-Type: application/x-www-form-urlencoded" \
+  -d "required_payload=hello"
+```
+
+You should receive an `HTTP/1.1 400 Bad Request` response similar to the following:
+
+```text
+property "enum_payload" is required
+```
+
+## Appendix: JSON Schema
+
+The following section provides boilerplate JSON schema for you to adjust, combine, and use with this Plugin. For a complete reference, see [JSON schema specification](https://json-schema.org/specification).
+
+### Enumerated Values
+
+```json
+{
+  "body_schema": {
+    "type": "object",
+    "required": ["enum_payload"],
+    "properties": {
+      "enum_payload": {
+        "type": "string",
+        "enum": ["enum_string_1", "enum_string_2"],
+        "default": "enum_string_1"
+      }
+    }
+  }
+}
+```
+
+### Boolean Values
+
+```json
+{
+  "body_schema": {
+    "type": "object",
+    "required": ["bool_payload"],
+    "properties": {
+      "bool_payload": {
+        "type": "boolean",
+        "default": true
+      }
+    }
+  }
+}
+```
+
+### Numeric Values
+
+```json
+{
+  "body_schema": {
+    "type": "object",
+    "required": ["integer_payload"],
+    "properties": {
+      "integer_payload": {
+        "type": "integer",
+        "minimum": 1,
+        "maximum": 65535
+      }
+    }
+  }
+}
+```
+
+### Strings
+
+```json
+{
+  "body_schema": {
+    "type": "object",
+    "required": ["string_payload"],
+    "properties": {
+      "string_payload": {
+        "type": "string",
+        "minLength": 1,
+        "maxLength": 32
+      }
+    }
+  }
+}
+```
+
+### RegEx for Strings
+
+```json
+{
+  "body_schema": {
+    "type": "object",
+    "required": 
["regex_payload"], + "properties": { + "regex_payload": { + "type": "string", + "minLength": 1, + "maxLength": 32, + "pattern": "[[^[a-zA-Z0-9_]+$]]" + } + } + } +} +``` + +### Arrays + +```json +{ + "body_schema": { + "type": "object", + "required": ["array_payload"], + "properties": { + "array_payload": { + "type": "array", + "minItems": 1, + "items": { + "type": "integer", + "minimum": 200, + "maximum": 599 + }, + "uniqueItems": true, + "default": [200, 302] + } + } + } +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/response-rewrite.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/response-rewrite.md new file mode 100644 index 0000000..f7292bf --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/response-rewrite.md @@ -0,0 +1,313 @@ +--- +title: response-rewrite +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Response Rewrite + - response-rewrite +description: The response-rewrite Plugin offers options to rewrite responses that APISIX and its Upstream services return to clients. With the Plugin, you can modify HTTP status codes, request headers, response body, and more. +--- + + + + + + + +## Description + +The `response-rewrite` Plugin offers options to rewrite responses that APISIX and its Upstream services return to clients. With the Plugin, you can modify HTTP status codes, request headers, response body, and more. + +For instance, you can use this Plugin to: + +- Support [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) by setting `Access-Control-Allow-*` headers. +- Indicate redirection by setting HTTP status codes and `Location` header. + +:::tip + +You can also use the [redirect](./redirect.md) Plugin to set up redirects. 
+ +::: + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-----------------|---------|----------|---------|---------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| status_code | integer | False | | [200, 598] | New HTTP status code in the response. If unset, falls back to the original status code. | +| body | string | False | | | New response body. The `Content-Length` header would also be reset. Should not be configured with `filters`. | +| body_base64 | boolean | False | false | | If true, decode the response body configured in `body` before sending to client, which is useful for image and protobuf decoding. Note that this configuration cannot be used to decode Upstream response. | +| headers | object | False | | | Actions to be executed in the order of `add`, `remove`, and `set`. | +| headers.add | array[string] | False | | | Headers to append to requests. If a header already present in the request, the header value will be appended. Header value could be set to a constant, or one or more [Nginx variables](https://nginx.org/en/docs/http/ngx_http_core_module.html). | +| headers.set | object | False | | |Headers to set to requests. If a header already present in the request, the header value will be overwritten. Header value could be set to a constant, or one or more[Nginx variables](https://nginx.org/en/docs/http/ngx_http_core_module.html). | +| headers.remove | array[string] | False | | | Headers to remove from requests. | +| vars | array[array] | False | | | An array of one or more matching conditions in the form of [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list). 
| +| filters | array[object] | False | | | List of filters that modify the response body by replacing one specified string with another. Should not be configured with `body`. | +| filters.regex | string | True | | | RegEx pattern to match on the response body. | +| filters.scope | string | False | "once" | ["once","global"] | Scope of substitution. `once` substitutes the first matched instance and `global` substitutes globally. | +| filters.replace | string | True | | | Content to substitute with. | +| filters.options | string | False | "jo" | | RegEx options to control how the match operation should be performed. See [Lua NGINX module](https://github.com/openresty/lua-nginx-module#ngxrematch) for the available options. | + +## Examples + +The examples below demonstrate how you can configure `response-rewrite` on a Route in different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Rewrite Header and Body + +The following example demonstrates how to add response body and headers, only to responses with `200` HTTP status codes. 
+ +Create a Route with the `response-rewrite` Plugin: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "response-rewrite-route", + "methods": ["GET"], + "uri": "/headers", + "plugins": { + "response-rewrite": { + "body": "{\"code\":\"ok\",\"message\":\"new json body\"}", + "headers": { + "set": { + "X-Server-id": 3, + "X-Server-status": "on", + "X-Server-balancer-addr": "$balancer_ip:$balancer_port" + } + }, + "vars": [ + [ "status","==",200 ] + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to verify: + +```shell +curl -i "http://127.0.0.1:9080/headers" +``` + +You should receive a `HTTP/1.1 200 OK` response similar to the following: + +```text +... +X-Server-id: 3 +X-Server-status: on +X-Server-balancer-addr: 50.237.103.220:80 + +{"code":"ok","message":"new json body"} +``` + +### Rewrite Header With RegEx Filter + +The following example demonstrates how to use RegEx filter matching to replace `X-Amzn-Trace-Id` for responses. 
+ +Create a Route with the `response-rewrite` Plugin: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "response-rewrite-route", + "methods": ["GET"], + "uri": "/headers", + "plugins":{ + "response-rewrite":{ + "filters":[ + { + "regex":"X-Amzn-Trace-Id", + "scope":"global", + "replace":"X-Amzn-Trace-Id-Replace" + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to verify: + +```shell +curl -i "http://127.0.0.1:9080/headers" +``` + +You should see a response similar to the following: + +```text +{ + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id-Replace": "Root=1-6500095d-1041b05e2ba9c6b37232dbc7", + "X-Forwarded-Host": "127.0.0.1" + } +} +``` + +### Decode Body from Base64 + +The following example demonstrates how to Decode Body from Base64 format. + +Create a Route with the `response-rewrite` Plugin: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "response-rewrite-route", + "methods": ["GET"], + "uri": "/get", + "plugins":{ + "response-rewrite": { + "body": "SGVsbG8gV29ybGQ=", + "body_base64": true + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to verify: + +```shell +curl "http://127.0.0.1:9080/get" +``` + +You should see a response of the following: + +```text +Hello World +``` + +### Rewrite Response and Its Connection with Execution Phases + +The following example demonstrates the connection between the `response-rewrite` Plugin and [execution phases](/apisix/key-concepts/plugins#plugins-execution-lifecycle) by configuring the Plugin with the `key-auth` Plugin, and see how the response is still rewritten to `200 OK` in the case of an unauthenticated request. 
+ +Create a Consumer `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +Create `key-auth` credential for the Consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +Create a Route with `key-auth` and configure `response-rewrite` to rewrite the response status code and body: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "response-rewrite-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "response-rewrite": { + "status_code": 200, + "body": "{\"code\": 200, \"msg\": \"success\"}" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to the Route with the valid key: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: jack-key' +``` + +You should receive an `HTTP/1.1 200 OK` response of the following: + +```text +{"code": 200, "msg": "success"} +``` + +Send a request to the Route without any key: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +You should still receive an `HTTP/1.1 200 OK` response of the same, instead of `HTTP/1.1 401 Unauthorized` from the `key-auth` Plugin. This shows that the `response-rewrite` Plugin still rewrites the response. + +This is because **header_filter** and **body_filter** phase logics of the `response-rewrite` Plugin will continue to run after [`ngx.exit`](https://openresty-reference.readthedocs.io/en/latest/Lua_Nginx_API/#ngxexit) in the **access** or **rewrite** phases from other plugins. + +The following table summarizes the impact of `ngx.exit` on execution phases. 
+ +| Phase | rewrite | access | header_filter | body_filter | +|---------------|----------|----------|---------------|-------------| +| **rewrite** | ngx.exit | | | | +| **access** | × | ngx.exit | | | +| **header_filter** | ✓ | ✓ | ngx.exit | | +| **body_filter** | ✓ | ✓ | × | ngx.exit | + +For example, if `ngx.exit` takes places in the **rewrite** phase, it will interrupt the execution of **access** phase but not interfere with **header_filter** and **body_filter** phases. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/rocketmq-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/rocketmq-logger.md new file mode 100644 index 0000000..ff09c66 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/rocketmq-logger.md @@ -0,0 +1,280 @@ +--- +title: rocketmq-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - RocketMQ Logger +description: This document contains information about the Apache APISIX rocketmq-logger Plugin. +--- + + +## Description + +The `rocketmq-logger` Plugin provides the ability to push logs as JSON objects to your RocketMQ clusters. + +It might take some time to receive the log data. It will be automatically sent after the timer function in the [batch processor](../batch-processor.md) expires. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|------------------------|---------|----------|-------------------------------------------------------------------------------|----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| nameserver_list | object | True | | | List of RocketMQ nameservers. | +| topic | string | True | | | Target topic to push the data to. | +| key | string | False | | | Key of the messages. 
|
+| tag | string | False | | | Tag of the messages. |
+| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. |
+| timeout | integer | False | 3 | [1,...] | Timeout for the upstream to send data. |
+| use_tls | boolean | False | false | | When set to `true`, uses TLS. |
+| access_key | string | False | "" | | Access key for ACL. Setting to an empty string will disable the ACL. |
+| secret_key | string | False | "" | | Secret key for ACL. |
+| name | string | False | "rocketmq logger" | | Unique identifier for the batch processor. If you use Prometheus to monitor APISIX metrics, the name is exported in `apisix_batch_process_entries`. |
+| meta_format | enum | False | "default" | ["default","origin"] | Format to collect the request information. Setting to `default` collects the information in JSON format and `origin` collects the information with the original HTTP request. See [examples](#meta_format-example) below. |
+| include_req_body | boolean | False | false | [false, true] | When set to `true`, includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. |
+| include_req_body_expr | array | False | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
+| include_resp_body | boolean | False | false | [false, true] | When set to `true`, includes the response body in the log. |
+| include_resp_body_expr | array | False | | | Filter for when the `include_resp_body` attribute is set to `true`. Response body is only logged when the expression set here evaluates to `true`. 
See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
+
+NOTE: `encrypt_fields = {"secret_key"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields).
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+:::info IMPORTANT
+
+The data is first written to a buffer. When the buffer exceeds the `batch_max_size` or `buffer_duration` attribute, the data is sent to the RocketMQ server and the buffer is flushed.
+
+If the process is successful, it will return `true`, and if it fails, it returns `nil` along with a string describing the "buffer overflow" error.
+ +::: + +### meta_format example + +- default: + +```json + { + "upstream": "127.0.0.1:1980", + "start_time": 1619414294760, + "client_ip": "127.0.0.1", + "service_id": "", + "route_id": "1", + "request": { + "querystring": { + "ab": "cd" + }, + "size": 90, + "uri": "/hello?ab=cd", + "url": "http://localhost:1984/hello?ab=cd", + "headers": { + "host": "localhost", + "content-length": "6", + "connection": "close" + }, + "method": "GET" + }, + "response": { + "headers": { + "connection": "close", + "content-type": "text/plain; charset=utf-8", + "date": "Mon, 26 Apr 2021 05:18:14 GMT", + "server": "APISIX/2.5", + "transfer-encoding": "chunked" + }, + "size": 190, + "status": 200 + }, + "server": { + "hostname": "localhost", + "version": "2.5" + }, + "latency": 0 + } +``` + +- origin: + +```http + GET /hello?ab=cd HTTP/1.1 + host: localhost + content-length: 6 + connection: close + + abcdef +``` + +### meta_format example + +- `default`: + + ```json + { + "upstream": "127.0.0.1:1980", + "start_time": 1619414294760, + "client_ip": "127.0.0.1", + "service_id": "", + "route_id": "1", + "request": { + "querystring": { + "ab": "cd" + }, + "size": 90, + "uri": "/hello?ab=cd", + "url": "http://localhost:1984/hello?ab=cd", + "headers": { + "host": "localhost", + "content-length": "6", + "connection": "close" + }, + "body": "abcdef", + "method": "GET" + }, + "response": { + "headers": { + "connection": "close", + "content-type": "text/plain; charset=utf-8", + "date": "Mon, 26 Apr 2021 05:18:14 GMT", + "server": "APISIX/2.5", + "transfer-encoding": "chunked" + }, + "size": 190, + "status": 200 + }, + "server": { + "hostname": "localhost", + "version": "2.5" + }, + "latency": 0 + } + ``` + +- `origin`: + + ```http + GET /hello?ab=cd HTTP/1.1 + host: localhost + content-length: 6 + connection: close + + abcdef + ``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. 
The following configurations are available: + +| Name | Type | Required | Default | Description | +|------------|--------|----------|-------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `rocketmq-logger` Plugin. + +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/rocketmq-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Enable Plugin + +The example below shows how you can enable the `rocketmq-logger` Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/5 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "rocketmq-logger": { + 
"nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "batch_max_size": 1, + "name": "rocketmq logger" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +This Plugin also supports pushing to more than one nameserver at a time. You can specify multiple nameserver in the Plugin configuration as shown below: + +```json +"nameserver_list" : [ + "127.0.0.1:9876", + "127.0.0.2:9876" +] +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your RocketMQ server: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## Delete Plugin + +To remove the `rocketmq-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload, and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/server-info.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/server-info.md new file mode 100644 index 0000000..9b0348a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/server-info.md @@ -0,0 +1,118 @@ +--- +title: server-info +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Server info + - server-info +description: This document contains information about the Apache APISIX server-info Plugin. +--- + + + +## Description + +The `server-info` Plugin periodically reports basic server information to etcd. + +:::warning + +The `server-info` Plugin is deprecated and will be removed in a future release. 
For more details about the deprecation and removal plan, please refer to [this discussion](https://github.com/apache/apisix/discussions/12298). + +::: + +The information reported by the Plugin is explained below: + +| Name | Type | Description | +|--------------|---------|------------------------------------------------------------------------------------------------------------------------| +| boot_time | integer | Bootstrap time (UNIX timestamp) of the APISIX instance. Resets when hot updating but not when APISIX is just reloaded. | +| id | string | APISIX instance ID. | +| etcd_version | string | Version of the etcd cluster used by APISIX. Will be `unknown` if the network to etcd is partitioned. | +| version | string | Version of APISIX instance. | +| hostname | string | Hostname of the machine/pod APISIX is deployed to. | + +## Attributes + +None. + +## API + +This Plugin exposes the endpoint `/v1/server_info` to the [Control API](../control-api.md) + +## Enable Plugin + +Add `server-info` to the Plugin list in your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugins: + - ... + - server-info +``` + +## Customizing server info report configuration + +We can change the report configurations in the `plugin_attr` section of `conf/config.yaml`. + +The following configurations of the server info report can be customized: + +| Name | Type | Default | Description | +| ------------ | ------ | -------- | -------------------------------------------------------------------- | +| report_ttl | integer | 36 | Time in seconds after which the report is deleted from etcd (maximum: 86400, minimum: 3). 
| + +To customize, you can modify the `plugin_attr` attribute in your configuration file (`conf/config.yaml`): + +```yaml title="conf/config.yaml" +plugin_attr: + server-info: + report_ttl: 60 +``` + +## Example usage + +After you enable the Plugin as mentioned above, you can access the server info report through the Control API: + +```shell +curl http://127.0.0.1:9090/v1/server_info -s | jq . +``` + +```json +{ + "etcd_version": "3.5.0", + "id": "b7ce1c5c-b1aa-4df7-888a-cbe403f3e948", + "hostname": "fedora32", + "version": "2.1", + "boot_time": 1608522102 +} +``` + +:::tip + +You can also view the server info report through the [APISIX Dashboard](/docs/dashboard/USER_GUIDE). + +::: + +## Delete Plugin + +To remove the Plugin, you can remove `server-info` from the list of Plugins in your configuration file: + +```yaml title="conf/config.yaml" +plugins: + - ... +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/serverless.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/serverless.md new file mode 100644 index 0000000..b49ac8c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/serverless.md @@ -0,0 +1,144 @@ +--- +title: serverless +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Serverless +description: This document contains information about the Apache APISIX serverless Plugin. +--- + + + +## Description + +There are two `serverless` Plugins in APISIX: `serverless-pre-function` and `serverless-post-function`. The former runs at the beginning of the specified phase, while the latter runs at the end of the specified phase. + +Both Plugins have the same attributes. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-----------|---------------|----------|------------|------------------------------------------------------------------------------|------------------------------------------------------------------| +| phase | string | False | ["access"] | ["rewrite", "access", "header_filter", "body_filter", "log", "before_proxy"] | Phase before or after which the serverless function is executed. | +| functions | array[string] | True | | | List of functions that are executed sequentially. | + +:::info IMPORTANT + +Only Lua functions are allowed here and not other Lua code. + +For example, anonymous functions are legal: + +```lua +return function() + ngx.log(ngx.ERR, 'one') +end +``` + +Closures are also legal: + +```lua +local count = 1 +return function() + count = count + 1 + ngx.say(count) +end +``` + +But code other than functions are illegal: + +```lua +local count = 1 +ngx.say(count) +``` + +::: + +:::note + +From v2.6, `conf` and `ctx` are passed as the first two arguments to a serverless function like regular Plugins. + +Prior to v2.12.0, the phase `before_proxy` was called `balancer`. This was updated considering that this method would run after `access` and before the request goes Upstream and is unrelated to `balancer`. 
+ +::: + +## Enable Plugin + +The example below enables the Plugin on a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function() ngx.log(ngx.ERR, \"serverless pre function\"); end"] + }, + "serverless-post-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) ngx.log(ngx.ERR, \"match uri \", ctx.curr_req_matched and ctx.curr_req_matched._path); end"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +Once you have configured the Plugin as shown above, you can make a request as shown below: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +You will find a message "serverless pre-function" and "match uri /index.html" in the error.log. + +## Delete Plugin + +To remove the `serverless` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/skywalking-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/skywalking-logger.md new file mode 100644 index 0000000..bb0532a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/skywalking-logger.md @@ -0,0 +1,344 @@ +--- +title: skywalking-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - SkyWalking Logger + - skywalking-logger +description: The skywalking-logger pushes request and response logs as JSON objects to SkyWalking OAP server in batches and supports the customization of log formats. +--- + + + + + + + +## Description + +The `skywalking-logger` Plugin pushes request and response logs as JSON objects to SkyWalking OAP server in batches and supports the customization of log formats. + +If there is an existing tracing context, it sets up the trace-log correlation automatically and relies on [SkyWalking Cross Process Propagation Headers Protocol](https://skywalking.apache.org/docs/main/next/en/api/x-process-propagation-headers-v3/). + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|-----------------------|---------|----------|------------------------|---------------|--------------------------------------------------------------------------------------------------------------| +| endpoint_addr | string | True | | | URI of the SkyWalking OAP server. | +| service_name | string | False | "APISIX" | | Service name for the SkyWalking reporter. | +| service_instance_name | string | False | "APISIX Instance Name" | | Service instance name for the SkyWalking reporter. Set it to `$hostname` to directly get the local hostname. 
|
+| log_format | object | False | | | Custom log format in key-value pairs in JSON format. Support [APISIX](../apisix-variable.md) or [Nginx variables](http://nginx.org/en/docs/varindex.html) in values if the string starts with `$`. |
+| timeout | integer | False | 3 | [1,...] | Time to keep the connection alive for after sending a request. |
+| name | string | False | "skywalking logger" | | Unique identifier to identify the logger. If you use Prometheus to monitor APISIX metrics, the name is exported in `apisix_batch_process_entries`. |
+| include_req_body | boolean | False | false | | If true, include the request body in the log. Note that if the request body is too big to be kept in the memory, it can not be logged due to NGINX's limitations. |
+| include_req_body_expr | array[array] | False | | | An array of one or more conditions in the form of [lua-resty-expr](https://github.com/api7/lua-resty-expr). Used when the `include_req_body` is true. Request body would only be logged when the expressions configured here evaluate to true. |
+| include_resp_body | boolean | False | false | | If true, include the response body in the log. |
+| include_resp_body_expr | array[array] | False | | | An array of one or more conditions in the form of [lua-resty-expr](https://github.com/api7/lua-resty-expr). Used when the `include_resp_body` is true. Response body would only be logged when the expressions configured here evaluate to true. |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration.
+
+## Metadata
+
+You can also set the format of the logs by configuring the Plugin metadata. 
The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Custom log format in key-value pairs in JSON format. Support [APISIX](../apisix-variable.md) or [NGINX variables](http://nginx.org/en/docs/varindex.html) in values. | + +## Examples + +The examples below demonstrate how you can configure `skywalking-logger` Plugin for different scenarios. + +To follow along the example, start a storage, OAP and Booster UI with Docker Compose, following [Skywalking's documentation](https://skywalking.apache.org/docs/main/next/en/setup/backend/backend-docker/). Once set up, the OAP server should be listening on `12800` and you should be able to access the UI at [http://localhost:8080](http://localhost:8080). + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Log Requests in Default Log Format + +The following example demonstrates how you can configure the `skywalking-logger` Plugin on a Route to log information of requests hitting the Route. 
+ +Create a Route with the `skywalking-logger` Plugin and configure the Plugin with your OAP server URI: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + "skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response. + +In [Skywalking UI](http://localhost:8080), navigate to __General Service__ > __Services__. You should see a service called `APISIX` with a log entry corresponding to your request: + +```json +{ + "upstream_latency": 674, + "request": { + "method": "GET", + "headers": { + "user-agent": "curl/8.6.0", + "host": "127.0.0.1:9080", + "accept": "*/*" + }, + "url": "http://127.0.0.1:9080/anything", + "size": 85, + "querystring": {}, + "uri": "/anything" + }, + "client_ip": "192.168.65.1", + "route_id": "skywalking-logger-route", + "start_time": 1736945107345, + "upstream": "3.210.94.60:80", + "server": { + "version": "3.11.0", + "hostname": "7edbcebe8eb3" + }, + "service_id": "", + "response": { + "size": 619, + "status": 200, + "headers": { + "content-type": "application/json", + "date": "Thu, 16 Jan 2025 12:45:08 GMT", + "server": "APISIX/3.11.0", + "access-control-allow-origin": "*", + "connection": "close", + "access-control-allow-credentials": "true", + "content-length": "391" + } + }, + "latency": 764.9998664856, + "apisix_latency": 90.999866485596 +} +``` + +### Log Request and Response Headers With Plugin Metadata + +The following example demonstrates how you can customize log format using Plugin metadata and built-in variables to log specific headers from request and response. 
+ +In APISIX, Plugin metadata is used to configure the common metadata fields of all Plugin instances of the same Plugin. It is useful when a Plugin is enabled across multiple resources and requires a universal update to their metadata fields. + +First, create a Route with the `skywalking-logger` Plugin and configure the Plugin with your OAP server URI: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + "skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Next, configure the Plugin metadata for `skywalking-logger` to log the custom request header `env` and the response header `Content-Type`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/plugin_metadata/skywalking-logger" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr", + "env": "$http_env", + "resp_content_type": "$sent_http_Content_Type" + } + }' +``` + +Send a request to the Route with the `env` header: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H "env: dev" +``` + +You should receive an `HTTP/1.1 200 OK` response. In [Skywalking UI](http://localhost:8080), navigate to __General Service__ > __Services__. You should see a service called `APISIX` with a log entry corresponding to your request: + +```json +[ + { + "route_id": "skywalking-logger-route", + "client_ip": "192.168.65.1", + "@timestamp": "2025-01-16T12:51:53+00:00", + "host": "127.0.0.1", + "env": "dev", + "resp_content_type": "application/json" + } +] +``` + +### Log Request Bodies Conditionally + +The following example demonstrates how you can conditionally log request body. 
+ +Create a Route with the `skywalking-logger` Plugin as such, to only include request body if the URL query string `log_body` is `yes`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + "skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800", + "include_req_body": true, + "include_req_body_expr": [["arg_log_body", "==", "yes"]] + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Send a request to the Route with a URL query string satisfying the condition: + +```shell +curl -i "http://127.0.0.1:9080/anything?log_body=yes" -X POST -d '{"env": "dev"}' +``` + +You should receive an `HTTP/1.1 200 OK` response. In [Skywalking UI](http://localhost:8080), navigate to __General Service__ > __Services__. You should see a service called `APISIX` with a log entry corresponding to your request, with the request body logged: + +```json +[ + { + "request": { + "url": "http://127.0.0.1:9080/anything?log_body=yes", + "querystring": { + "log_body": "yes" + }, + "uri": "/anything?log_body=yes", + ..., + "body": "{\"env\": \"dev\"}", + }, + ... + } +] +``` + +Send a request to the Route without any URL query string: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST -d '{"env": "dev"}' +``` + +You should not observe a log entry without the request body. + +:::info + +If you have customized the `log_format` in addition to setting `include_req_body` or `include_resp_body` to `true`, the Plugin would not include the bodies in the logs. 
+ +As a workaround, you may be able to use the NGINX variable `$request_body` in the log format, such as: + +```json +{ + "skywalking-logger": { + ..., + "log_format": {"body": "$request_body"} + } +} +``` + +::: + +### Associate Traces with Logs + +The following example demonstrates how you can configure the `skywalking-logger` Plugin on a Route to log information of requests hitting the route. + +Create a Route with the `skywalking-logger` Plugin and configure the Plugin with your OAP server URI: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + "skywalking": { + "sample_ratio": 1 + }, + "skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Generate a few requests to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive `HTTP/1.1 200 OK` responses. + +In [Skywalking UI](http://localhost:8080), navigate to __General Service__ > __Services__. You should see a service called `APISIX` with a trace corresponding to your request, where you can view the associated logs: + +![trace context](https://static.apiseven.com/uploads/2025/01/16/soUpXm6b_trace-view-logs.png) + +![associated log](https://static.apiseven.com/uploads/2025/01/16/XD934LvU_associated-logs.png) diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/skywalking.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/skywalking.md new file mode 100644 index 0000000..e49a2a5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/skywalking.md @@ -0,0 +1,176 @@ +--- +title: skywalking +keywords: + - Apache APISIX + - API Gateway + - Plugin + - SkyWalking +description: The skywalking Plugin supports the integrating with Apache SkyWalking for request tracing. 
+--- + + + + + + + +## Description + +The `skywalking` Plugin supports the integrating with [Apache SkyWalking](https://skywalking.apache.org) for request tracing. + +SkyWalking uses its native Nginx Lua tracer to provide tracing, topology analysis, and metrics from both service and URI perspectives. APISIX supports HTTP protocol to interact with the SkyWalking server. + +The server currently supports two protocols: HTTP and gRPC. In APISIX, only HTTP is currently supported. + +## Static Configurations + +By default, service names and endpoint address for the Plugin are pre-configured in the [default configuration](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua). + +To customize these values, add the corresponding configurations to `config.yaml`. For example: + +```yaml +plugin_attr: + skywalking: + report_interval: 3 # Reporting interval time in seconds. + service_name: APISIX # Service name for SkyWalking reporter. + service_instance_name: "APISIX Instance Name" # Service instance name for SkyWalking reporter. + # Set to $hostname to get the local hostname. + endpoint_addr: http://127.0.0.1:12800 # SkyWalking HTTP endpoint. +``` + +Reload APISIX for changes to take effect. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|--------------|--------|----------|---------|--------------|----------------------------------------------------------------------------| +| sample_ratio | number | True | 1 | [0.00001, 1] | Frequency of request sampling. Setting the sample ratio to `1` means to sample all requests. | + +## Example + +To follow along the example, start a storage, OAP and Booster UI with Docker Compose, following [Skywalking's documentation](https://skywalking.apache.org/docs/main/next/en/setup/backend/backend-docker/). Once set up, the OAP server should be listening on `12800` and you should be able to access the UI at [http://localhost:8080](http://localhost:8080). 
+ +Update APISIX configuration file to enable the `skywalking` plugin, which is disabled by default, and update the endpoint address: + +```yaml title="config.yaml" +plugins: + - skywalking + - ... + +plugin_attr: + skywalking: + report_interval: 3 + service_name: APISIX + service_instance_name: APISIX Instance + endpoint_addr: http://192.168.2.103:12800 +``` + +Reload APISIX for configuration changes to take effect. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Trace All Requests + +The following example demonstrates how you can trace all requests passing through a Route. + +Create a Route with `skywalking` and configure the sampling ratio to be 1 to trace all requests: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-route", + "uri": "/anything", + "plugins": { + "skywalking": { + "sample_ratio": 1 + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Send a few requests to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive `HTTP/1.1 200 OK` responses. + +In [Skywalking UI](http://localhost:8080), navigate to __General Service__ > __Services__. You should see a service called `APISIX` with traces corresponding to your requests: + +![SkyWalking APISIX traces](https://static.apiseven.com/uploads/2025/01/15/UdwiO8NJ_skywalking-traces.png) + +### Associate Traces with Logs + +The following example demonstrates how you can configure the `skywalking-logger` Plugin on a Route to log information of requests hitting the Route. 
+ +Create a Route with the `skywalking-logger` Plugin and configure the Plugin with your OAP server URI: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + "skywalking": { + "sample_ratio": 1 + }, + "skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +Generate a few requests to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive `HTTP/1.1 200 OK` responses. + +In [Skywalking UI](http://localhost:8080), navigate to __General Service__ > __Services__. You should see a service called `APISIX` with a trace corresponding to your request, where you can view the associated logs: + +![trace context](https://static.apiseven.com/uploads/2025/01/16/soUpXm6b_trace-view-logs.png) + +![associated log](https://static.apiseven.com/uploads/2025/01/16/XD934LvU_associated-logs.png) diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/sls-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/sls-logger.md new file mode 100644 index 0000000..d779386 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/sls-logger.md @@ -0,0 +1,184 @@ +--- +title: sls-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - SLS Logger + - Alibaba Cloud Log Service +description: This document contains information about the Apache APISIX sls-logger Plugin. +--- + + +## Description + +The `sls-logger` Plugin is used to push logs to [Alibaba Cloud log Service](https://www.alibabacloud.com/help/en/log-service/latest/use-the-syslog-protocol-to-upload-logs) using [RF5424](https://tools.ietf.org/html/rfc5424). + +It might take some time to receive the log data. 
It will be automatically sent after the timer function in the [batch processor](../batch-processor.md) expires. + +## Attributes + +| Name | Required | Description | +|-------------------|----------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| host | True | IP address or the hostname of the TCP server. See [Alibaba Cloud log service documentation](https://www.alibabacloud.com/help/en/log-service/latest/endpoints) for details. Use IP address instead of domain. | +| port | True | Target upstream port. Defaults to `10009`. | +| timeout | False | Timeout for the upstream to send data. | +| log_format | False | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| project | True | Project name in Alibaba Cloud log service. Create SLS before using this Plugin. | +| logstore | True | logstore name in Ali Cloud log service. Create SLS before using this Plugin. | +| access_key_id | True | AccessKey ID in Alibaba Cloud. See [Authorization](https://www.alibabacloud.com/help/en/log-service/latest/create-a-ram-user-and-authorize-the-ram-user-to-access-log-service) for more details. | +| access_key_secret | True | AccessKey Secret in Alibaba Cloud. See [Authorization](https://www.alibabacloud.com/help/en/log-service/latest/create-a-ram-user-and-authorize-the-ram-user-to-access-log-service) for more details. | +| include_req_body | True | When set to `true`, includes the request body in the log. | +| include_req_body_expr | No | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. 
See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
+| include_resp_body | False | When set to `true` includes the response body in the log. |
+| include_resp_body_expr | False | Filter for when the `include_resp_body` attribute is set to `true`. Response body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
+| name | False | Unique identifier for the batch processor. If you use Prometheus to monitor APISIX metrics, the name is exported in `apisix_batch_process_entries`. |
+
+NOTE: `encrypt_fields = {"access_key_secret"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields).
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. 
+ +### Example of default log format + +```json +{ + "route_conf": { + "host": "100.100.99.135", + "buffer_duration": 60, + "timeout": 30000, + "include_req_body": false, + "logstore": "your_logstore", + "log_format": { + "vip": "$remote_addr" + }, + "project": "your_project", + "inactive_timeout": 5, + "access_key_id": "your_access_key_id", + "access_key_secret": "your_access_key_secret", + "batch_max_size": 1000, + "max_retry_count": 0, + "retry_delay": 1, + "port": 10009, + "name": "sls-logger" + }, + "data": "<46>1 2024-01-06T03:29:56.457Z localhost apisix 28063 - [logservice project=\"your_project\" logstore=\"your_logstore\" access-key-id=\"your_access_key_id\" access-key-secret=\"your_access_key_secret\"] {\"vip\":\"127.0.0.1\",\"route_id\":\"1\"}\n" +} +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `sls-logger` Plugin. 
+ +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/sls-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Enable Plugin + +The example below shows how you can configure the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/5 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "sls-logger": { + "host": "100.100.99.135", + "port": 10009, + "project": "your_project", + "logstore": "your_logstore", + "access_key_id": "your_access_key_id", + "access_key_secret": "your_access_key_secret", + "timeout": 30000 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your Ali Cloud log server: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +Now if you check your Ali Cloud log server, you will be able to see the logs: + +![sls logger view](../../../assets/images/plugin/sls-logger-1.png "sls logger view") + +## Delete Plugin + +To remove the `sls-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/splunk-hec-logging.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/splunk-hec-logging.md new file mode 100644 index 0000000..565f84e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/splunk-hec-logging.md @@ -0,0 +1,212 @@ +--- +title: splunk-hec-logging +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Splunk HTTP Event Collector + - splunk-hec-logging +description: This document contains information about the Apache APISIX splunk-hec-logging Plugin. +--- + + + +## Description + +The `splunk-hec-logging` Plugin is used to forward logs to [Splunk HTTP Event Collector (HEC)](https://docs.splunk.com/Documentation/Splunk/8.2.6/Data/UsetheHTTPEventCollector) for analysis and storage. + +When the Plugin is enabled, APISIX will serialize the request context information to [Splunk Event Data format](https://docs.splunk.com/Documentation/Splunk/latest/Data/FormateventsforHTTPEventCollector#Event_metadata) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Splunk HEC. See [batch processor](../batch-processor.md) for more details. + +## Attributes + +| Name | Required | Default | Description | +|------------------|----------|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| endpoint | True | | Splunk HEC endpoint configurations. | +| endpoint.uri | True | | Splunk HEC event collector API endpoint. | +| endpoint.token | True | | Splunk HEC authentication token. 
| +| endpoint.channel | False | | Splunk HEC send data channel identifier. Read more: [About HTTP Event Collector Indexer Acknowledgment](https://docs.splunk.com/Documentation/Splunk/8.2.3/Data/AboutHECIDXAck). | +| endpoint.timeout | False | 10 | Splunk HEC send data timeout in seconds. | +| endpoint.keepalive_timeout | False | 60000 | Keepalive timeout in milliseconds. | +| ssl_verify | False | true | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | +| log_format | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +### Example of default log format + +```json +{ + "sourcetype": "_json", + "time": 1704513555.392, + "event": { + "upstream": "127.0.0.1:1980", + "request_url": "http://localhost:1984/hello", + "request_query": {}, + "request_size": 59, + "response_headers": { + "content-length": "12", + "server": "APISIX/3.7.0", + "content-type": "text/plain", + "connection": "close" + }, + "response_status": 200, + "response_size": 118, + "latency": 108.00004005432, + "request_method": "GET", + "request_headers": { + "connection": "close", + "host": "localhost" + } + }, + "source": "apache-apisix-splunk-hec-logging", + "host": "localhost" +} +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. 
The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `splunk-hec-logging` Plugin. + +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/splunk-hec-logging -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```json +[{"time":1673976669.269,"source":"apache-apisix-splunk-hec-logging","event":{"host":"localhost","client_ip":"127.0.0.1","@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1"},"host":"DESKTOP-2022Q8F-wsl","sourcetype":"_json"}] +``` + +## Enable Plugin + +### Full configuration + +The example below shows a complete configuration of the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: 
$admin_key" -X PUT -d ' +{ + "plugins":{ + "splunk-hec-logging":{ + "endpoint":{ + "uri":"http://127.0.0.1:8088/services/collector", + "token":"BD274822-96AA-4DA6-90EC-18940FB2414C", + "channel":"FE0ECFAD-13D5-401B-847D-77833BD77131", + "timeout":60 + }, + "buffer_duration":60, + "max_retry_count":0, + "retry_delay":1, + "inactive_timeout":2, + "batch_max_size":10 + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/splunk.do" +}' +``` + +### Minimal configuration + +The example below shows a bare minimum configuration of the Plugin on a Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins":{ + "splunk-hec-logging":{ + "endpoint":{ + "uri":"http://127.0.0.1:8088/services/collector", + "token":"BD274822-96AA-4DA6-90EC-18940FB2414C" + } + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/splunk.do" +}' +``` + +## Example usage + +Once you have configured the Route to use the Plugin, when you make a request to APISIX, it will be logged in your Splunk server: + +```shell +curl -i http://127.0.0.1:9080/splunk.do?q=hello +``` + +You should be able to login and search these logs from your Splunk dashboard: + +![splunk hec search view](../../../assets/images/plugin/splunk-hec-admin-en.png) + +## Delete Plugin + +To remove the `splunk-hec-logging` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/syslog.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/syslog.md new file mode 100644 index 0000000..8cba65e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/syslog.md @@ -0,0 +1,154 @@ +--- +title: syslog +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Syslog +description: This document contains information about the Apache APISIX syslog Plugin. +--- + + +## Description + +The `syslog` Plugin is used to push logs to a Syslog server. + +Logs can be set as JSON objects. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|------------------|---------|----------|--------------|---------------|--------------------------------------------------------------------------------------------------------------------------| +| host | string | True | | | IP address or the hostname of the Syslog server. | +| port | integer | True | | | Target port of the Syslog server. | +| name | string | False | "sys logger" | | Identifier for the server. If you use Prometheus to monitor APISIX metrics, the name is exported in `apisix_batch_process_entries`. | +| timeout | integer | False | 3000 | [1, ...] | Timeout in ms for the upstream to send data. | +| tls | boolean | False | false | | When set to `true` performs TLS verification. | +| flush_limit | integer | False | 4096 | [1, ...] | Maximum size of the buffer (KB) and the current message before it is flushed and written to the server. | +| drop_limit | integer | False | 1048576 | | Maximum size of the buffer (KB) and the current message before the current message is dropped because of the size limit. 
| sock_type | string | False | "tcp" | ["tcp", "udp"] | Transport layer protocol to use. |
+| pool_size | integer | False | 5 | [5, ...] | Keep-alive pool size used by `sock:keepalive`. |
+| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. |
+| include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. |
+| include_req_body_expr | array | False | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
+| include_resp_body | boolean | False | false | [false, true] | When set to `true` includes the response body in the log. |
+| include_resp_body_expr | array | False | | | When the `include_resp_body` attribute is set to `true`, use this to filter based on [lua-resty-expr](https://github.com/api7/lua-resty-expr). If present, only logs the response if the expression evaluates to `true`. |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. 
+ +### meta_format example + +```text +"<46>1 2024-01-06T02:30:59.145Z 127.0.0.1 apisix 82324 - - {\"response\":{\"status\":200,\"size\":141,\"headers\":{\"content-type\":\"text/plain\",\"server\":\"APISIX/3.7.0\",\"transfer-encoding\":\"chunked\",\"connection\":\"close\"}},\"route_id\":\"1\",\"server\":{\"hostname\":\"baiyundeMacBook-Pro.local\",\"version\":\"3.7.0\"},\"request\":{\"uri\":\"/opentracing\",\"url\":\"http://127.0.0.1:1984/opentracing\",\"querystring\":{},\"method\":\"GET\",\"size\":155,\"headers\":{\"content-type\":\"application/x-www-form-urlencoded\",\"host\":\"127.0.0.1:1984\",\"user-agent\":\"lua-resty-http/0.16.1 (Lua) ngx_lua/10025\"}},\"upstream\":\"127.0.0.1:1982\",\"apisix_latency\":100.99999809265,\"service_id\":\"\",\"upstream_latency\":1,\"start_time\":1704508259044,\"client_ip\":\"127.0.0.1\",\"latency\":101.99999809265}\n" +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `syslog` Plugin. 
+ +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/syslog -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Enable Plugin + +The example below shows how you can enable the Plugin for a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044, + "flush_limit" : 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your Syslog server: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## Delete Plugin + +To remove the `syslog` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/tcp-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/tcp-logger.md new file mode 100644 index 0000000..53fd43e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/tcp-logger.md @@ -0,0 +1,189 @@ +--- +title: tcp-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - TCP Logger + - tcp-logger +description: This document contains information about the Apache APISIX tcp-logger Plugin. +--- + + + +## Description + +The `tcp-logger` Plugin can be used to push log data requests to TCP servers. + +This provides the ability to send log data requests as JSON objects to monitoring tools and other TCP servers. + +This plugin also allows to push logs as a batch to your external TCP server. It might take some time to receive the log data. It will be automatically sent after the timer function in the [batch processor](../batch-processor.md) expires. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|------------------|---------|----------|---------|--------------|----------------------------------------------------------| +| host | string | True | | | IP address or the hostname of the TCP server. | +| port | integer | True | | [0,...] | Target upstream port. | +| timeout | integer | False | 1000 | [1,...] | Timeout for the upstream to send data. | +| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. 
|
+| tls | boolean | False | false | | When set to `true` performs SSL verification. |
+| tls_options | string | False | | | TLS options. |
+| include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. |
+| include_req_body_expr | array | False | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
+| include_resp_body | boolean | False | false | [false, true] | When set to `true` includes the response body in the log. |
+| include_resp_body_expr | array | False | | | Filter for when the `include_resp_body` attribute is set to `true`. Response body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. 
+ +### Example of default log format + +```json +{ + "response": { + "status": 200, + "headers": { + "server": "APISIX/3.7.0", + "content-type": "text/plain", + "content-length": "12", + "connection": "close" + }, + "size": 118 + }, + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "start_time": 1704527628474, + "client_ip": "127.0.0.1", + "service_id": "", + "latency": 102.9999256134, + "apisix_latency": 100.9999256134, + "upstream_latency": 2, + "request": { + "headers": { + "connection": "close", + "host": "localhost" + }, + "size": 59, + "method": "GET", + "uri": "/hello", + "url": "http://localhost:1984/hello", + "querystring": {} + }, + "upstream": "127.0.0.1:1980", + "route_id": "1" +} +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `tcp-logger` Plugin. 
+ +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/tcp-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```json +{"@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1","host":"localhost","client_ip":"127.0.0.1"} +``` + +## Enable Plugin + +The example below shows how you can enable the `tcp-logger` Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/5 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 5044, + "tls": false, + "batch_max_size": 1, + "name": "tcp logger" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your TCP server: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## Delete Plugin + +To remove the `tcp-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/tencent-cloud-cls.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/tencent-cloud-cls.md new file mode 100644 index 0000000..f1ee4c4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/tencent-cloud-cls.md @@ -0,0 +1,196 @@ +--- +title: tencent-cloud-cls +keywords: + - Apache APISIX + - API Gateway + - Plugin + - CLS + - Tencent Cloud +description: This document contains information about the Apache APISIX tencent-cloud-cls Plugin. +--- + + + +## Description + +The `tencent-cloud-cls` Plugin uses [TencentCloud CLS](https://cloud.tencent.com/document/product/614) API to forward APISIX logs to your topic. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ----------------- | ------- |----------|---------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| cls_host | string | Yes | | | CLS API host,please refer [Uploading Structured Logs](https://www.tencentcloud.com/document/api/614/16873). | +| cls_topic | string | Yes | | | topic id of CLS. | +| secret_id | string | Yes | | | SecretId of your API key. | +| secret_key | string | Yes | | | SecretKey of your API key. | +| sample_ratio | number | No | 1 | [0.00001, 1] | How often to sample the requests. Setting to `1` will sample all requests. | +| include_req_body | boolean | No | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to NGINX's limitations. 
|
+| include_req_body_expr | array | No | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
+| include_resp_body | boolean | No | false | [false, true] | When set to `true` includes the response body in the log. |
+| include_resp_body_expr | array | No | | | Filter for when the `include_resp_body` attribute is set to `true`. Response body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
+| global_tag | object | No | | | Key-value pairs in JSON, sent with each log. |
+| log_format | object | No | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. |
+
+NOTE: `encrypt_fields = {"secret_key"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields).
+
+This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. 
+ +### Example of default log format + +```json +{ + "response": { + "headers": { + "content-type": "text/plain", + "connection": "close", + "server": "APISIX/3.7.0", + "transfer-encoding": "chunked" + }, + "size": 136, + "status": 200 + }, + "route_id": "1", + "upstream": "127.0.0.1:1982", + "client_ip": "127.0.0.1", + "apisix_latency": 100.99985313416, + "service_id": "", + "latency": 103.99985313416, + "start_time": 1704525145772, + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "upstream_latency": 3, + "request": { + "headers": { + "connection": "close", + "host": "localhost" + }, + "url": "http://localhost:1984/opentracing", + "querystring": {}, + "method": "GET", + "size": 65, + "uri": "/opentracing" + } +} +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `tencent-cloud-cls` Plugin. 
+ +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/tencent-cloud-cls \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Enable Plugin + +The example below shows how you can enable the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "ap-guangzhou.cls.tencentyun.com", + "cls_topic": "${your CLS topic name}", + "global_tag": { + "module": "cls-logger", + "server_name": "YourApiGateWay" + }, + "include_req_body": true, + "include_resp_body": true, + "secret_id": "${your secret id}", + "secret_key": "${your secret key}" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your cls topic: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## Delete Plugin + +To disable this Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/traffic-split.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/traffic-split.md new file mode 100644 index 0000000..a4a65b6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/traffic-split.md @@ -0,0 +1,637 @@ +--- +title: traffic-split +keywords: + - Apache APISIX + - API Gateway + - Traffic Split + - Blue-green Deployment + - Canary Deployment +description: The traffic-split Plugin directs traffic to various Upstream services based on conditions and/or weights. It provides a dynamic and flexible approach to implement release strategies and manage traffic. +--- + + + + + + + +## Description + +The `traffic-split` Plugin directs traffic to various Upstream services based on conditions and/or weights. It provides a dynamic and flexible approach to implement release strategies and manage traffic. + +:::note + +The traffic ratio between Upstream services may be less accurate since round robin algorithm is used to direct traffic (especially when the state is reset). 
+ +::: + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|--------------------------------|----------------|----------|------------|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| rules | array[object] | False | | | An array of one or more pairs of matching conditions and actions to be executed. | +| rules.match | array[object] | False | | | Rules to match for conditional traffic split. | +| rules.match.vars | array[array] | False | | | An array of one or more matching conditions in the form of [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) to conditionally execute the plugin. | +| rules.weighted_upstreams | array[object] | False | | | List of Upstream configurations. | +| rules.weighted_upstreams.upstream_id | string/integer | False | | | ID of the configured Upstream object. | +| rules.weighted_upstreams.weight | integer | False | weight = 1 | | Weight for each upstream. | +| rules.weighted_upstreams.upstream | object | False | | | Configuration of the upstream. Certain Upstream configuration options are not supported here. These fields are `service_name`, `discovery_type`, `checks`, `retries`, `retry_timeout`, `desc`, and `labels`. As a workaround, you can create an Upstream object and configure it in `upstream_id`. | +| rules.weighted_upstreams.upstream.type | array | False | roundrobin | [roundrobin, chash] | Algorithm for traffic splitting. `roundrobin` for weighted round robin and `chash` for consistent hashing. | +| rules.weighted_upstreams.upstream.hash_on | array | False | vars | | Used when `type` is `chash`. 
Support hashing on [NGINX variables](https://nginx.org/en/docs/varindex.html), headers, cookie, Consumer, or a combination of [NGINX variables](https://nginx.org/en/docs/varindex.html). | +| rules.weighted_upstreams.upstream.key | string | False | | | Used when `type` is `chash`. When `hash_on` is set to `header` or `cookie`, `key` is required. When `hash_on` is set to `consumer`, `key` is not required as the Consumer name will be used as the key automatically. | +| rules.weighted_upstreams.upstream.nodes | object | False | | | Addresses of the Upstream nodes. | +| rules.weighted_upstreams.upstream.timeout | object | False | 15 | | Timeout in seconds for connecting, sending and receiving messages. | +| rules.weighted_upstreams.upstream.pass_host | array | False | "pass" | ["pass", "node", "rewrite"] | Mode deciding how the host name is passed. `pass` passes the client's host name to the upstream. `node` passes the host configured in the node of the upstream. `rewrite` passes the value configured in `upstream_host`. | +| rules.weighted_upstreams.upstream.name | string | False | | | Identifier for the Upstream for specifying service name, usage scenarios, and so on. | +| rules.weighted_upstreams.upstream.upstream_host | string | False | | | Used when `pass_host` is `rewrite`. Host name of the upstream. | + +## Examples + +The examples below show different use cases for using the `traffic-split` Plugin. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Implement Canary Release + +The following example demonstrates how to implement canary release with this Plugin. + +A Canary release is a gradual deployment in which an increasing percentage of traffic is directed to a new release, allowing for a controlled and monitored rollout. 
This method ensures that any potential issues or bugs in the new release can be identified and addressed early on, before fully redirecting all traffic. + +Create a Route and configure `traffic-split` Plugin with the following rules: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/headers", + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + }, + "weight": 3 + }, + { + "weight": 2 + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +The proportion of traffic to each Upstream is determined by the weight of the Upstream relative to the total weight of all upstreams. Here, the total weight is calculated as: 3 + 2 = 5. + +Therefore, 60% of the traffic are to be forwarded to `httpbin.org` and the other 40% of the traffic are to be forwarded to `mock.api7.ai`. + +Send 10 consecutive requests to the Route to verify: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers" -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + +You should see a response similar to the following: + +```text +httpbin.org: 6, mock.api7.ai: 4 +``` + +Adjust the Upstream weights accordingly to complete the canary release. + +### Implement Blue-Green Deployment + +The following example demonstrates how to implement blue-green deployment with this Plugin. + +Blue-green deployment is a deployment strategy that involves maintaining two identical environments: the _blue_ and the _green_. 
The blue environment refers to the current production deployment and the green environment refers to the new deployment. Once the green environment is tested to be ready for production, traffic will be routed to the green environment, making it the new production deployment. + +Create a Route and configure `traffic-split` Plugin to execute the Plugin to redirect traffic only when the request contains a header `release: new_release`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/headers", + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["http_release","==","new_release"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + } + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +Send a request to the Route with the `release` header: + +```shell +curl "http://127.0.0.1:9080/headers" -H 'release: new_release' +``` + +You should see a response similar to the following: + +```json +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + ... + } +} +``` + +Send a request to the Route without any additional header: + +```shell +curl "http://127.0.0.1:9080/headers" +``` + +You should see a response similar to the following: + +```json +{ + "headers": { + "accept": "*/*", + "host": "mock.api7.ai", + ... + } +} +``` + +### Define Matching Condition for POST Request With APISIX Expressions + +The following example demonstrates how to use [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) in rules to conditionally execute the Plugin when certain condition of a POST request is satisfied. 
+ +Create a Route and configure `traffic-split` Plugin with the following rules: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/post", + "methods": ["POST"], + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["post_arg_id", "==", "1"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + } + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +Send a POST request with body `id=1`: + +```shell +curl "http://127.0.0.1:9080/post" -X POST \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d 'id=1' +``` + +You should see a response similar to the following: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": { + "id": "1" + }, + "headers": { + "Accept": "*/*", + "Content-Length": "4", + "Content-Type": "application/x-www-form-urlencoded", + "Host": "httpbin.org", + ... + }, + ... +} +``` + +Send a POST request without `id=1` in the body: + +```shell +curl "http://127.0.0.1:9080/post" -X POST \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d 'random=string' +``` + +You should see that the request was forwarded to `mock.api7.ai`. + +### Define AND Matching Conditions With APISIX Expressions + +The following example demonstrates how to use [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) in rules to conditionally execute the Plugin when multiple conditions are satisfied. 
+ +Create a Route and configure `traffic-split` Plugin to redirect traffic only when all three conditions are satisfied: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/headers", + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["arg_name","==","jack"], + ["http_user-id",">","23"], + ["http_apisix-key","~~","[a-z]+"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + }, + "weight": 3 + }, + { + "weight": 2 + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +If conditions are satisfied, 60% of the traffic should be directed to `httpbin.org` and the other 40% should be directed to `mock.api7.ai`. If conditions are not satisfied, all traffic should be directed to `mock.api7.ai`. 
+ +Send 10 consecutive requests that satisfy all conditions to verify: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers?name=jack" -H 'user-id: 30' -H 'apisix-key: helloapisix' -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + +You should see a response similar to the following: + +```text +httpbin.org: 6, mock.api7.ai: 4 +``` + +Send 10 consecutive requests that do not satisfy the conditions to verify: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers?name=random" -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + +You should see a response similar to the following: + +```text +httpbin.org: 0, mock.api7.ai: 10 +``` + +### Define OR Matching Conditions With APISIX Expressions + +The following example demonstrates how to use [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) in rules to conditionally execute the Plugin when either set of the condition is satisfied. 
+ +Create a Route and configure `traffic-split` Plugin to redirect traffic when either set of the configured conditions are satisfied: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/headers", + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["arg_name","==","jack"], + ["http_user-id",">","23"], + ["http_apisix-key","~~","[a-z]+"] + ] + }, + { + "vars": [ + ["arg_name2","==","rose"], + ["http_user-id2","!",">","33"], + ["http_apisix-key2","~~","[a-z]+"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + }, + "weight": 3 + }, + { + "weight": 2 + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +Alternatively, you can also use the OR operator in the [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) for these conditions. + +If conditions are satisfied, 60% of the traffic should be directed to `httpbin.org` and the other 40% should be directed to `mock.api7.ai`. If conditions are not satisfied, all traffic should be directed to `mock.api7.ai`. 
+ +Send 10 consecutive requests that satisfy the second set of conditions to verify: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers?name2=rose" -H 'user-id:30' -H 'apisix-key2: helloapisix' -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + +You should see a response similar to the following: + +```json +httpbin.org: 6, mock.api7.ai: 4 +``` + +Send 10 consecutive requests that do not satisfy any set of conditions to verify: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers?name=random" -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + +You should see a response similar to the following: + +```json +httpbin.org: 0, mock.api7.ai: 10 +``` + +### Configure Different Rules for Different Upstreams + +The following example demonstrates how to set one-to-one mapping between rule sets and upstreams. 
+ +Create a Route and configure `traffic-split` Plugin with the following matching rules to redirect traffic when the request contains a header `x-api-id: 1` or `x-api-id: 2`, to the corresponding Upstream service: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/headers", + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["http_x-api-id","==","1"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + }, + "weight": 1 + } + ] + }, + { + "match": [ + { + "vars": [ + ["http_x-api-id","==","2"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + }, + "weight": 1 + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "postman-echo.com:443": 1 + }, + "scheme": "https", + "pass_host": "node" + } + }' +``` + +Send a request with header `x-api-id: 1`: + +```shell +curl "http://127.0.0.1:9080/headers" -H 'x-api-id: 1' +``` + +You should see an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + ... + } +} +``` + +Send a request with header `x-api-id: 2`: + +```shell +curl "http://127.0.0.1:9080/headers" -H 'x-api-id: 2' +``` + +You should see an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "headers": { + "accept": "*/*", + "host": "mock.api7.ai", + ... + } +} +``` + +Send a request without any additional header: + +```shell +curl "http://127.0.0.1:9080/headers" +``` + +You should see a response similar to the following: + +```json +{ + "headers": { + "accept": "*/*", + "host": "postman-echo.com", + ... 
+ } +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ua-restriction.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ua-restriction.md new file mode 100644 index 0000000..0d243cb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/ua-restriction.md @@ -0,0 +1,159 @@ +--- +title: ua-restriction +keywords: + - Apache APISIX + - API Gateway + - UA restriction +description: The ua-restriction Plugin restricts access to upstream resources using an allowlist or denylist of user agents, preventing overload from web crawlers and enhancing API security. +--- + + + + + + + +## Description + +The `ua-restriction` Plugin supports restricting access to upstream resources through either configuring an allowlist or denylist of user agents. A common use case is to prevent web crawlers from overloading the upstream resources and causing service degradation. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|----------------|---------------|----------|--------------|-------------------------|---------------------------------------------------------------------------------| +| bypass_missing | boolean | False | false | | If true, bypass the user agent restriction check when the `User-Agent` header is missing. | +| allowlist | array[string] | False | | | List of user agents to allow. Support regular expressions. At least one of the `allowlist` and `denylist` should be configured, but they cannot be configured at the same time. | +| denylist | array[string] | False | | | List of user agents to deny. Support regular expressions. At least one of the `allowlist` and `denylist` should be configured, but they cannot be configured at the same time. | +| message | string | False | "Not allowed" | | Message returned when the user agent is denied access. | + +## Examples + +The examples below demonstrate how you can configure `ua-restriction` for different scenarios. 
+ +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Reject Web Crawlers and Customize Error Message + +The following example demonstrates how you can configure the Plugin to fend off unwanted web crawlers and customize the rejection message. + +Create a Route and configure the Plugin to block specific crawlers from accessing resources with a customized message: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ua-restriction-route", + "uri": "/anything", + "plugins": { + "ua-restriction": { + "bypass_missing": false, + "denylist": [ + "(Baiduspider)/(\\d+)\\.(\\d+)", + "bad-bot-1" + ], + "message": "Access denied" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response. + +Send another request to the Route with a disallowed user agent: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H 'User-Agent: Baiduspider/5.0' +``` + +You should receive an `HTTP/1.1 403 Forbidden` response with the following message: + +```text +{"message":"Access denied"} +``` + +### Bypass UA Restriction Checks + +The following example demonstrates how to configure the Plugin to allow requests of a specific user agent to bypass the UA restriction. 
+ +Create a Route as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ua-restriction-route", + "uri": "/anything", + "plugins": { + "ua-restriction": { + "bypass_missing": true, + "allowlist": [ + "good-bot-1" + ], + "message": "Access denied" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +Send a request to the Route without modifying the user agent: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 403 Forbidden` response with the following message: + +```text +{"message":"Access denied"} +``` + +Send another request to the Route with an empty user agent: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H 'User-Agent: ' +``` + +You should receive an `HTTP/1.1 200 OK` response. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/udp-logger.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/udp-logger.md new file mode 100644 index 0000000..503566a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/udp-logger.md @@ -0,0 +1,186 @@ +--- +title: udp-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - UDP Logger +description: This document contains information about the Apache APISIX udp-logger Plugin. +--- + + + +## Description + +The `udp-logger` Plugin can be used to push log data requests to UDP servers. + +This provides the ability to send log data requests as JSON objects to monitoring tools and other UDP servers. + +This plugin also allows to push logs as a batch to your external UDP server. It might take some time to receive the log data. It will be automatically sent after the timer function in the [batch processor](../batch-processor.md) expires. 
+ +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|------------------|---------|----------|--------------|--------------|----------------------------------------------------------| +| host | string | True | | | IP address or the hostname of the UDP server. | +| port | integer | True | | [0,...] | Target upstream port. | +| timeout | integer | False | 3 | [1,...] | Timeout for the upstream to send data. | +| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| name | string | False | "udp logger" | | Unique identifier for the batch processor. If you use Prometheus to monitor APISIX metrics, the name is exported in `apisix_batch_process_entries`. | +| include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. | +| include_req_body_expr | array | No | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | +| include_resp_body | boolean | No | false | [false, true] | When set to `true` includes the response body in the log. | +| include_resp_body_expr | array | No | | | Filter for when the `include_resp_body` attribute is set to `true`. Response body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | + +This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. 
See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +### Example of default log format + +```json +{ + "apisix_latency": 99.999988555908, + "service_id": "", + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "request": { + "method": "GET", + "headers": { + "connection": "close", + "host": "localhost" + }, + "url": "http://localhost:1984/opentracing", + "size": 65, + "querystring": {}, + "uri": "/opentracing" + }, + "start_time": 1704527399740, + "client_ip": "127.0.0.1", + "response": { + "status": 200, + "size": 136, + "headers": { + "server": "APISIX/3.7.0", + "content-type": "text/plain", + "transfer-encoding": "chunked", + "connection": "close" + } + }, + "upstream": "127.0.0.1:1982", + "route_id": "1", + "upstream_latency": 12, + "latency": 111.99998855591 +} +``` + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +| ---------- | ------ | -------- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the Plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `udp-logger` Plugin. 
+ +::: + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/udp-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```json +{"@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1","host":"localhost","client_ip":"127.0.0.1"} +``` + +## Enable Plugin + +The example below shows how you can enable the Plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/5 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "udp-logger": { + "host": "127.0.0.1", + "port": 3000, + "batch_max_size": 1, + "name": "udp logger" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your UDP server: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## Delete Plugin + +To remove the `udp-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/uri-blocker.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/uri-blocker.md new file mode 100644 index 0000000..07cb774 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/uri-blocker.md @@ -0,0 +1,120 @@ +--- +title: uri-blocker +keywords: + - Apache APISIX + - API Gateway + - URI Blocker +description: This document contains information about the Apache APISIX uri-blocker Plugin. +--- + + + +## Description + +The `uri-blocker` Plugin intercepts user requests with a set of `block_rules`. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +|------------------|---------------|----------|---------|--------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| block_rules | array[string] | True | | | List of regex filter rules. If the request URI hits any one of the rules, the response code is set to the `rejected_code` and the user request is terminated. For example, `["root.exe", "root.m+"]`. | +| rejected_code | integer | False | 403 | [200, ...] | HTTP status code returned when the request URI hits any of the `block_rules`. | +| rejected_msg | string | False | | non-empty | HTTP response body returned when the request URI hits any of the `block_rules`. | +| case_insensitive | boolean | False | false | | When set to `true`, ignores the case when matching request URI. 
| + +## Enable Plugin + +The example below enables the `uri-blocker` Plugin on a specific Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "plugins": { + "uri-blocker": { + "block_rules": ["root.exe", "root.m+"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## Example usage + +Once you have configured the Plugin as shown above, you can try accessing the file: + +```shell +curl -i http://127.0.0.1:9080/root.exe?a=a +``` + +```shell +HTTP/1.1 403 Forbidden +Date: Wed, 17 Jun 2020 13:55:41 GMT +Content-Type: text/html; charset=utf-8 +Content-Length: 150 +Connection: keep-alive +Server: APISIX web server + +... ... +``` + +You can also set a `rejected_msg` and it will be added to the response body: + +```shell +HTTP/1.1 403 Forbidden +Date: Wed, 17 Jun 2020 13:55:41 GMT +Content-Type: text/html; charset=utf-8 +Content-Length: 150 +Connection: keep-alive +Server: APISIX web server + +{"error_msg":"access is not allowed"} +``` + +## Delete Plugin + +To remove the `uri-blocker` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/wolf-rbac.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/wolf-rbac.md new file mode 100644 index 0000000..9ee6245 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/wolf-rbac.md @@ -0,0 +1,296 @@ +--- +title: wolf-rbac +keywords: + - Apache APISIX + - API Gateway + - Plugin + - wolf RBAC + - wolf-rbac +description: This document contains information about the Apache APISIX wolf-rbac Plugin. +--- + + + +## Description + +The `wolf-rbac` Plugin provides a [role-based access control](https://en.wikipedia.org/wiki/Role-based_access_control) system with [wolf](https://github.com/iGeeky/wolf) to a Route or a Service. This Plugin can be used with a [Consumer](../terminology/consumer.md). + +## Attributes + +| Name | Type | Required | Default | Description | +|---------------|--------|----------|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| server | string | False | "http://127.0.0.1:12180" | Service address of wolf server. | +| appid | string | False | "unset" | App id added in wolf console. This field supports saving the value in Secret Manager using the [APISIX Secret](../terminology/secret.md) resource. | +| header_prefix | string | False | "X-" | Prefix for a custom HTTP header. After authentication is successful, three headers will be added to the request header (for backend) and response header (for frontend) namely: `X-UserId`, `X-Username`, and `X-Nickname`. 
| + +## API + +This Plugin will add the following endpoints when enabled: + +- `/apisix/plugin/wolf-rbac/login` +- `/apisix/plugin/wolf-rbac/change_pwd` +- `/apisix/plugin/wolf-rbac/user_info` + +:::note + +You may need to use the [public-api](public-api.md) Plugin to expose this endpoint. + +::: + +## Pre-requisites + +To use this Plugin, you have to first [install wolf](https://github.com/iGeeky/wolf/blob/master/quick-start-with-docker/README.md) and start it. + +Once you have done that you need to add `application`, `admin`, `normal user`, `permission`, `resource` and user authorize to the [wolf-console](https://github.com/iGeeky/wolf/blob/master/docs/usage.md). + +## Enable Plugin + +You need to first configure the Plugin on a Consumer: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username":"wolf_rbac", + "plugins":{ + "wolf-rbac":{ + "server":"http://127.0.0.1:12180", + "appid":"restful" + } + }, + "desc":"wolf-rbac" +}' +``` + +:::note + +The `appid` added in the configuration should already exist in wolf. + +::: + +You can now add the Plugin to a Route or a Service: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/*", + "plugins": { + "wolf-rbac": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "www.baidu.com:80": 1 + } + } +}' +``` + +You can also use the [APISIX Dashboard](/docs/dashboard/USER_GUIDE) to complete the operation through a web UI. 
+ + + +## Example usage + +You can use the `public-api` Plugin to expose the API: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/wal -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/apisix/plugin/wolf-rbac/login", + "plugins": { + "public-api": {} + } +}' +``` + +Similarly, you can setup the Routes for `change_pwd` and `user_info`. + +You can now login and get a wolf `rbac_token`: + +```shell +curl http://127.0.0.1:9080/apisix/plugin/wolf-rbac/login -i \ +-H "Content-Type: application/json" \ +-d '{"appid": "restful", "username":"test", "password":"user-password", "authType":1}' +``` + +```shell +HTTP/1.1 200 OK +Date: Wed, 24 Jul 2019 10:33:31 GMT +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server +{"rbac_token":"V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts","user_info":{"nickname":"test","username":"test","id":"749"}} +``` + +:::note + +The `appid`, `username`, and `password` must be configured in the wolf system. + +`authType` is the authentication type—1 for password authentication (default) and 2 for LDAP authentication (v0.5.0+). + +::: + +You can also make a post request with `x-www-form-urlencoded` instead of JSON: + +```shell +curl http://127.0.0.1:9080/apisix/plugin/wolf-rbac/login -i \ +-H "Content-Type: application/x-www-form-urlencoded" \ +-d 'appid=restful&username=test&password=user-password' +``` + +Now you can test the Route: + +- without token: + +```shell +curl http://127.0.0.1:9080/ -H"Host: www.baidu.com" -i +``` + +``` +HTTP/1.1 401 Unauthorized +... 
+{"message":"Missing rbac token in request"} +``` + +- with token in `Authorization` header: + +```shell +curl http://127.0.0.1:9080/ -H"Host: www.baidu.com" \ +-H 'Authorization: V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts' -i +``` + +```shell +HTTP/1.1 200 OK + + +``` + +- with token in `x-rbac-token` header: + +```shell +curl http://127.0.0.1:9080/ -H"Host: www.baidu.com" \ +-H 'x-rbac-token: V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts' -i +``` + +```shell +HTTP/1.1 200 OK + + +``` + +- with token in request parameters: + +```shell +curl 'http://127.0.0.1:9080?rbac_token=V1%23restful%23eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts' -H"Host: www.baidu.com" -i +``` + +```shell +HTTP/1.1 200 OK + + +``` + +- with token in cookie: + +```shell +curl http://127.0.0.1:9080 -H"Host: www.baidu.com" \ +--cookie x-rbac-token=V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts -i +``` + +``` +HTTP/1.1 200 OK + + +``` + +And to get a user information: + +```shell +curl http://127.0.0.1:9080/apisix/plugin/wolf-rbac/user_info \ +--cookie x-rbac-token=V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts -i +``` + +```shell +HTTP/1.1 200 OK +{ + "user_info":{ 
+ "nickname":"test", + "lastLogin":1582816780, + "id":749, + "username":"test", + "appIDs":["restful"], + "manager":"none", + "permissions":{"USER_LIST":true}, + "profile":null, + "roles":{}, + "createTime":1578820506, + "email":"" + } +} +``` + +And to change a user's password: + +```shell +curl http://127.0.0.1:9080/apisix/plugin/wolf-rbac/change_pwd \ +-H "Content-Type: application/json" \ +--cookie x-rbac-token=V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts -i \ +-X PUT -d '{"oldPassword": "old password", "newPassword": "new password"}' +``` + +```shell +HTTP/1.1 200 OK +{"message":"success to change password"} +``` + +## Delete Plugin + +To remove the `wolf-rbac` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/*", + "plugins": { + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "www.baidu.com:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/workflow.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/workflow.md new file mode 100644 index 0000000..6b71146 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/workflow.md @@ -0,0 +1,386 @@ +--- +title: workflow +keywords: + - Apache APISIX + - API Gateway + - Plugin + - workflow + - traffic control +description: The workflow Plugin supports the conditional execution of user-defined actions to client traffic based a given set of rules. This provides a granular approach to implement complex traffic management. 
+--- + + + + + + + +## Description + +The `workflow` Plugin supports the conditional execution of user-defined actions to client traffic based a given set of rules, defined using [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list). This provides a granular approach to traffic management. + +## Attributes + +| Name | Type | Required | Default | Valid values | Description | +| ---------------------------- | ------------- | -------- | ------- | ------------ | ------------------------------------------------------------ | +| rules | array[object] | True | | | An array of one or more pairs of matching conditions and actions to be executed. | +| rules.case | array[array] | False | | | An array of one or more matching conditions in the form of [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list). For example, `{"arg_name", "==", "json"}`. | +| rules.actions | array[object] | True | | | An array of actions to be executed when a condition is successfully matched. Currently, the array only supports one action, and it should be either `return`, or `limit-count`. When the action is configured to be `return`, you can configure an HTTP status code to return to the client when the condition is matched. When the action is configured to be `limit-count`, you can configure all options of the [`limit-count`](./limit-count.md) plugin, except for `group`. | + +## Examples + +The examples below demonstrates how you can use the `workflow` Plugin for different scenarios. + +:::note + +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Return Response HTTP Status Code Conditionally + +The following example demonstrates a simple rule with one matching condition and one associated action to return HTTP status code conditionally. 
+ +Create a Route with the `workflow` Plugin to return HTTP status code 403 when the request's URI path is `/anything/rejected`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "workflow-route", + "uri": "/anything/*", + "plugins": { + "workflow":{ + "rules":[ + { + "case":[ + ["uri", "==", "/anything/rejected"] + ], + "actions":[ + [ + "return", + {"code": 403} + ] + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +Send a request that matches none of the rules: + +```shell +curl -i "http://127.0.0.1:9080/anything/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response. + +Send a request that matches the configured rule: + +```shell +curl -i "http://127.0.0.1:9080/anything/rejected" +``` + +You should receive an `HTTP/1.1 403 Forbidden` response of following: + +```text +{"error_msg":"rejected by workflow"} +``` + +### Apply Rate Limiting Conditionally by URI and Query Parameter + +The following example demonstrates a rule with two matching conditions and one associated action to rate limit requests conditionally. 
+ +Create a Route with the `workflow` Plugin to apply rate limiting when the URI path is `/anything/rate-limit` and the query parameter `env` value is `v1`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "workflow-route", + "uri": "/anything/*", + "plugins":{ + "workflow":{ + "rules":[ + { + "case":[ + ["uri", "==", "/anything/rate-limit"], + ["arg_env", "==", "v1"] + ], + "actions":[ + [ + "limit-count", + { + "count":1, + "time_window":60, + "rejected_code":429 + } + ] + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +Generate two consecutive requests that matches the second rule: + +```shell +curl -i "http://127.0.0.1:9080/anything/rate-limit?env=v1" +``` + +You should receive an `HTTP/1.1 200 OK` response and an `HTTP 429 Too Many Requests` response. + +Generate requests that do not match the condition: + +```shell +curl -i "http://127.0.0.1:9080/anything/anything?env=v1" +``` + +You should receive `HTTP/1.1 200 OK` responses for all requests, as they are not rate limited. + +### Apply Rate Limiting Conditionally by Consumers + +The following example demonstrates how to configure the Plugin to perform rate limiting based on the following specifications: + +* Consumer `john` should have a quota of 5 requests within a 30-second window +* Consumer `jane` should have a quota of 3 requests within a 30-second window +* All other consumers should have a quota of 2 requests within a 30-second window + +While this example will be using [`key-auth`](./key-auth.md), you can easily replace it with other authentication Plugins. 
+ +Create a Consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +Create `key-auth` credential for the consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +Create a second Consumer `jane`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jane" + }' +``` + +Create `key-auth` credential for the consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jane/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jane-key-auth", + "plugins": { + "key-auth": { + "key": "jane-key" + } + } + }' +``` + +Create a third Consumer `jimmy`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jimmy" + }' +``` + +Create `key-auth` credential for the consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jimmy/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jimmy-key-auth", + "plugins": { + "key-auth": { + "key": "jimmy-key" + } + } + }' +``` + +Create a Route with the `workflow` and `key-auth` Plugins, with the desired rate limiting rules: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "workflow-route", + "uri": "/anything", + "plugins":{ + "key-auth": {}, + "workflow":{ + "rules":[ + { + "actions": [ + [ + "limit-count", + { + "count": 5, + "key": "consumer_john", + "key_type": "constant", + "rejected_code": 429, + "time_window": 30 + } + ] + ], + "case": [ + [ + "consumer_name", + "==", + "john" + ] + ] + }, + { + "actions": [ + [ + "limit-count", + { + "count": 3, + 
"key": "consumer_jane", + "key_type": "constant", + "rejected_code": 429, + "time_window": 30 + } + ] + ], + "case": [ + [ + "consumer_name", + "==", + "jane" + ] + ] + }, + { + "actions": [ + [ + "limit-count", + { + "count": 2, + "key": "$consumer_name", + "key_type": "var", + "rejected_code": 429, + "time_window": 30 + } + ] + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +To verify, send 6 consecutive requests with `john`'s key: + +```shell +resp=$(seq 6 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H 'apikey: john-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that out of the 6 requests, 5 requests were successful (status code 200) while the others were rejected (status code 429). + +```text +200: 5, 429: 1 +``` + +Send 6 consecutive requests with `jane`'s key: + +```shell +resp=$(seq 6 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H 'apikey: jane-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that out of the 6 requests, 3 requests were successful (status code 200) while the others were rejected (status code 429). 
+ +```text +200: 3, 429: 3 +``` + +Send 3 consecutive requests with `jimmy`'s key: + +```shell +resp=$(seq 3 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H 'apikey: jimmy-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +You should see the following response, showing that out of the 3 requests, 2 requests were successful (status code 200) while the others were rejected (status code 429). + +```text +200: 2, 429: 1 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/zipkin.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/zipkin.md new file mode 100644 index 0000000..8ec64b0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/plugins/zipkin.md @@ -0,0 +1,265 @@ +--- +title: zipkin +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Zipkin +description: Zipkin is an open-source distributed tracing system. The zipkin Plugin instruments APISIX and sends traces to Zipkin based on the Zipkin API specification. +--- + + + + + + + +## Description + +[Zipkin](https://github.com/openzipkin/zipkin) is an open-source distributed tracing system. The `zipkin` Plugin instruments APISIX and sends traces to Zipkin based on the [Zipkin API specification](https://zipkin.io/pages/instrumenting.html). + +The Plugin can also send traces to other compatible collectors, such as [Jaeger](https://www.jaegertracing.io/docs/1.51/getting-started/#migrating-from-zipkin) and [Apache SkyWalking](https://skywalking.apache.org/docs/main/latest/en/setup/backend/zipkin-trace/#zipkin-receiver), both of which support Zipkin [v1](https://zipkin.io/zipkin-api/zipkin-api.yaml) and [v2](https://zipkin.io/zipkin-api/zipkin2-api.yaml) APIs. 
+ +## Static Configurations + +By default, `zipkin` Plugin NGINX variables configuration is set to false in the [default configuration](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua): + +To modify this value, add the updated configuration to `config.yaml`. For example: + +```yaml +plugin_attr: + zipkin: + set_ngx_var: true +``` + +Reload APISIX for changes to take effect. + +## Attributes + +See the configuration file for configuration options available to all Plugins. + +| Name | Type | Required | Default | Valid values | Description | +|--------------|---------|----------|----------------|--------------|---------------------------------------------------------------------------------| +| endpoint | string | True | | | Zipkin span endpoint to POST to, such as `http://127.0.0.1:9411/api/v2/spans`. | +|sample_ratio| number | True | | [0.00001, 1] | Frequency to sample requests. Setting to `1` means sampling every request. | +|service_name| string | False | "APISIX" | | Service name for the Zipkin reporter to be displayed in Zipkin. | +|server_addr | string | False |the value of `$server_addr` | IPv4 address | IPv4 address for the Zipkin reporter. For example, you can set this to your external IP address. | +|span_version | integer | False | 2 | [1, 2] | Version of the span type. | + +## Examples + +The examples below show different use cases of the `zipkin` Plugin. + +### Send Traces to Zipkin + +The following example demonstrates how to trace requests to a Route and send traces to Zipkin using [Zipkin API v2](https://zipkin.io/zipkin-api/zipkin2-api.yaml). You will also understand the differences between span version 2 and span version 1. + +Start a Zipkin instance in Docker: + +```shell +docker run -d --name zipkin -p 9411:9411 openzipkin/zipkin +``` + +Create a Route with `zipkin` and use the default span version 2. 
You should adjust the IP address as needed for the Zipkin HTTP endpoint, and configure the sample ratio to `1` to trace every request. + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "zipkin-tracing-route", + "uri": "/anything", + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9411/api/v2/spans", + "sample_ratio": 1, + "span_version": 2 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +Send a request to the Route: + +```shell +curl "http://127.0.0.1:9080/anything" +``` + +You should receive an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/7.64.1", + "X-Amzn-Trace-Id": "Root=1-65af2926-497590027bcdb09e34752b78", + "X-B3-Parentspanid": "347dddedf73ec176", + "X-B3-Sampled": "1", + "X-B3-Spanid": "429afa01d0b0067c", + "X-B3-Traceid": "aea58f4b490766eccb08275acd52a13a", + "X-Forwarded-Host": "127.0.0.1" + }, + ... +} +``` + +Navigate to the Zipkin web UI at [http://127.0.0.1:9411/zipkin](http://127.0.0.1:9411/zipkin) and click __Run Query__, you should see a trace corresponding to the request: + +![trace-from-request](https://static.api7.ai/uploads/2024/01/23/MaXhacYO_zipkin-run-query.png) + +Click __Show__ to see more tracing details: + +![v2-trace-spans](https://static.api7.ai/uploads/2024/01/23/3SmfFq9f_trace-details.png) + +Note that with span version 2, every traced request creates the following spans: + +```text +request +├── proxy +└── response +``` + +where `proxy` represents the time from the beginning of the request to the beginning of `header_filter`, and `response` represents the time from the beginning of `header_filter` to the beginning of `log`. 
+
+Now, update the Plugin on the Route to use span version 1:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes/zipkin-tracing-route" -X PATCH \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "plugins": {
+      "zipkin": {
+        "span_version": 1
+      }
+    }
+  }'
+```
+
+Send another request to the Route:
+
+```shell
+curl "http://127.0.0.1:9080/anything"
+```
+
+In the Zipkin web UI, you should see a new trace with details similar to the following:
+
+![v1-trace-spans](https://static.api7.ai/uploads/2024/01/23/OPw2sTPa_v1-trace-spans.png)
+
+Note that with the older span version 1, every traced request creates the following spans:
+
+```text
+request
+├── rewrite
+├── access
+└── proxy
+    └── body_filter
+```
+
+### Send Traces to Jaeger
+
+The following example demonstrates how to trace requests to a Route and send traces to Jaeger.
+
+Start a Jaeger instance in Docker:
+
+```shell
+docker run -d --name jaeger \
+  -e COLLECTOR_ZIPKIN_HOST_PORT=9411 \
+  -p 16686:16686 \
+  -p 9411:9411 \
+  jaegertracing/all-in-one
+```
+
+Create a Route with `zipkin`. Please adjust the IP address as needed for the Zipkin HTTP endpoint, and configure the sample ratio to `1` to trace every request.
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "zipkin-tracing-route",
+    "uri": "/anything",
+    "plugins": {
+      "zipkin": {
+        "endpoint": "http://127.0.0.1:9411/api/v2/spans",
+        "sample_ratio": 1
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org": 1
+      }
+    }
+  }'
+```
+
+Send a request to the Route:
+
+```shell
+curl "http://127.0.0.1:9080/anything"
+```
+
+You should receive an `HTTP/1.1 200 OK` response.
+
+Navigate to the Jaeger web UI at [http://127.0.0.1:16686](http://127.0.0.1:16686), select APISIX as the Service, and click __Find Traces__, you should see a trace corresponding to the request:
+
+![jaeger-traces](https://static.api7.ai/uploads/2024/01/23/X6QdLN3l_jaeger.png)
+
+Similarly, you should find more span details once you click into a trace:
+
+![jaeger-details](https://static.api7.ai/uploads/2024/01/23/iP9fXI2A_jaeger-details.png)
+
+### Using Trace Variables in Logging
+
+The following example demonstrates how to configure the `zipkin` Plugin to set the following built-in variables, which can be used in logger Plugins or access logs:
+
+- `zipkin_context_traceparent`: [trace parent](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format) ID
+- `zipkin_trace_id`: trace ID of the current span
+- `zipkin_span_id`: span ID of the current span
+
+Update the configuration file as below. You can customize the access log format to use the `zipkin` Plugin variables, and set `zipkin` variables in the `set_ngx_var` field.
+
+```yaml title="conf/config.yaml"
+nginx_config:
+  http:
+    enable_access_log: true
+    access_log_format: '{"time": "$time_iso8601","zipkin_context_traceparent": "$zipkin_context_traceparent","zipkin_trace_id": "$zipkin_trace_id","zipkin_span_id": "$zipkin_span_id","remote_addr": "$remote_addr"}'
+    access_log_format_escape: json
+plugin_attr:
+  zipkin:
+    set_ngx_var: true
+```
+
+Reload APISIX for configuration changes to take effect.
+ +You should see access log entries similar to the following when you generate requests: + +```text +{"time": "23/Jan/2024:06:28:00 +0000","zipkin_context_traceparent": "00-61bce33055c56f5b9bec75227befd142-13ff3c7370b29925-01","zipkin_trace_id": "61bce33055c56f5b9bec75227befd142","zipkin_span_id": "13ff3c7370b29925","remote_addr": "172.28.0.1"} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/profile.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/profile.md new file mode 100644 index 0000000..8c0eaa3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/profile.md @@ -0,0 +1,140 @@ +--- +title: Configuration based on environments +keywords: + - Apache APISIX + - API Gateway + - Configuration + - Environment +description: This document describes how you can change APISIX configuration based on environments. +--- + + + +Extracting configuration from the code makes APISIX adaptable to changes in the operating environments. For example, APISIX can be deployed in a development environment for testing and then moved to a production environment. The configuration for APISIX in these environments would be different. + +APISIX supports managing multiple configurations through environment variables in two different ways: + +1. Using environment variables in the configuration file +2. Using an environment variable to switch between multiple configuration profiles + +## Using environment variables in the configuration file + +This is useful when you want to change some configurations based on the environment. + +To use environment variables, you can use the syntax `key_name: ${{ENVIRONMENT_VARIABLE_NAME:=}}`. You can also set a default value to fall back to if no environment variables are set by adding it to the configuration as `key_name: ${{ENVIRONMENT_VARIABLE_NAME:=VALUE}}`. 
The example below shows how you can modify your configuration file to use environment variables to set the listening ports of APISIX: + +```yaml title="config.yaml" +apisix: + node_listen: + - ${{APISIX_NODE_LISTEN:=}} +deployment: + admin: + admin_listen: + port: ${{DEPLOYMENT_ADMIN_ADMIN_LISTEN:=}} +``` + +When you run APISIX, you can set these environment variables dynamically: + +```shell +export APISIX_NODE_LISTEN=8132 +export DEPLOYMENT_ADMIN_ADMIN_LISTEN=9232 +``` + +:::caution + +You should set these variables with `export`. If you do not export, APISIX will fail to resolve for these variables. + +::: + +Now when you start APISIX, it will listen on port `8132` and expose the Admin API on port `9232`. + +To use default values if no environment variables are set, you can add it to your configuration file as shown below: + +```yaml title="config.yaml" +apisix: + node_listen: + - ${{APISIX_NODE_LISTEN:=9080}} +deployment: + admin: + admin_listen: + port: ${{DEPLOYMENT_ADMIN_ADMIN_LISTEN:=9180}} +``` + +Now if you don't specify these environment variables when running APISIX, it will fall back to the default values and expose the Admin API on port `9180` and listen on port `9080`. + +Similarly, you can also use environment variables in `apisix.yaml` when deploying APISIX in standalone mode. + +For example, you can export the upstream address and port to environment variables: + +```shell +export HOST_ADDR=httpbin.org +export HOST_PORT=80 +``` + +Then create a route as such: + +```yaml title="apisix.yaml" +routes: + - + uri: "/anything" + upstream: + nodes: + "${{HOST_ADDR}}:${{HOST_PORT}}": 1 + type: roundrobin +#END +``` + +Initialize and start APISIX in standalone mode, requests to `/anything` should now be forwarded to `httpbin.org:80/anything`. + +*WARNING*: When using docker to deploy APISIX in standalone mode. New environment variables added to `apisix.yaml` while APISIX has been initialized will only take effect after a reload. 
+ +## Using the `APISIX_PROFILE` environment variable + +If you have multiple configuration changes for multiple environments, it might be better to have a different configuration file for each. + +Although this might increase the number of configuration files, you would be able to manage each independently and can even do version management. + +APISIX uses the `APISIX_PROFILE` environment variable to switch between environments, i.e. to switch between different sets of configuration files. If the value of `APISIX_PROFILE` is `env`, then APISIX will look for the configuration files `conf/config-env.yaml`, `conf/apisix-env.yaml`, and `conf/debug-env.yaml`. + +For example for the production environment, you can have: + +* conf/config-prod.yaml +* conf/apisix-prod.yaml +* conf/debug-prod.yaml + +And for the development environment: + +* conf/config-dev.yaml +* conf/apisix-dev.yaml +* conf/debug-dev.yaml + +And if no environment is specified, APISIX can use the default configuration files: + +* conf/config.yaml +* conf/apisix.yaml +* conf/debug.yaml + +To use a particular configuration, you can specify it in the environment variable: + +```shell +export APISIX_PROFILE=prod +``` + +APISIX will now use the `-prod.yaml` configuration files. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/pubsub.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/pubsub.md new file mode 100644 index 0000000..ec75622 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/pubsub.md @@ -0,0 +1,148 @@ +--- +title: PubSub +keywords: + - APISIX + - PubSub +description: This document contains information about the Apache APISIX pubsub framework. +--- + + + +## What is PubSub + +Publish-subscribe is a messaging paradigm: + +- Producers send messages to specific brokers rather than directly to consumers. +- Brokers cache messages sent by producers and then actively push them to subscribed consumers or pull them. 
+ +The system architectures use this pattern to decouple or handle high traffic scenarios. + +In Apache APISIX, the most common scenario is handling north-south traffic from the server to the client. Combining it with a publish-subscribe system, we can achieve more robust features, such as real-time collaboration on online documents, online games, etc. + +## Architecture + +![pubsub architecture](../../assets/images/pubsub-architecture.svg) + +Currently, Apache APISIX supports WebSocket communication with the client, which can be any application that supports WebSocket, with Protocol Buffer as the serialization mechanism, see the [protocol definition](https://github.com/apache/apisix/blob/master/apisix/include/apisix/model/pubsub.proto). + +## Supported messaging systems + +- [Apache Kafka](pubsub/kafka.md) + +## How to support other messaging systems + +Apache APISIX implement an extensible pubsub module, which is responsible for starting the WebSocket server, coding and decoding communication protocols, handling client commands, and adding support for the new messaging system. + +### Basic Steps + +- Add new commands and response body definitions to `pubsub.proto` +- Add a new option to the `scheme` configuration item in upstream +- Add a new `scheme` judgment branch to `http_access_phase` +- Implement the required message system instruction processing functions +- Optional: Create plugins to support advanced configurations of this messaging system + +### Example of Apache Kafka + +#### Add new commands and response body definitions to `pubsub.proto` + +The core of the protocol definition in `pubsub.proto` is the two parts `PubSubReq` and `PubSubResp`. + +First, create the `CmdKafkaFetch` command and add the required parameters. Then, register this command in the list of commands for `req` in `PubSubReq`, which is named `cmd_kafka_fetch`. 
+ +Then create the corresponding response body `KafkaFetchResp` and register it in the `resp` of `PubSubResp`, named `kafka_fetch_resp`. + +The protocol definition [pubsub.proto](https://github.com/apache/apisix/blob/master/apisix/include/apisix/model/pubsub.proto). + +#### Add a new option to the `scheme` configuration item in upstream + +Add a new option `kafka` to the `scheme` field enumeration in the `upstream` of `apisix/schema_def.lua`. + +The schema definition [schema_def.lua](https://github.com/apache/apisix/blob/master/apisix/schema_def.lua). + +#### Add a new `scheme` judgment branch to `http_access_phase` + +Add a `scheme` judgment branch to the `http_access_phase` function in `apisix/init.lua` to support the processing of `kafka` type upstreams. Because Apache Kafka has its clustering and partition scheme, we do not need to use the Apache APISIX built-in load balancing algorithm, so we intercept and take over the processing flow before selecting the upstream node, using the `kafka_access_phase` function. + +The APISIX init file [init.lua](https://github.com/apache/apisix/blob/master/apisix/init.lua). + +#### Implement the required message system commands processing functions + +First, create an instance of the `pubsub` module, which is provided in the `core` package. + +Then, an instance of the Apache Kafka client is created and omitted code here. + +Next, add the command registered in the protocol definition above to the `pubsub` instance, which will provide a callback function that provides the parameters parsed from the communication protocol, in which the developer needs to call the kafka client to get the data and return it to the `pubsub` module as the function return value. 
+ +:::note Callback function prototype + +The `params` is the data in the protocol definition; the first return value is the data, which needs to contain the fields in the response body definition, and returns the `nil` value when there is an error; the second return value is the error, and returns the error string when there is an error + +::: + +Finally, it enters the loop to wait for client commands, and when an error occurs, it returns the error and stops the processing flow. + +The kafka pubsub implementation [kafka.lua](https://github.com/apache/apisix/blob/master/apisix/pubsub/kafka.lua). + +#### Optional: Create plugins to support advanced configurations of this messaging system + +Add the required fields to the plugin schema definition and write them to the context of the current request in the `access` function. + +The `kafka-proxy` plugin [kafka-proxy.lua](https://github.com/apache/apisix/blob/master/apisix/plugins/kafka-proxy.lua). + +Add this plugin to [the existing list of plugins](https://github.com/apache/apisix/blob/master/apisix/cli/config.yaml.example) in the APISIX configuration file [`config.yaml`](https://github.com/apache/apisix/blob/master/conf/config.yaml). For instance: + +```yaml title="conf/config.yaml" +plugins: # see `conf/config.yaml.example` for an example + - ... # add existing plugins + - kafka-proxy +``` + +#### Results + +After this is done, create a route like the one below to connect to this messaging system via APISIX using the WebSocket. 
+ +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/kafka' \ + -H 'X-API-KEY: ${api-key}' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/kafka", + "plugins": { + "kafka-proxy": { + "sasl": { + "username": "user", + "password": "pwd" + } + } + }, + "upstream": { + "nodes": { + "kafka-server1:9092": 1, + "kafka-server2:9092": 1, + "kafka-server3:9092": 1 + }, + "type": "none", + "scheme": "kafka", + "tls": { + "verify": true + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/pubsub/kafka.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/pubsub/kafka.md new file mode 100644 index 0000000..185eec7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/pubsub/kafka.md @@ -0,0 +1,128 @@ +--- +title: Apache Kafka +keywords: + - Apache APISIX + - API Gateway + - PubSub + - Kafka +description: This document contains information about the Apache APISIX kafka pubsub scenario. +--- + + + +## Connect to Apache Kafka + +Connecting to Apache Kafka in Apache APISIX is very simple. + +Currently, we provide a simpler way to integrate by combining two APIs, ListOffsets and Fetch, to quickly implement the ability to pull Kafka messages. Still, they do not support Apache Kafka's consumer group feature for now and cannot be managed for offsets by Apache Kafka. + +### Limitations + +- Offsets need to be managed manually + +They can be stored by a custom backend service or obtained via the list_offset command before starting to fetch the message, which can use timestamp to get the starting offset, or to get the initial and end offsets. 
+ +- Unsupported batch data acquisition + +A single instruction can only obtain the data of a Topic Partition, does not support batch data acquisition through a single instruction + +### Prepare + +First, it is necessary to compile the [communication protocol](https://github.com/apache/apisix/blob/master/apisix/include/apisix/model/pubsub.proto) as a language-specific SDK using the `protoc`, which provides the command and response definitions to connect to Kafka via APISIX using the WebSocket. + +The `sequence` field in the protocol is used to associate the request with the response, they will correspond one to one, the client can manage it in the way they want, APISIX will not modify it, only pass it back to the client through the response body. + +The following commands are currently used by Apache Kafka connect: + +- CmdKafkaFetch +- CmdKafkaListOffset + +> The `timestamp` field in the `CmdKafkaListOffset` command supports the following value: +> +> - `unix timestamp`: Offset of the first message after the specified timestamp +> - `-1`:Offset of the last message of the current Partition +> - `-2`:Offset of the first message of current Partition +> +> For more information, see [Apache Kafka Protocol Documentation](https://kafka.apache.org/protocol.html#The_Messages_ListOffsets) + +Possible response body: When an error occurs, `ErrorResp` will be returned, which includes the error string; the rest of the response will be returned after the execution of the particular command. + +- ErrorResp +- KafkaFetchResp +- KafkaListOffsetResp + +### How to use + +#### Create route + +Create a route, set the upstream `scheme` field to `kafka`, and configure `nodes` to be the address of the Kafka broker. 
+ +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/kafka' \ + -H 'X-API-KEY: ' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/kafka", + "upstream": { + "nodes": { + "kafka-server1:9092": 1, + "kafka-server2:9092": 1, + "kafka-server3:9092": 1 + }, + "type": "none", + "scheme": "kafka" + } +}' +``` + +After configuring the route, you can use this feature. + +#### Enabling TLS and SASL/PLAIN authentication + +Simply turn on the `kafka-proxy` plugin on the created route and enable the Kafka TLS handshake and SASL authentication through the configuration, which can be found in the [plugin documentation](../../../en/latest/plugins/kafka-proxy.md). + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/kafka' \ + -H 'X-API-KEY: ' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/kafka", + "plugins": { + "kafka-proxy": { + "sasl": { + "username": "user", + "password": "pwd" + } + } + }, + "upstream": { + "nodes": { + "kafka-server1:9092": 1, + "kafka-server2:9092": 1, + "kafka-server3:9092": 1 + }, + "type": "none", + "scheme": "kafka", + "tls": { + "verify": true + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/router-radixtree.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/router-radixtree.md new file mode 100644 index 0000000..c375360 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/router-radixtree.md @@ -0,0 +1,415 @@ +--- +Title: Router Radixtree +--- + + + +### What is Libradixtree? + +[Libradixtree](https://github.com/api7/lua-resty-radixtree) is an adaptive radix tree that is implemented in Lua for OpenResty and it is based on FFI for [rax](https://github.com/antirez/rax). APISIX uses libradixtree as a route dispatching library. + +### How to use Libradixtree in APISIX? + +There are several ways to use Libradixtree in APISIX. Let's take a look at a few examples and have an intuitive understanding. + +#### 1. 
Full match + +``` +/blog/foo +``` + +It will only match the full path `/blog/foo`. + +#### 2. Prefix matching + +``` +/blog/bar* +``` + +It will match the path with the prefix `/blog/bar`. For example, `/blog/bar/a`, +`/blog/bar/b`, `/blog/bar/c/d/e`, `/blog/bar` etc. + +#### 3. Match priority + +Full match has a higher priority than deep prefix matching. + +Here are the rules: + +``` +/blog/foo/* +/blog/foo/a/* +/blog/foo/c/* +/blog/foo/bar +``` + +| path | Match result | +|------|--------------| +|/blog/foo/bar | `/blog/foo/bar` | +|/blog/foo/a/b/c | `/blog/foo/a/*` | +|/blog/foo/c/d | `/blog/foo/c/*` | +|/blog/foo/gloo | `/blog/foo/*` | +|/blog/bar | not match | + +#### 4. Different routes have the same `uri` + +When different routes have the same `uri`, you can set the priority field of the route to determine which route to match first, or add other matching rules to distinguish different routes. + +Note: In the matching rules, the `priority` field takes precedence over other rules except `uri`. + +1. Different routes have the same `uri` but different `priority` field + +Create two routes with different `priority` values ​​(the larger the value, the higher the priority). 
+ +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "priority": 3, + "uri": "/hello" +}' +``` + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "priority": 2, + "uri": "/hello" +}' +``` + +Test: + +```shell +curl http://127.0.0.1:1980/hello +1980 +``` + +All requests will only hit the route of port `1980` because it has a priority of 3 while the route with the port of `1981` has a priority of 2. + +2. Different routes have the same `uri` but different matching conditions + +To understand this, look at the example of setting host matching rules: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "hosts": ["localhost.com"], + "uri": "/hello" +}' +``` + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "hosts": ["test.com"], + "uri": "/hello" +}' +``` + +Test: + +```shell +$ curl http://127.0.0.1:9080/hello -H 'host: localhost.com' +1980 +``` + +```shell +$ curl http://127.0.0.1:9080/hello -H 'host: test.com' +1981 +``` + +```shell +$ curl http://127.0.0.1:9080/hello +{"error_msg":"404 Route Not Found"} +``` + +If the `host` rule matches, the request hits the corresponding upstream, and if the `host` does not match, the request returns a 404 message. + +#### 5. 
Parameter match + +When `radixtree_uri_with_parameter` is used, we can match routes with parameters. + +For example, with configuration: + +```yaml +apisix: + router: + http: 'radixtree_uri_with_parameter' +``` + +route like + +``` +/blog/:name +``` + +will match both `/blog/dog` and `/blog/cat`. + +For more details, see https://github.com/api7/lua-resty-radixtree/#parameters-in-path. + +### How to filter route by Nginx built-in variable? + +Nginx provides a variety of built-in variables that can be used to filter routes based on certain criteria. Here is an example of how to filter routes by Nginx built-in variables: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/index.html", + "vars": [ + ["http_host", "==", "iresty.com"], + ["cookie_device_id", "==", "a66f0cdc4ba2df8c096f74c9110163a9"], + ["arg_name", "==", "json"], + ["arg_age", ">", "18"], + ["arg_address", "~~", "China.*"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +This route will require the request header `host` equal `iresty.com`, request cookie key `_device_id` equal `a66f0cdc4ba2df8c096f74c9110163a9` etc. You can learn more at [radixtree-new](https://github.com/api7/lua-resty-radixtree#new). + +### How to filter route by POST form attributes? + +APISIX supports filtering route by POST form attributes with `Content-Type` = `application/x-www-form-urlencoded`. + +We can define the following route: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "methods": ["POST", "GET"], + "uri": "/_post", + "vars": [ + ["post_arg_name", "==", "json"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +The route will be matched when the POST form contains `name=json`. + +### How to filter route by GraphQL attributes? + +APISIX can handle HTTP GET and POST methods. 
At the same time, the request body can be a GraphQL query string or JSON-formatted content. + +APISIX supports filtering routes by some attributes of GraphQL. Currently, we support: + +* graphql_operation +* graphql_name +* graphql_root_fields + +For instance, with GraphQL like this: + +```graphql +query getRepo { + owner { + name + } + repo { + created + } +} +``` + +Where + +* The `graphql_operation` is `query` +* The `graphql_name` is `getRepo`, +* The `graphql_root_fields` is `["owner", "repo"]` + +We can filter such route with: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "methods": ["POST", "GET"], + "uri": "/graphql", + "vars": [ + ["graphql_operation", "==", "query"], + ["graphql_name", "==", "getRepo"], + ["graphql_root_fields", "has", "owner"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +We can verify GraphQL matches in the following three ways: + +1. GraphQL query strings + +```shell +$ curl -H 'content-type: application/graphql' -X POST http://127.0.0.1:9080/graphql -d ' +query getRepo { + owner { + name + } + repo { + created + } +}' +``` + +2. JSON format + +```shell +$ curl -H 'content-type: application/json' -X POST \ +http://127.0.0.1:9080/graphql --data '{"query": "query getRepo { owner {name } repo {created}}"}' +``` + +3. Try `GET` request match + +```shell +$ curl -H 'content-type: application/graphql' -X GET \ +"http://127.0.0.1:9080/graphql?query=query getRepo { owner {name } repo {created}}" -g +``` + +To prevent spending too much time reading invalid GraphQL request body, we only read the first 1 MiB +data from the request body. This limitation is configured via: + +```yaml +graphql: + max_size: 1048576 + +``` + +If you need to pass a GraphQL body which is larger than the limitation, you can increase the value in `conf/config.yaml`. + +### How to filter route by POST request JSON body? 
+ +APISIX supports filtering route by POST form attributes with `Content-Type` = `application/json`. + +We can define the following route: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "methods": ["POST"], + "uri": "/_post", + "vars": [ + ["post_arg.name", "==", "xyz"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +It will match the following POST request + +```shell +curl -X POST http://127.0.0.1:9180/_post \ + -H "Content-Type: application/json" \ + -d '{"name":"xyz"}' +``` + +We can also filter by complex queries like the example below: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "methods": ["POST"], + "uri": "/_post", + "vars": [ + ["post_arg.messages[*].content[*].type","has","image_url"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +It will match the following POST request + +```shell +curl -X POST http://127.0.0.1:9180/_post \ + -H "Content-Type: application/json" \ + -d '{ + "model": "deepseek", + "messages": [ + { + "role": "system", + "content": [ + { + "text": "You are a mathematician", + "type": "text" + }, + { + "text": "You are a mathematician", + "type": "image_url" + } + ] + } + ] +}' + +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/ssl-protocol.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/ssl-protocol.md new file mode 100644 index 0000000..654eb00 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/ssl-protocol.md @@ -0,0 +1,352 @@ +--- +title: SSL Protocol +--- + + + +`APISIX` supports set TLS protocol and also supports dynamically specifying different TLS protocol versions for each [SNI](https://en.wikipedia.org/wiki/Server_Name_Indication). 
+ +**For security reasons, the encryption suite used by default in `APISIX` does not support TLSv1.1 and lower versions.** +**If you need to enable the TLSv1.1 protocol, please add the encryption suite supported by the TLSv1.1 protocol to the configuration item `apisix.ssl.ssl_ciphers` in `config.yaml`.** + +## ssl_protocols Configuration + +### Static Configuration + +The `ssl_protocols` parameter in the static configuration `config.yaml` applies to the entire APISIX, but cannot be dynamically modified. It only takes effect when the matching SSL resource does not set `ssl_protocols`. + +```yaml +apisix: + ssl: + ssl_protocols: TLSv1.2 TLSv1.3 # default TLSv1.2 TLSv1.3 +``` + +### Dynamic Configuration + +Use the `ssl_protocols` field in the `ssl` resource to dynamically specify different TLS protocol versions for each SNI. + +Specify the `test.com` domain uses the TLSv1.2 and TLSv1.3: + +```bash +{ + "cert": "$cert", + "key": "$key", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.2", + "TLSv1.3" + ] +} +``` + +### Notes + +- Dynamic configuration has a higher priority than static configuration. When the `ssl_protocols` configuration item in the ssl resource is not empty, the static configuration will be overridden. +- The static configuration applies to the entire APISIX and requires a reload of APISIX to take effect. +- Dynamic configuration can control the TLS protocol version of each SNI in a fine-grained manner and can be dynamically modified, which is more flexible than static configuration. + +## Examples + +### How to specify the TLSv1.1 protocol + +While newer products utilize higher security-level TLS protocol versions, there are still legacy clients that rely on the lower-level TLSv1.1 protocol. However, enabling TLSv1.1 for new products presents potential security risks. 
In order to maintain the security of the API, it is crucial to have the ability to seamlessly switch between different protocol versions based on specific requirements and circumstances. +For example, consider two domain names: `test.com`, utilized by legacy clients requiring TLSv1.1 configuration, and `test2.com`, associated with new products that support TLSv1.2 and TLSv1.3 protocols. + +1. `config.yaml` configuration. + +```yaml +apisix: + ssl: + ssl_protocols: TLSv1.3 + # ssl_ciphers is for reference only + ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA +``` + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +2. Specify the TLSv1.1 protocol version for the test.com domain. + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.1" + ] +}' +``` + +3. Create an SSL object for test.com without specifying the TLS protocol version, which will use the static configuration by default. + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server2.crt)"'", + "key": "'"$(cat server2.key)"'", + "snis": ["test2.com"] +}' +``` + +4. Access Verification + +Failed, accessed test.com with TLSv1.3: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` + +Successfully, accessed test.com with TLSv1.1: + +```shell +$ curl --tls-max 1.1 --tlsv1.1 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.1 (OUT), TLS handshake, Client hello (1): +* TLSv1.1 (IN), TLS handshake, Server hello (2): +* TLSv1.1 (IN), TLS handshake, Certificate (11): +* TLSv1.1 (IN), TLS handshake, Server key exchange (12): +* TLSv1.1 (IN), TLS handshake, Server finished (14): +* TLSv1.1 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.1 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.1 (OUT), TLS handshake, Finished (20): +* TLSv1.1 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.1 / ECDHE-RSA-AES256-SHA +``` + +Successfully, accessed test2.com with TLSv1.3: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +Failed, accessed test2.com with TLSv1.1: + +```shell +curl --tls-max 1.1 --tlsv1.1 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.1 (OUT), TLS handshake, Client hello (1): +* TLSv1.1 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` + +### Certificates are associated with multiple domains, but different TLS protocols are used between domains + +Sometimes, we may encounter a situation where a certificate is associated with multiple domains, but they need to use different TLS protocols to ensure security. For example, the test.com domain needs to use the TLSv1.2 protocol, while the test2.com domain needs to use the TLSv1.3 protocol. In this case, we cannot simply create an SSL object for all domains, but need to create an SSL object for each domain separately and specify the appropriate protocol version. 
This way, we can perform the correct SSL handshake and encrypted communication based on different domains and protocol versions. The example is as follows: + +1. Create an SSL object for test.com using the certificate and specify the TLSv1.2 protocol. + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.2" + ] +}' +``` + +2. Use the same certificate as test.com to create an SSL object for test2.com and specify the TLSv1.3 protocol. + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/2 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test2.com"], + "ssl_protocols": [ + "TLSv1.3" + ] +}' +``` + +3. Access verification + +Successfully, accessed test.com with TLSv1.2: + +```shell +$ curl --tls-max 1.2 --tlsv1.2 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS handshake, Server hello (2): +* TLSv1.2 (IN), TLS handshake, Certificate (11): +* TLSv1.2 (IN), TLS handshake, Server key exchange (12): +* TLSv1.2 (IN), TLS handshake, Server finished (14): +* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.2 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.2 (OUT), TLS handshake, Finished (20): +* TLSv1.2 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test.com +* start date: Jul 20 15:50:08 2023 GMT +* expire date: Jul 17 15:50:08 2033 GMT +* issuer: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test.com +* SSL certificate verify result: EE certificate key too weak (66), continuing anyway. +* Using HTTP2, server supports multi-use +* Connection state changed (HTTP/2 confirmed) +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* Using Stream ID: 1 (easy handle 0x5608905ee2e0) +> HEAD / HTTP/2 +> Host: test.com:9443 +> user-agent: curl/7.74.0 +> accept: */* + +``` + +Failed, accessed test.com with TLSv1.3: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version + +``` + +Successfully, accessed test2.com with TLSv1.3: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test2.com +* start date: Jul 20 16:05:47 2023 GMT +* expire date: Jul 17 16:05:47 2033 GMT +* issuer: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test2.com +* SSL certificate verify result: EE certificate key too weak (66), continuing anyway. 
+* Using HTTP2, server supports multi-use +* Connection state changed (HTTP/2 confirmed) +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* Using Stream ID: 1 (easy handle 0x55569cbe42e0) +> HEAD / HTTP/2 +> Host: test2.com:9443 +> user-agent: curl/7.74.0 +> accept: */* +> +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* old SSL session ID is stale, removing +``` + +Failed, accessed test2.com with TLSv1.2: + +```shell +$ curl --tls-max 1.2 --tlsv1.2 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/status-api.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/status-api.md new file mode 100644 index 0000000..6171496 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/status-api.md @@ -0,0 +1,78 @@ +--- +title: Status API +--- + + + +In Apache APISIX, the status API is used to: + +* Check if APISIX has successfully started and running correctly. +* Check if all of the workers have received and loaded the configuration. 
+ +To change the default endpoint (`127.0.0.1:7085`) of the Status API server, change the `ip` and `port` in the `status` section in your configuration file (`conf/config.yaml`): + +```yaml +apisix: + status: + ip: "127.0.0.1" + port: 7085 +``` + +This API can be used to perform readiness probes on APISIX before APISIX starts receiving user requests. + +### GET /status + +Returns a JSON reporting the status of APISIX workers. If APISIX is not running, the request will error out while establishing TCP connection. Otherwise this endpoint will always return ok if request reaches a running worker. + +```json +{ + "status": "ok" +} +``` + +### GET /status/ready + +Returns `ok` when all workers have loaded the configuration, otherwise returns the specific error with `503` error code. Below are specific examples. + +When all workers have loaded the configuration: + +```json +{ + "status": "ok" +} +``` + +When 1 workers has't been initialised: + +```json +{ + "status": "error", + "error": "worker count: 16 but status report count: 15" +} +``` + +When a particular worker hasn't loaded the configuration: + +```json +{ + "error": "worker id: 9 has not received configuration", + "status": "error" +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/stream-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/stream-proxy.md new file mode 100644 index 0000000..1fb9ac0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/stream-proxy.md @@ -0,0 +1,243 @@ +--- +title: Stream Proxy +--- + + + +A stream proxy operates at the transport layer, handling stream-oriented traffic based on TCP and UDP protocols. TCP is used for many applications and services, such as LDAP, MySQL, and RTMP. UDP is used for many popular non-transactional applications, such as DNS, syslog, and RADIUS. + +APISIX can serve as a stream proxy, in addition to being an application layer proxy. + +## How to enable stream proxy? 
+ +By default, stream proxy is disabled. + +To enable this option, set `apisix.proxy_mode` to `stream` or `http&stream`, depending on whether you want stream proxy only or both http and stream. Then add the `apisix.stream_proxy` option in `conf/config.yaml` and specify the list of addresses where APISIX should act as a stream proxy and listen for incoming requests. + +```yaml +apisix: + proxy_mode: http&stream # enable both http and stream proxies + stream_proxy: + tcp: + - 9100 # listen on 9100 ports of all network interfaces for TCP requests + - "127.0.0.1:9101" + udp: + - 9200 # listen on 9200 ports of all network interfaces for UDP requests + - "127.0.0.1:9211" +``` + +If `apisix.stream_proxy` is undefined in `conf/config.yaml`, you will encounter an error similar to the following and not be able to add a stream route: + +``` +{"error_msg":"stream mode is disabled, can not add stream routes"} +``` + +## How to set a route? + +You can create a stream route using the Admin API `/stream_routes` endpoint. For example: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "remote_addr": "192.168.5.3", + "upstream": { + "nodes": { + "192.168.4.10:1995": 1 + }, + "type": "roundrobin" + } +}' +``` + +With this configuration, APISIX would only forward the request to the upstream service at `192.168.4.10:1995` if and only if the request is sent from `192.168.5.3`. See the next section to learn more about filtering options. + +More examples can be found in [test cases](https://github.com/apache/apisix/blob/master/t/stream-node/sanity.t). 
+ +## More stream route filtering options + +Currently there are three attributes in stream routes that can be used for filtering requests: + +- `server_addr`: The address of the APISIX server that accepts the L4 stream connection. +- `server_port`: The port of the APISIX server that accepts the L4 stream connection. +- `remote_addr`: The address of client from which the request has been made. + +Here is an example: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "server_addr": "127.0.0.1", + "server_port": 2000, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } +}' +``` + +It means APISIX will proxy the request to `127.0.0.1:1995` when the server address is `127.0.0.1` and the server port is equal to `2000`. + +Here is an example with MySQL: + +1. Put this config inside `config.yaml` + + ```yaml + apisix: + proxy_mode: http&stream # enable both http and stream proxies + stream_proxy: # TCP/UDP proxy + tcp: # TCP proxy address list + - 9100 # by default uses 0.0.0.0 + - "127.0.0.10:9101" + ``` + +2. Now run a mysql docker container and expose port 3306 to the host + + ```shell + $ docker run --name mysql -e MYSQL_ROOT_PASSWORD=toor -p 3306:3306 -d mysql mysqld --default-authentication-plugin=mysql_native_password + # check it using a mysql client that it works + $ mysql --host=127.0.0.1 --port=3306 -u root -p + Enter password: + Welcome to the MySQL monitor. Commands end with ; or \g. + Your MySQL connection id is 25 + ... + mysql> + ``` + +3. 
Now we are going to create a stream route with server filtering: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "server_addr": "127.0.0.10", + "server_port": 9101, + "upstream": { + "nodes": { + "127.0.0.1:3306": 1 + }, + "type": "roundrobin" + } + }' + ``` + + It only forwards the request to the mysql upstream whenever a connection is received at APISIX server `127.0.0.10` and port `9101`. Let's test that behaviour: + +4. Making a request to 9100 (stream proxy port enabled inside config.yaml), filter matching fails. + + ```shell + $ mysql --host=127.0.0.1 --port=9100 -u root -p + Enter password: + ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 + + ``` + + Instead making a request to the APISIX host and port where the filter matching succeeds: + + ```shell + mysql --host=127.0.0.10 --port=9101 -u root -p + Enter password: + Welcome to the MySQL monitor. Commands end with ; or \g. + Your MySQL connection id is 26 + ... + mysql> + ``` + +Read [Admin API's Stream Route section](./admin-api.md#stream-route) for the complete options list. + +## Accept TLS over TCP connection + +APISIX can accept TLS over TCP connection. + +First of all, we need to enable TLS for the TCP address: + +```yaml +apisix: + proxy_mode: http&stream # enable both http and stream proxies + stream_proxy: # TCP/UDP proxy + tcp: # TCP proxy address list + - addr: 9100 + tls: true +``` + +Second, we need to configure certificate for the given SNI. +See [Admin API's SSL section](./admin-api.md#ssl) for how to do. +mTLS is also supported, see [Protect Route](./mtls.md#protect-route) for how to do. 
+ +Third, we need to configure a stream route to match and proxy it to the upstream: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } +}' +``` + +When the connection is TLS over TCP, we can use the SNI to match a route, like: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "sni": "a.test.com", + "upstream": { + "nodes": { + "127.0.0.1:5991": 1 + }, + "type": "roundrobin" + } +}' +``` + +In this case, a connection handshaked with SNI `a.test.com` will be proxied to `127.0.0.1:5991`. + +## Proxy to TLS over TCP upstream + +APISIX also supports proxying to TLS over TCP upstream. + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "scheme": "tls", + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } +}' +``` + +By setting the `scheme` to `tls`, APISIX will do TLS handshake with the upstream. + +When the client is also speaking TLS over TCP, the SNI from the client will pass through to the upstream. Otherwise, a dummy SNI `apisix_backend` will be used. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/support-fips-in-apisix.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/support-fips-in-apisix.md new file mode 100644 index 0000000..996a448 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/support-fips-in-apisix.md @@ -0,0 +1,60 @@ +--- +id: support-fips-in-apisix +title: Support FIPS in APISIX +keywords: + - API Gateway + - Apache APISIX + - Code Contribution + - Building APISIX + - OpenSSL 3.0 FIPS +description: Compile apisix-runtime with OpenSSL 3.0 (FIPS enabled) +--- + + + +OpenSSL 3.0 [supports](https://www.openssl.org/blog/blog/2022/08/24/FIPS-validation-certificate-issued/) [FIPS](https://en.wikipedia.org/wiki/FIPS_140-2) mode. To support FIPS in APISIX, you can compile apisix-runtime with OpenSSL 3.0. + +## Compilation + +To compile apisix-runtime with OpenSSL 3.0, run the commands below as root user: + +```bash +cd $(mktemp -d) +OPENSSL3_PREFIX=${OPENSSL3_PREFIX-/usr/local} +apt install -y build-essential +git clone https://github.com/openssl/openssl +cd openssl +./Configure --prefix=$OPENSSL3_PREFIX/openssl-3.0 enable-fips +make install +echo $OPENSSL3_PREFIX/openssl-3.0/lib64 > /etc/ld.so.conf.d/openssl3.conf +ldconfig +$OPENSSL3_PREFIX/openssl-3.0/bin/openssl fipsinstall -out $OPENSSL3_PREFIX/openssl-3.0/ssl/fipsmodule.cnf -module $OPENSSL3_PREFIX/openssl-3.0/lib64/ossl-modules/fips.so +sed -i 's@# .include fipsmodule.cnf@.include '"$OPENSSL3_PREFIX"'/openssl-3.0/ssl/fipsmodule.cnf@g; s/# \(fips = fips_sect\)/\1\nbase = base_sect\n\n[base_sect]\nactivate=1\n/g' $OPENSSL3_PREFIX/openssl-3.0/ssl/openssl.cnf +cd .. 
+ +export cc_opt="-I$OPENSSL3_PREFIX/openssl-3.0/include" +export ld_opt="-L$OPENSSL3_PREFIX/openssl-3.0/lib64 -Wl,-rpath,$OPENSSL3_PREFIX/openssl-3.0/lib64" + +wget --no-check-certificate https://raw.githubusercontent.com/api7/apisix-build-tools/master/build-apisix-runtime.sh +chmod +x build-apisix-runtime.sh +./build-apisix-runtime.sh +``` + +This will install apisix-runtime to `/usr/local/openresty`. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/api-gateway.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/api-gateway.md new file mode 100644 index 0000000..7cbd2e4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/api-gateway.md @@ -0,0 +1,44 @@ +--- +title: API Gateway +keywords: + - Apache APISIX + - API Gateway + - Gateway +description: This article mainly introduces the role of the API gateway and why it is needed. +--- + + + +## Description + +An API gateway is a software pattern that sits in front of an application programming interface (API) or group of microservices, to facilitate requests and delivery of data and services. Its primary role is to act as a single entry point and standardized process for interactions between an organization's apps, data, and services and internal and external customers. The API gateway can also perform various other functions to support and manage API usage, from authentication to rate limiting to analytics. + +An API gateway also acts as a gateway between the API and the underlying infrastructure. It can be used to route requests to different backends, such as a load balancer, or route requests to different services based on the request headers. + +## Why use an API gateway? + +An API gateway comes with a lot of benefits over a traditional API microservice. The following are some of the benefits: + +- It is a single entry point for all API requests. 
+- It can be used to route requests to different backends, such as a load balancer, or route requests to different services based on the request headers. +- It can be used to perform authentication, authorization, and rate-limiting. +- It can be used to support analytics, such as monitoring, logging, and tracing. +- It can protect the API from malicious attack vectors such as SQL injections, DDOS attacks, and XSS. +- It decreases the complexity of the API and microservices. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/consumer-group.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/consumer-group.md new file mode 100644 index 0000000..88f437c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/consumer-group.md @@ -0,0 +1,122 @@ +--- +title: Consumer Group +keywords: + - API gateway + - Apache APISIX + - Consumer Group +description: Consumer Group in Apache APISIX. +--- + + + +## Description + +Consumer Groups are used to extract commonly used [Plugin](./plugin.md) configurations and can be bound directly to a [Consumer](./consumer.md). + +With consumer groups, you can define any number of plugins, e.g. rate limiting and apply them to a set of consumers, +instead of managing each consumer individually. + +## Example + +The example below illustrates how to create a Consumer Group and bind it to a Consumer. 
+ +Create a Consumer Group which shares the same rate limiting quota: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "limit-count": { + "count": 200, + "time_window": 60, + "rejected_code": 503, + "group": "grp_company_a" + } + } +}' +``` + +Create a Consumer within the Consumer Group: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + } + }, + "group_id": "company_a" +}' +``` + +When APISIX can't find the Consumer Group with the `group_id`, the Admin API is terminated with a status code of `400`. + +:::tip + +1. When the same plugin is configured in [consumer](./consumer.md), [routing](./route.md), [plugin config](./plugin-config.md) and [service](./service.md), only one configuration is in effect, and the consumer has the highest priority. Please refer to [Plugin](./plugin.md). +2. If a Consumer already has the `plugins` field configured, the plugins in the Consumer Group will effectively be merged into it. The same plugin in the Consumer Group will not override the one configured directly in the Consumer. + +::: + +For example, if we configure a Consumer Group as shown below: + +```json +{ + "id": "bar", + "plugins": { + "response-rewrite": { + "body": "hello" + } + } +} +``` + +To a Consumer as shown below. + +```json +{ + "username": "foo", + "group_id": "bar", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + }, + "response-rewrite": { + "body": "world" + } + } +} +``` + +Then the `body` in `response-rewrite` keeps `world`. 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/consumer.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/consumer.md new file mode 100644 index 0000000..17fc7d8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/consumer.md @@ -0,0 +1,174 @@ +--- +title: Consumer +keywords: + - Apache APISIX + - API Gateway + - APISIX Consumer + - Consumer +description: This article describes the role of the Apache APISIX Consumer object and how to use the Consumer. +--- + + + +## Description + +For an API gateway, it is usually possible to identify the type of the requester by using things like their request domain name and client IP address. A gateway like APISIX can then filter these requests using [Plugins](./plugin.md) and forward it to the specified [Upstream](./upstream.md). + +It has the highest priority: Consumer > Route > Plugin Config > Service. + +But this level of depth can be insufficient on some occasions. + +![consumer-who](../../../assets/images/consumer-who.png) + +An API gateway should know who the consumer of the API is to configure different rules for different consumers. This is where the **Consumer** construct comes in APISIX. + +### Configuration options + +The fields for defining a Consumer are defined as below. + +| Field | Required | Description | +| ---------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `username` | True | Name of the consumer. | +| `plugins` | False | Plugin configuration of the **Consumer**. For specific Plugin configurations, please refer the [Plugins](./plugin.md). | + +## Identifying a Consumer + +The process of identifying a Consumer in APISIX is described below: + +![consumer-internal](../../../assets/images/consumer-internal.png) + +1. The first step is Authentication. 
This is achieved by Authentication Plugins like [key-auth](../plugins/key-auth.md) and [JWT](../plugins/jwt-auth.md). +2. After authenticating, you can obtain the `id` of the Consumer. This `id` will be the unique identifier of a Consumer. +3. The configurations like Plugins and Upstream bound to the Consumer are then executed. + +Consumers are useful when you have different consumers requesting the same API and you need to execute different Plugin and Upstream configurations based on the consumer. These need to be used in conjunction with the user authentication system. + +Authentication plugins that can be configured with a Consumer include `basic-auth`, `hmac-auth`, `jwt-auth`, `key-auth`, `ldap-auth`, and `wolf-rbac`. + +Refer to the documentation for the [key-auth](../plugins/key-auth.md) authentication Plugin to further understand the concept of a Consumer. + +:::note + +For more information about the Consumer object, you can refer to the [Admin API Consumer](../admin-api.md#consumer) object resource introduction. + +::: + +## Example + +The example below shows how you can enable a Plugin for a specific Consumer. + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. Create a Consumer, specify the authentication plugin `key-auth`, and enable the specific plugin `limit-count`. + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }' + ``` + +2. Create a Router, set routing rules and enable plugin configuration. 
+ + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }' + ``` + +3. Send a test request, the first two return to normal, did not reach the speed limit threshold. + + ```shell + curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I + ``` + + The third test returns `503` and the request is restricted. + + ```shell + HTTP/1.1 503 Service Temporarily Unavailable + ... + ``` + +We can use the [consumer-restriction](../plugins/consumer-restriction.md) Plugin to restrict our user "Jack" from accessing the API. + +1. Add Jack to the blacklist. + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": {}, + "consumer-restriction": { + "blacklist": [ + "jack" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }' + ``` + +2. Repeated tests, all return `403`; Jack is forbidden to access this API. + + ```shell + curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I + ``` + + ```shell + HTTP/1.1 403 + ... + ``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/credential.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/credential.md new file mode 100644 index 0000000..d8a5ab5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/credential.md @@ -0,0 +1,151 @@ +--- +title: Credential +keywords: + - APISIX + - API Gateway + - Consumer + - Credential +description: This article describes what the Apache APISIX Credential object does and how to use it. +--- + + + +## Description + +Credential is the object that holds the [Consumer](./consumer.md) credential configuration. +A Consumer can use multiple credentials of different types. 
+Credentials are used when you need to configure multiple credentials for a Consumer. + +Currently, Credential can be configured with the authentication plugins `basic-auth`, `hmac-auth`, `jwt-auth`, and `key-auth`. + +### Configuration options + +The fields for defining a Credential are defined as below. + +| Field | Required | Description | +|---------|----------|---------------------------------------------------------------------------------------------------------| +| desc | False | Decriptiion of the Credential. | +| labels | False | Labels of the Credential. | +| plugins | False | The plugin configuration corresponding to Credential. For more information, see [Plugins](./plugin.md). | + +:::note + +For more information about the Credential object, you can refer to the [Admin API Credential](../admin-api.md#credential) resource guide. + +::: + +## Example + +[Consumer Example](./consumer.md#example) describes how to configure the auth plugin for Consumer and how to use it with other plugins. +In this example, the Consumer has only one credential of type key-auth. +Now suppose the user needs to configure multiple credentials for that Consumer, you can use Credential to support this. + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. Create the Consumer without specifying the auth plug-n, but use Credential to configure the auth plugin later. + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "username": "jack" + }' + ``` + +2. Create 2 `key-auth` Credentials for the Consumer. 
+ + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/key-auth-one \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }' + ``` + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/key-auth-two \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": { + "key": "auth-two" + } + } + }' + ``` + +3. Create a route and enable `key-auth` plugin on it. + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }' + ``` + +4. Test. + + Test the request with the `auth-one` and `auth-two` keys, and they both respond correctly. + + ```shell + curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I + curl http://127.0.0.1:9080/hello -H 'apikey: auth-two' -I + ``` + + Enable the `limit-count` plugin for the Consumer. + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "username": "jack", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }' + ``` + + Requesting the route more than 3 times in a row with each of the two keys, the test returns `503` and the request is restricted. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/global-rule.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/global-rule.md new file mode 100644 index 0000000..1007f4b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/global-rule.md @@ -0,0 +1,70 @@ +--- +title: Global Rules +keywords: + - API Gateway + - Apache APISIX + - Global Rules +description: This article describes how to use global rules. 
+--- + + + +## Description + +A [Plugin](./plugin.md) configuration can be bound directly to a [Route](./route.md), a [Service](./service.md) or a [Consumer](./consumer.md). But what if we want a Plugin to work on all requests? This is where we register a global Plugin with Global Rule. + +Compared with the plugin configuration in Route, Service, Plugin Config, and Consumer, the plugin in the Global Rules is always executed first. + +## Example + +The example below shows how you can use the `limit-count` Plugin on all requests: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -X PUT \ + http://{apisix_listen_address}/apisix/admin/global_rules/1 \ + -H 'Content-Type: application/json' \ + -H "X-API-KEY: $admin_key" \ + -d '{ + "plugins": { + "limit-count": { + "time_window": 60, + "policy": "local", + "count": 2, + "key": "remote_addr", + "rejected_code": 503 + } + } + }' +``` + +You can also list all the Global rules by making this request with the Admin API: + +```shell +curl http://{apisix_listen_address}/apisix/admin/global_rules -H "X-API-KEY: $admin_key" +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/plugin-config.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/plugin-config.md new file mode 100644 index 0000000..88476d4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/plugin-config.md @@ -0,0 +1,171 @@ +--- +title: Plugin Config +keywords: + - API Gateway + - Apache APISIX + - Plugin Config +description: Plugin Config in Apache APISIX. +--- + + + +## Description + +Plugin Configs are used to extract commonly used [Plugin](./plugin.md) configurations and can be bound directly to a [Route](./route.md). 
+ +While configuring the same plugin, only one copy of the configuration is valid. Please read the [plugin execution order](../terminology/plugin.md#plugins-execution-order) and [plugin merging order](../terminology/plugin.md#plugins-merging-precedence). + +## Example + +The example below illustrates how to create a Plugin Config and bind it to a Route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_configs/1 \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "desc": "blah", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503 + } + } +}' +``` + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY:edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' +{ + "uris": ["/index.html"], + "plugin_config_id": 1, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +When APISIX can't find the Plugin Config with the `id`, the requests reaching this Route are terminated with a status code of `503`. + +:::note + +If a Route already has the `plugins` field configured, the plugins in the Plugin Config will effectively be merged to it. + +The same plugin in the Plugin Config will not override the ones configured directly in the Route. For more information, see [Plugin](./plugin.md). 
+ +::: + +For example, if you configure a Plugin Config as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_configs/1 \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "desc": "I am plugin_config 1", + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24", + "113.74.26.106" + ] + }, + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503 + } + } +}' +``` + +to a Route as shown below, + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uris": ["/index.html"], + "plugin_config_id": 1, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + "plugins": { + "proxy-rewrite": { + "uri": "/test/add", + "host": "apisix.iresty.com" + }, + "limit-count": { + "count": 20, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } +}' +``` + +the effective configuration will be as the one shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uris": ["/index.html"], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24", + "113.74.26.106" + ] + }, + "proxy-rewrite": { + "uri": "/test/add", + "host": "apisix.iresty.com" + }, + "limit-count": { + "count": 20, + "time_window": 60, + "rejected_code": 503 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/plugin-metadata.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/plugin-metadata.md new file mode 100644 index 0000000..93ece32 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/plugin-metadata.md @@ -0,0 +1,83 @@ +--- +title: Plugin Metadata +keywords: + - API Gateway + - Apache APISIX + - Plugin Metadata +description: Plugin Metadata in Apache APISIX. 
+--- + + + +## Description + +In this document, you will learn the basic concept of plugin metadata in APISIX and why you may need them. + +Explore additional resources at the end of the document for more information on related topics. + +## Overview + +In APISIX, a plugin metadata object is used to configure the common metadata field(s) of all plugin instances sharing the same plugin name. It is useful when a plugin is enabled across multiple objects and requires a universal update to their metadata fields. + +The following diagram illustrates the concept of plugin metadata using two instances of [syslog](https://apisix.apache.org/docs/apisix/plugins/syslog/) plugins on two different routes, as well as a plugin metadata object setting a [global](https://apisix.apache.org/docs/apisix/plugins/syslog/) `log_format` for the syslog plugin: + +![plugin_metadata](https://static.apiseven.com/uploads/2023/04/17/Z0OFRQhV_plugin%20metadata.svg) + +Without otherwise specified, the `log_format` on plugin metadata object should apply the same log format uniformly to both `syslog` plugins. However, since the `syslog` plugin on the `/orders` route has a different `log_format`, requests visiting this route will generate logs in the `log_format` specified by the plugin in route. + +Metadata properties set at the plugin level is more granular and has a higher priority over the "global" metadata object. + +Plugin metadata objects should only be used for plugins that have metadata fields. Check the specific plugin documentation to know more. 
+ +## Example usage + +The example below shows how you can configure through the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/http-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```json +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Additional Resource(s) + +Key Concepts - [Plugins](https://apisix.apache.org/docs/apisix/terminology/plugin/) diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/plugin.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/plugin.md new file mode 100644 index 0000000..0312c66 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/plugin.md @@ -0,0 +1,346 @@ +--- +title: Plugin +keywords: + - API Gateway + - Apache APISIX + - Plugin + - Filter + - Priority +description: This article introduces the related information of the APISIX Plugin object and how to use it, and introduces how to customize the plugin priority, customize the error response, and dynamically control the execution status of the plugin. +--- + + + +## Description + +APISIX Plugins extend APISIX's functionalities to meet organization or user-specific requirements in traffic management, observability, security, request/response transformation, serverless computing, and more. 
+
+A **Plugin** configuration can be bound directly to a [`Route`](route.md), [`Service`](service.md), [`Consumer`](consumer.md) or [`Plugin Config`](plugin-config.md). You can refer to [Admin API plugins](../admin-api.md#plugin) for how to use this resource.
+
+If existing APISIX Plugins do not meet your needs, you can also write your own plugins in Lua or other languages such as Java, Python, Go, and Wasm.
+
+## Plugins installation
+
+By default, most APISIX plugins are [installed](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua):
+
+```lua title="apisix/cli/config.lua"
+local _M = {
+  ...
+  plugins = {
+    "real-ip",
+    "ai",
+    "client-control",
+    "proxy-control",
+    "request-id",
+    "zipkin",
+    "ext-plugin-pre-req",
+    "fault-injection",
+    "mocking",
+    "serverless-pre-function",
+    ...
+  },
+  ...
+}
+```
+
+If you would like to make adjustments to plugins installation, add the customized `plugins` configuration to `config.yaml`. For example:
+
+```yaml
+plugins:
+  - real-ip # installed
+  - ai
+  - client-control
+  - proxy-control
+  - request-id
+  - zipkin
+  - ext-plugin-pre-req
+  - fault-injection
+  # - mocking # not installed
+  - serverless-pre-function
+  ... # other plugins
+```
+
+See [`config.yaml.example`](https://github.com/apache/apisix/blob/master/conf/config.yaml.example) for a complete configuration reference.
+
+You should reload APISIX for configuration changes to take effect.
+
+## Plugins execution lifecycle
+
+An installed plugin is first initialized. The configuration of the plugin is then checked against the defined [JSON Schema](https://json-schema.org) to make sure the plugin's configuration is correct.
+
+When a request goes through APISIX, the plugin's corresponding methods are executed in one or more of the following phases: `rewrite`, `access`, `before_proxy`, `header_filter`, `body_filter`, and `log`.
These phases are largely influenced by the [OpenResty directives](https://openresty-reference.readthedocs.io/en/latest/Directives/). + +
+
+Routes Diagram +
+
+ +## Plugins execution order + +In general, plugins are executed in the following order: + +1. Plugins in [global rules](./global-rule.md) + 1. plugins in rewrite phase + 2. plugins in access phase + +2. Plugins bound to other objects + 1. plugins in rewrite phase + 2. plugins in access phase + +Within each phase, you can optionally define a new priority number in the `_meta.priority` field of the plugin, which takes precedence over the default plugins priority during execution. Plugins with higher priority numbers are executed first. + +For example, if you want to have `limit-count` (priority 1002) run before `ip-restriction` (priority 3000) when requests hit a route, you can do so by passing a higher priority number to `_meta.priority` field of `limit-count`: + +```json +{ + ..., + "plugins": { + "limit-count": { + ..., + "_meta": { + "priority": 3010 + } + } + } +} +``` + +To reset the priority of this plugin instance to the default, simply remove the `_meta.priority` field from your plugin configuration. + +## Plugins merging precedence + +When the same plugin is configured both globally in a global rule and locally in an object (e.g. a route), both plugin instances are executed sequentially. + +However, if the same plugin is configured locally on multiple objects, such as on [Route](./route.md), [Service](./service.md), [Consumer](./consumer.md), [Consumer Group](./consumer-group.md), or [Plugin Config](./plugin-config.md), only one copy of configuration is used as each non-global plugin is only executed once. This is because during execution, plugins configured in these objects are merged with respect to a specific order of precedence: + +`Consumer` > `Consumer Group` > `Route` > `Plugin Config` > `Service` + +such that if the same plugin has different configurations in different objects, the plugin configuration with the highest order of precedence during merging will be used. 
+ +## Plugin common configuration + +Some common configurations can be applied to plugins through the `_meta` configuration items, the specific configuration items are as follows: + +| Name | Type | Description | +|----------------|--------------- |-------------| +| disable | boolean | When set to `true`, the plugin is disabled. | +| error_response | string/object | Custom error response. | +| priority | integer | Custom plugin priority. | +| filter | array | Depending on the requested parameters, it is decided at runtime whether the plugin should be executed. Something like this: `{{var, operator, val}, {var, operator, val}, ...}}`. For example: `{"arg_version", "==", "v2"}`, indicating that the current request parameter `version` is `v2`. The variables here are consistent with NGINX internal variables. For details on supported operators, please see [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list). | + +### Disable the plugin + +Through the `disable` configuration, you can add a new plugin with disabled status and the request will not go through the plugin. + +```json +{ + "proxy-rewrite": { + "_meta": { + "disable": true + } + } +} +``` + +### Custom error response + +Through the `error_response` configuration, you can configure the error response of any plugin to a fixed value to avoid troubles caused by the built-in error response information of the plugin. + +The configuration below means to customize the error response of the `jwt-auth` plugin to `Missing credential in request`. + +```json +{ + "jwt-auth": { + "_meta": { + "error_response": { + "message": "Missing credential in request" + } + } + } +} +``` + +### Custom plugin priority + +All plugins have default priorities, but through the `priority` configuration item you can customize the plugin priority and change the plugin execution order. 
+
+```json
+{
+    "serverless-post-function": {
+        "_meta": {
+            "priority": 10000
+        },
+        "phase": "rewrite",
+        "functions" : ["return function(conf, ctx)
+            ngx.say(\"serverless-post-function\");
+            end"]
+    },
+    "serverless-pre-function": {
+        "_meta": {
+            "priority": -2000
+        },
+        "phase": "rewrite",
+        "functions": ["return function(conf, ctx)
+            ngx.say(\"serverless-pre-function\");
+            end"]
+    }
+}
+```
+
+The default priority of serverless-pre-function is 10000, and the default priority of serverless-post-function is -2000. By default, the serverless-pre-function plugin will be executed first, and the serverless-post-function plugin will be executed next.
+
+The above configuration means setting the priority of the serverless-pre-function plugin to -2000 and the priority of the serverless-post-function plugin to 10000. The serverless-post-function plugin will be executed first, and the serverless-pre-function plugin will be executed next.
+
+:::note
+
+- Custom plugin priority only affects the current object (route, service, ...) that the plugin instance is bound to, not all instances of that plugin. For example, if the above plugin configuration belongs to Route A, the order of execution of the plugins serverless-pre-function and serverless-post-function on Route B will not be affected and the default priority will be used.
+- Custom plugin priority does not apply to the rewrite phase of some plugins configured on the consumer. The rewrite phase of plugins configured on the route will be executed first, and then the rewrite phase of plugins (excluding auth plugins) from the consumer will be executed.
+
+:::
+
+### Dynamically control whether the plugin is executed
+
+By default, all plugins specified in the route will be executed. But we can add a filter to the plugin through the `filter` configuration item, and control whether the plugin is executed through the execution result of the filter.
+ +The configuration below means that the `proxy-rewrite` plugin will only be executed if the `version` value in the request query parameters is `v2`. + +```json +{ + "proxy-rewrite": { + "_meta": { + "filter": [ + ["arg_version", "==", "v2"] + ] + }, + "uri": "/anything" + } +} +``` + +Create a complete route with the below configuration: + +```json +{ + "uri": "/get", + "plugins": { + "proxy-rewrite": { + "_meta": { + "filter": [ + ["arg_version", "==", "v2"] + ] + }, + "uri": "/anything" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +} +``` + +When the request does not have any parameters, the `proxy-rewrite` plugin will not be executed, the request will be proxy to the upstream `/get`: + +```shell +curl -v /dev/null http://127.0.0.1:9080/get -H"host:httpbin.org" +``` + +```shell +< HTTP/1.1 200 OK +...... +< Server: APISIX/2.15.0 +< +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1", + "X-Amzn-Trace-Id": "Root=1-62eb6eec-46c97e8a5d95141e621e07fe", + "X-Forwarded-Host": "httpbin.org" + }, + "origin": "127.0.0.1, 117.152.66.200", + "url": "http://httpbin.org/get" +} +``` + +When the parameter `version=v2` is carried in the request, the `proxy-rewrite` plugin is executed, and the request will be proxy to the upstream `/anything`: + +```shell +curl -v /dev/null http://127.0.0.1:9080/get?version=v2 -H"host:httpbin.org" +``` + +```shell +< HTTP/1.1 200 OK +...... +< Server: APISIX/2.15.0 +< +{ + "args": { + "version": "v2" + }, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1", + "X-Amzn-Trace-Id": "Root=1-62eb6f02-24a613b57b6587a076ef18b4", + "X-Forwarded-Host": "httpbin.org" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 117.152.66.200", + "url": "http://httpbin.org/anything?version=v2" +} +``` + +## Hot reload + +APISIX Plugins are hot-loaded. 
This means that there is no need to restart the service if you add, delete, modify plugins, or even if you update the plugin code. To hot-reload, you can send an HTTP request through the [Admin API](../admin-api.md): + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H "X-API-KEY: $admin_key" -X PUT +``` + +:::note + +If a configured Plugin is disabled, then its execution will be skipped. + +::: + +### Hot reload in standalone mode + +For hot-reloading in standalone mode, see the plugin related section in [stand alone mode](../deployment-modes.md#standalone). diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/route.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/route.md new file mode 100644 index 0000000..6300717 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/route.md @@ -0,0 +1,91 @@ +--- +title: Route +keywords: + - API Gateway + - Apache APISIX + - Route +description: This article describes the concept of Route and how to use it. +--- + + + +## Description + +Routes match the client's request based on defined rules, load and execute the corresponding [plugins](./plugin.md), and forwards the request to the specified [Upstream](./upstream.md). + +A Route mainly consists of three parts: + +1. Matching rules (`uri`, `host`, `remote address`) +2. Plugin configuration (current-limit, rate-limit) +3. Upstream information + +The image below shows some example Route rules. Note that the values are of the same color if they are identical. + +![routes-example](../../../assets/images/routes-example.png) + +All the parameters are configured directly in the Route. It is easy to set up, and each Route has a high degree of freedom. 
+ +When Routes have repetitive configurations (say, enabling the same plugin configuration or Upstream information), to update it, we need to traverse all the Routes and modify them. This adds a lot of complexity, making it difficult to maintain. + +These shortcomings are independently abstracted in APISIX by two concepts: [Service](service.md) and [Upstream](upstream.md). + +## Example + +The Route example shown below proxies the request with the URL `/index.html` to the Upstream service with the address `127.0.0.1:1980`. + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +```shell +HTTP/1.1 201 Created +Date: Sat, 31 Aug 2019 01:17:15 GMT +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server + +{"node":{"value":{"uri":"\/index.html","upstream":{"nodes":{"127.0.0.1:1980":1},"type":"roundrobin"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} +``` + +A successful response indicates that the route was created. + +## Configuration + +For specific options of Route, please refer to the [Admin API](../admin-api.md#route). diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/router.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/router.md new file mode 100644 index 0000000..0eb49f1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/router.md @@ -0,0 +1,56 @@ +--- +title: Router +keywords: + - API Gateway + - Apache APISIX + - Router +description: This article describes how to choose a router for Apache APISIX. 
+--- + + + +## Description + +A distinguishing feature of Apache APISIX from other API gateways is that it allows you to choose different Routers to better match free services, giving you the best choices for performance and freedom. + +You can set the Router that best suits your needs in your configuration file `conf/config.yaml`. + +## Configuration + +A Router can have the following configurations: + +- `apisix.router.http`: The HTTP request route. It can take the following values: + + - `radixtree_uri`: Only use the `uri` as the primary index. To learn more about the support for full and deep prefix matching, check [How to use router-radixtree](../router-radixtree.md). + - `Absolute match`: Match completely with the given `uri` (`/foo/bar`, `/foo/glo`). + - `Prefix match`: Match with the given prefix. Use `*` to represent the given `uri` for prefix matching. For example, `/foo*` can match with `/foo/`, `/foo/a` and `/foo/b`. + - `match priority`: First try an absolute match, if it didn't match, try prefix matching. + - `Any filter attribute`: This allows you to specify any Nginx built-in variable as a filter, such as URL request parameters, request headers, and cookies. + - `radixtree_uri_with_parameter`: Like `radixtree_uri` but also supports parameter match. + - `radixtree_host_uri`: (Default) Matches both host and URI of the request. Use `host + uri` as the primary index (based on the `radixtree` engine). + +:::note + +In version 3.2 and earlier, APISIX used `radixtree_uri` as the default Router. `radixtree_uri` has better performance than `radixtree_host_uri`, so if you have higher performance requirements and can live with the fact that `radixtree_uri` only use the `uri` as the primary index, consider continuing to use `radixtree_uri` as the default Router. + +::: + +- `apisix.router.ssl`: SSL loads the matching route. + - `radixtree_sni`: (Default) Use `SNI` (Server Name Indication) as the primary index (based on the radixtree engine). 
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/script.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/script.md new file mode 100644 index 0000000..793a5ac --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/script.md @@ -0,0 +1,39 @@ +--- +title: Script +--- + + + +## Description + +Scripts lets you write arbitrary Lua code or directly call existing plugins and execute them during the HTTP request/response lifecycle. + +A Script configuration can be directly bound to a [Route](./route.md). + +Scripts and [Plugins](./plugin.md) are mutually exclusive, and a Script is executed before a Plugin. This means that after configuring a Script, the Plugin configured on the Route will **not** be executed. + +Scripts also have a concept of execution phase which supports the `access`, `header_filter`, `body_filter`, and the `log` phase. The corresponding phase will be executed automatically by the system in the Script. + +```json +{ + ... + "script": "local _M = {} \n function _M.access(api_ctx) \n ngx.log(ngx.INFO,\"hit access phase\") \n end \nreturn _M" +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/secret.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/secret.md new file mode 100644 index 0000000..94c1b88 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/secret.md @@ -0,0 +1,349 @@ +--- +title: Secret +--- + + + +## Description + +Secrets refer to any sensitive information required during the running process of APISIX, which may be part of the core configuration (such as the etcd's password) or some sensitive information in the plugin. Common types of Secrets in APISIX include: + +- username, the password for some components (etcd, Redis, Kafka, etc.) 
+- the private key of the certificate +- API key +- Sensitive plugin configuration fields, typically used for authentication, hashing, signing, or encryption + +APISIX Secret allows users to store secrets through some secrets management services (Vault, etc.) in APISIX, and read them according to the key when using them to ensure that **Secrets do not exist in plain text throughout the platform**. + +Its working principle is shown in the figure: +![secret](../../../assets/images/secret.png) + +APISIX currently supports storing secrets in the following ways: + +- [Environment Variables](#use-environment-variables-to-manage-secrets) +- [HashiCorp Vault](#use-hashicorp-vault-to-manage-secrets) +- [AWS Secrets Manager](#use-aws-secrets-manager-to-manage-secrets) +- [GCP Secrets Manager](#use-gcp-secrets-manager-to-manage-secrets) + +You can use APISIX Secret functions by specifying format variables in the consumer configuration of the following plugins, such as `key-auth`. + +:::note + +If a key-value pair `key: "$ENV://ABC"` is configured in APISIX and the value of `$ENV://ABC` is unassigned in the environment variable, `$ENV://ABC` will be interpreted as a string literal, instead of `nil`. + +::: + +## Use environment variables to manage secrets + +Using environment variables to manage secrets means that you can save key information in environment variables, and refer to environment variables through variables in a specific format when configuring plugins. APISIX supports referencing system environment variables and environment variables configured through the Nginx `env` directive. 
+ +### Usage + +``` +$ENV://$env_name/$sub_key +``` + +- env_name: environment variable name +- sub_key: get the value of a property when the value of the environment variable is a JSON string + + If the value of the environment variable is of type string, such as: + +``` +export JACK_AUTH_KEY=abc +``` + +It can be referenced as follows: + +``` +$ENV://JACK_AUTH_KEY +``` + +If the value of the environment variable is a JSON string like: + +``` +export JACK={"auth-key":"abc","openid-key": "def"} +``` + +It can be referenced as follows: + +``` +# Get the auth-key of the environment variable JACK +$ENV://JACK/auth-key + +# Get the openid-key of the environment variable JACK +$ENV://JACK/openid-key +``` + +### Example: use in key-auth plugin + +Step 1: Create environment variables before the APISIX instance starts + +``` +export JACK_AUTH_KEY=abc +``` + +Step 2: Reference the environment variable in the `key-auth` plugin + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$ENV://JACK_AUTH_KEY" + } + } +}' +``` + +Through the above steps, the `key` configuration in the `key-auth` plugin can be saved in the environment variable instead of being displayed in plain text when configuring the plugin. + +## Use HashiCorp Vault to manage secrets + +Using HashiCorp Vault to manage secrets means that you can store secrets information in the Vault service and refer to it through variables in a specific format when configuring plugins. APISIX currently supports [Vault KV engine version V1](https://developer.hashicorp.com/vault/docs/secrets/kv/kv-v1). 
+ +### Usage + +``` +$secret://$manager/$id/$secret_name/$key +``` + +- manager: secrets management service, could be the HashiCorp Vault, AWS, etc. +- id: APISIX Secrets resource ID, which needs to be consistent with the one specified when adding the APISIX Secrets resource +- secret_name: the secret name in the secrets management service +- key: the key corresponding to the secret in the secrets management service + +### Example: use in key-auth plugin + +Step 1: Create the corresponding key in the Vault, you can use the following command: + +```shell +vault kv put apisix/jack auth-key=value +``` + +Step 2: Add APISIX Secrets resources through the Admin API, configure the Vault address and other connection information: + +```shell +curl http://127.0.0.1:9180/apisix/admin/secrets/vault/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "https://127.0.0.1:8200", + "prefix": "apisix", + "token": "root" +}' +``` + +If you use APISIX Standalone mode, you can add the following configuration in `apisix.yaml` configuration file: + +```yaml +secrets: + - id: vault/1 + prefix: apisix + token: root + uri: 127.0.0.1:8200 +``` + +:::tip + +It now supports the use of the [`namespace` field](../admin-api.md#request-body-parameters-11) to set the multi-tenant namespace concepts supported by [HashiCorp Vault Enterprise](https://developer.hashicorp.com/vault/docs/enterprise/namespaces#vault-api-and-namespaces) and HCP Vault. + +::: + +Step 3: Reference the APISIX Secrets resource in the `key-auth` plugin and fill in the key information: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$secret://vault/1/jack/auth-key" + } + } +}' +``` + +Through the above two steps, when the user request hits the `key-auth` plugin, the real value of the key in the Vault will be obtained through the APISIX Secret component. 
+ +## Use AWS Secrets Manager to manage secrets + +Managing secrets with AWS Secrets Manager is a secure and convenient way to store and manage sensitive information. This method allows you to save secret information in AWS Secrets Manager and reference these secrets in a specific format when configuring APISIX plugins. + +APISIX currently supports two authentication methods: using [long-term credentials](https://docs.aws.amazon.com/sdkref/latest/guide/access-iam-users.html) and [short-term credentials](https://docs.aws.amazon.com/sdkref/latest/guide/access-temp-idc.html). + +### Usage + +``` +$secret://$manager/$id/$secret_name/$key +``` + +- manager: secrets management service, could be the HashiCorp Vault, AWS, etc. +- id: APISIX Secrets resource ID, which needs to be consistent with the one specified when adding the APISIX Secrets resource +- secret_name: the secret name in the secrets management service +- key: get the value of a property when the value of the secret is a JSON string + +### Required Parameters + +| Name | Required | Default Value | Description | +| --- | --- | --- | --- | +| access_key_id | True | | AWS Access Key ID | +| secret_access_key | True | | AWS Secret Access Key | +| session_token | False | | Temporary access credential information | +| region | False | us-east-1 | AWS Region | +| endpoint_url | False | https://secretsmanager.{region}.amazonaws.com | AWS Secret Manager URL | + +### Example: use in key-auth plugin + +Here, we use the key-auth plugin as an example to demonstrate how to manage secrets through AWS Secrets Manager. + +Step 1: Create the corresponding key in the AWS secrets manager. 
Here, [localstack](https://www.localstack.cloud/) is used as the example environment, and you can use the following command:
+
+```shell
+docker exec -i localstack sh -c "awslocal secretsmanager create-secret --name jack --description 'APISIX Secret' --secret-string '{\"auth-key\":\"value\"}'"
+```
+
+Step 2: Add APISIX Secrets resources through the Admin API, configure the connection information such as the address of AWS Secrets Manager.
+
+You can store the critical key information in environment variables to ensure the configuration information is secure, and reference it where it is used:
+
+```shell
+export AWS_ACCESS_KEY_ID=
+export AWS_SECRET_ACCESS_KEY=
+export AWS_SESSION_TOKEN=
+export AWS_REGION=
+```
+
+Alternatively, you can also specify all the information directly in the configuration:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/secrets/aws/1 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "endpoint_url": "http://127.0.0.1:4566",
+    "region": "us-east-1",
+    "access_key_id": "access",
+    "secret_access_key": "secret",
+    "session_token": "token"
+}'
+```
+
+If you use APISIX Standalone mode, you can add the following configuration in `apisix.yaml` configuration file:
+
+```yaml
+secrets:
+  - id: aws/1
+    endpoint_url: http://127.0.0.1:4566
+    region: us-east-1
+    access_key_id: access
+    secret_access_key: secret
+    session_token: token
+```
+
+Step 3: Reference the APISIX Secrets resource in the `key-auth` plugin and fill in the key information:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/consumers \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "username": "jack",
+    "plugins": {
+        "key-auth": {
+            "key": "$secret://aws/1/jack/auth-key"
+        }
+    }
+}'
+```
+
+Through the above three steps, when the user request hits the `key-auth` plugin, the real value of the key in AWS Secrets Manager will be obtained through the APISIX Secret component.
+ +### Verification + +You can verify this with the following command: + +```shell +#Replace the following your_route with the actual route path. +curl -i http://127.0.0.1:9080/your_route -H 'apikey: value' +``` + +This will verify whether the `key-auth` plugin is correctly using the key from AWS Secrets Manager. + +## Use GCP Secrets Manager to manage secrets + +Using the GCP Secrets Manager to manage secrets means you can store the secret information in the GCP service, and reference it using a specific format of variables when configuring plugins. APISIX currently supports integration with the GCP Secrets Manager, and the supported authentication method is [OAuth 2.0](https://developers.google.com/identity/protocols/oauth2). + +### Reference Format + +``` +$secret://$manager/$id/$secret_name/$key +``` + +The reference format is the same as before: + +- manager: secrets management service, could be the HashiCorp Vault, AWS, GCP etc. +- id: APISIX Secrets resource ID, which needs to be consistent with the one specified when adding the APISIX Secrets resource +- secret_name: the secret name in the secrets management service +- key: get the value of a property when the value of the secret is a JSON string + +### Required Parameters + +| Name | Required | Default | Description | +|-------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| auth_config | True | | Either `auth_config` or `auth_file` must be provided. | +| auth_config.client_email | True | | Email address of the Google Cloud service account. | +| auth_config.private_key | True | | Private key of the Google Cloud service account. 
| +| auth_config.project_id | True | | Project ID in the Google Cloud service account. | +| auth_config.token_uri | False | https://oauth2.googleapis.com/token | Token URI of the Google Cloud service account. | +| auth_config.entries_uri | False | https://secretmanager.googleapis.com/v1 | The API access endpoint for the Google Secrets Manager. | +| auth_config.scope | False | https://www.googleapis.com/auth/cloud-platform | Access scopes of the Google Cloud service account. See [OAuth 2.0 Scopes for Google APIs](https://developers.google.com/identity/protocols/oauth2/scopes) | +| auth_file | True | | Path to the Google Cloud service account authentication JSON file. Either `auth_config` or `auth_file` must be provided. | +| ssl_verify | False | true | When set to `true`, enables SSL verification as mentioned in [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | + +You need to configure the corresponding authentication parameters, or specify the authentication file through auth_file, where the content of auth_file is in JSON format. 
+ +### Example + +Here is a correct configuration example: + +``` +curl http://127.0.0.1:9180/apisix/admin/secrets/gcp/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "auth_config" : { + "client_email": "email@apisix.iam.gserviceaccount.com", + "private_key": "private_key", + "project_id": "apisix-project", + "token_uri": "https://oauth2.googleapis.com/token", + "entries_uri": "https://secretmanager.googleapis.com/v1", + "scope": ["https://www.googleapis.com/auth/cloud-platform"] + } +}' + +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/service.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/service.md new file mode 100644 index 0000000..69a0042 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/service.md @@ -0,0 +1,118 @@ +--- +title: Service +--- + + + +## Description + +A Service is an abstraction of an API (which can also be understood as a set of [Route](./route.md) abstractions). It usually corresponds to an upstream service abstraction. + +The relationship between Routes and a Service is usually N:1 as shown in the image below. + +![service-example](../../../assets/images/service-example.png) + +As shown, different Routes could be bound to the same Service. This reduces redundancy as these bounded Routes will have the same [Upstream](./upstream.md) and [Plugin](./plugin.md) configurations. + +For more information about Service, please refer to [Admin API Service object](../admin-api.md#service). + +## Examples + +The following example creates a Service that enables the `limit-count` Plugin and then binds it to the Routes with the ids `100` and `101`. + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. Create a Service. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/services/200 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +2. create new Route and reference the service by id `200` + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/100 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "methods": ["GET"], + "uri": "/index.html", + "service_id": "200" + }' + ``` + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/101 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "methods": ["GET"], + "uri": "/foo/index.html", + "service_id": "200" + }' + ``` + +We can also specify different Plugins or Upstream for the Routes than the ones defined in the Service. The example below creates a Route with a `limit-count` Plugin. This Route will continue to use the other configurations defined in the Service (here, the Upstream configuration). + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/102 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "uri": "/bar/index.html", + "id": "102", + "service_id": "200", + "plugins": { + "limit-count": { + "count": 2000, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }' + ``` + +:::note + +When a Route and a Service enable the same Plugin, the one defined in the Route is given the higher priority. 
+ +::: diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/upstream.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/upstream.md new file mode 100644 index 0000000..4d8733a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/terminology/upstream.md @@ -0,0 +1,249 @@ +--- +title: Upstream +keywords: + - Apache APISIX + - API Gateway + - APISIX Upstream + - Upstream +description: This article describes the role of the Apache APISIX Upstream object and how to use the Upstream. +--- + + + +## Description + +Upstream is a virtual host abstraction that performs load balancing on a given set of service nodes according to the configured rules. + +Although Upstream can be directly configured to the [Route](./route.md) or [Service](./service.md), using an Upstream object is recommended when there is duplication as shown below. + +![upstream-example](../../../assets/images/upstream-example.png) + +By creating an Upstream object and referencing it by `upstream_id` in the Route, you can ensure that there is only a single value of the object that needs to be maintained. + +An Upstream configuration can be directly bound to a Route or a Service, but the configuration in Route has a higher priority. This behavior is consistent with priority followed by the [Plugin](./plugin.md) object. + +## Configuration + +In addition to the equalization algorithm selections, Upstream also supports passive health check and retry for the upstream. You can learn more about this [Admin API Upstream](../admin-api.md#upstream). + +To create an Upstream object, you can use the Admin API as shown below. 
+
+:::note
+You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/upstreams/1 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "type": "chash",
+    "key": "remote_addr",
+    "nodes": {
+        "127.0.0.1:80": 1,
+        "foo.com:80": 2
+    }
+}'
+```
+
+After creating an Upstream object, it can be referenced by a specific Route or Service as shown below.
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "uri": "/index.html",
+    "upstream_id": 1
+}'
+```
+
+For convenience, you can directly bind the upstream address to a Route or Service.
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "uri": "/index.html",
+    "plugins": {
+        "limit-count": {
+            "count": 2,
+            "time_window": 60,
+            "rejected_code": 503,
+            "key": "remote_addr"
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+
+## Example
+
+The example below shows how you can configure a health check.
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "uri": "/index.html",
+    "plugins": {
+        "limit-count": {
+            "count": 2,
+            "time_window": 60,
+            "rejected_code": 503,
+            "key": "remote_addr"
+        }
+    },
+    "upstream": {
+        "nodes": {
+            "127.0.0.1:1980": 1
+        },
+        "type": "roundrobin",
+        "retries": 2,
+        "checks": {
+            "active": {
+                "http_path": "/status",
+                "host": "foo.com",
+                "healthy": {
+                    "interval": 2,
+                    "successes": 1
+                },
+                "unhealthy": {
+                    "interval": 1,
+                    "http_failures": 2
+                }
+            }
+        }
+    }
+}'
+```
+
+You can learn more about health checks in [health-check](../tutorials/health-check.md).
+
+The examples below show configurations that use different `hash_on` types.
+ +### Consumer + +Creating a Consumer object. + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + } + } +}' +``` + +Creating a Route object and enabling the `key-auth` authentication Plugin. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "type": "chash", + "hash_on": "consumer" + }, + "uri": "/server_port" +}' +``` + +To test the request, the `consumer_name` passed for authentication will be used as the hash value of the load balancing hash algorithm. + +```shell +curl http://127.0.0.1:9080/server_port -H "apikey: auth-jack" +``` + +### Cookie + +Creating a Route and an upstream object. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hash_on_cookie", + "upstream": { + "key": "sid", + "type": "chash", + "hash_on": "cookie", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } +}' +``` + +The client can then send a request with a cookie. + +```shell + curl http://127.0.0.1:9080/hash_on_cookie \ + -H "Cookie: sid=3c183a30cffcda1408daf1c61d47b274" +``` + +### Header + +Creating a Route and an upstream object. + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hash_on_header", + "upstream": { + "key": "content-type", + "type": "chash", + "hash_on": "header", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } +}' +``` + +The client can now send requests with a header. The example below shows using the header `Content-Type`. 
+ +```shell + curl http://127.0.0.1:9080/hash_on_header \ + -H "X-API-KEY: $admin_key" \ + -H "Content-Type: application/json" +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/add-multiple-api-versions.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/add-multiple-api-versions.md new file mode 100644 index 0000000..290db96 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/add-multiple-api-versions.md @@ -0,0 +1,245 @@ +--- +title: Add multiple API versions +keywords: + - API Versioning + - Apache APISIX + - API Gateway + - Multiple APIs + - Proxy rewrite + - Request redirect + - Route API requests +description: In this tutorial, you will learn how to publish and manage multiple versions of your API with Apache APISIX. +--- + + + +## What is API versioning? + +**API versioning** is the practice of managing changes to an API and ensuring that these changes are made without disrupting clients. A good API versioning strategy clearly communicates the changes made and allows API consumers to decide when to upgrade to the latest version at their own pace. + +## Types of API versioning + +#### URI Path + +The most common way to version an API is in the URI path and is often done with the prefix "v". This method employs URI routing to direct requests to a specific version of the API. + +```shell +http://apisix.apache.org/v1/hello +http://apisix.apache.org/v2/hello +``` + +#### Query parameters + +In this method, the version number is included in the URI, but as a query parameter instead of in the path. + +```shell +http://apisix.apache.org/hello?version=1 +http://apisix.apache.org/hello?version=2 +``` + +#### Custom request Header + +You can also set the version number using custom headers in requests and responses. This leaves the URI of your resources unchanged. 
+ +```shell +http://apisix.apache.org/hello -H 'Version: 1' +http://apisix.apache.org/hello -H 'Version: 2' +``` + +The primary goal of versioning is to provide users of an API with the most functionality possible while causing minimal inconvenience. Keeping this goal in mind, let’s have a look in this tutorial at how to _publish and manage multiple versions of your API_ with Apache APISIX. + +**In this tutorial**, you learn how to: + +- Create a route and upstream for our sample API. +- Add a new version to the existing API. +- Use [proxy-rewrite](https://apisix.apache.org/docs/apisix/plugins/proxy-rewrite/) plugin to rewrite the path in a plugin configuration. +- Route API requests from the old version to the new one. + +## Prerequisites + +For the demo case, we will leverage the sample repository [Evolve APIs](https://github.com/nfrankel/evolve-apis) on GitHub built on the Spring boot that demonstrates our API. You can see the complete source code there. + +To execute and customize the example project per your need shown in this tutorial, here are the minimum requirements you need to install in your system: + +- [Docker](https://docs.docker.com/desktop/windows/install/) - you need [Docker](https://www.docker.com/products/docker-desktop/) installed locally to complete this tutorial. It is available for [Windows](https://desktop.docker.com/win/edge/Docker%20Desktop%20Installer.exe) or [macOS](https://desktop.docker.com/mac/edge/Docker.dmg). + +Also, complete the following steps to run the sample project with Docker. + +Use [git](https://git-scm.com/downloads) to clone the repository: + +``` shell +git clone 'https://github.com/nfrankel/evolve-apis' +``` + +Go to root directory of _evolve-apis_ + +``` shell +cd evolve-apis +``` + +Now we can start our application by running `docker compose up` command from the root folder of the project: + +``` shell +docker compose up -d +``` + +### Create a route and upstream for the API. 
+ +You first need to [Route](https://apisix.apache.org/docs/apisix/terminology/route/) your HTTP requests from the gateway to an [Upstream](https://apisix.apache.org/docs/apisix/terminology/upstream/) (your API). With APISIX, you can create a route by sending an HTTP request to the gateway. + +```shell +curl http://apisix:9180/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PUT -d ' +{ + "name": "Direct Route to Old API", + "methods": ["GET"], + "uris": ["/hello", "/hello/", "/hello/*"], + "upstream": { + "type": "roundrobin", + "nodes": { + "oldapi:8081": 1 + } + } +}' +``` + +At this stage, we do not have yet any version and you can query the gateway as below: + +```shell +curl http://apisix.apache.org/hello +``` + +```shell title="output" +Hello world +``` + +```shell +curl http://apisix.apache.org/hello/Joe +``` + +```shell title="output" +Hello Joe +``` + +In the previous step, we created a route that wrapped an upstream inside its configuration. Also, APISIX allows us to create an upstream with a dedicated ID to reuse it across several routes. + +Let's create the shared upstream by running below curl cmd: + +```shell +curl http://apisix:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: xyz' -X PUT -d ' +{ + "name": "Old API", + "type": "roundrobin", + "nodes": { + "oldapi:8081": 1 + } +}' +``` + +### Add a new version + +In the scope of this tutorial, we will use _URI path-based versioning_ because it’s the most widespread. We are going to add `v1` version for our existing `oldapi` in this section. + + ![Apache APISIX Multiple API versions](https://static.apiseven.com/2022/12/13/639875780e094.png) + +Before introducing the new version, we also need to rewrite the query that comes to the API gateway before forwarding it to the upstream. Because both the old and new versions should point to the same upstream and the upstream exposes endpoint `/hello`, not `/v1/hello`. 
Let’s create a plugin configuration to rewrite the path: + +```shell +curl http://apisix:9180/apisix/admin/plugin_configs/1 -H 'X-API-KEY: xyz' -X PUT -d ' +{ + "plugins": { + "proxy-rewrite": { + "regex_uri": ["/v1/(.*)", "/$1"] + } + } +}' +``` + +We can now create the second versioned route that references the existing upstream and plugin config. + +> Note that we can create routes for different API versions. + +```shell +curl http://apisix:9180/apisix/admin/routes/2 -H 'X-API-KEY: xyz' -X PUT -d ' +{ + "name": "Versioned Route to Old API", + "methods": ["GET"], + "uris": ["/v1/hello", "/v1/hello/", "/v1/hello/*"], + "upstream_id": 1, + "plugin_config_id": 1 +}' +``` + +At this stage, we have configured two routes, one versioned and the other non-versioned: + +```shell +curl http://apisix.apache.org/hello +``` + +```shell title="output" +Hello world +``` + +```shell +curl http://apisix.apache.org/v1/hello +``` + +```shell title="output" +Hello world +``` + +## Route API requests from the old version to the new one + +We have versioned our API, but our API consumers probably still use the legacy non-versioned API. We want them to migrate, but we cannot just delete the legacy route as our users are unaware of it. Fortunately, the `301 HTTP` status code is our friend: we can let users know that the resource has moved from `http://apisix.apache.org/hello` to `http://apisix.apache.org/v1/hello`. 
It requires configuring the [redirect plugin](https://apisix.apache.org/docs/apisix/plugins/redirect/) on the initial route: + +```shell +curl http://apisix:9180/apisix/admin/routes/1 -H 'X-API-KEY: xyz' -X PATCH -d ' +{ + "plugins": { + "redirect": { + "uri": "/v1$uri", + "ret_code": 301 + } + } +}' +``` + +![Apache APISIX Multiple API versions with two routes](https://static.apiseven.com/2022/12/13/63987577a9e66.png) + +Now when we try to request the first non-versioned API endpoint, you will get an expected output: + +```shell +curl http://apisix.apache.org/hello + + +301 Moved Permanently + +

301 Moved Permanently

+
openresty
+ + +``` + +Either API consumers will transparently use the new endpoint because they will follow, or their integration breaks and they will notice the 301 status and the new API location to use. + +## Next steps + +As you followed throughout the tutorial, it is very easy to publish multiple versions of your API with Apache APISIX and it does not require setting up actual API endpoints for each version of your API in the backend. It also allows your clients to switch between two versions without any downtime and save assets if there’s ever an update. + +Learn more about how to [manage](./manage-api-consumers.md) API consumers and [protect](./protect-api.md) your APIs. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/cache-api-responses.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/cache-api-responses.md new file mode 100644 index 0000000..9ee23f8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/cache-api-responses.md @@ -0,0 +1,231 @@ +--- +title: Cache API responses +keywords: + - API Gateway + - Apache APISIX + - Cache + - Performance +description: This tutorial will focus primarily on handling caching at the API Gateway level by using Apache APISIX API Gateway and you will learn how to use proxy-caching plugin to improve response efficiency for your Web or Microservices API. +--- + + + +This tutorial will focus primarily on handling caching at the API Gateway level by using Apache APISIX API Gateway and you will learn how to use the proxy-cache plugin to improve response efficiency for your Web or Microservices API. 
+
+**Here is an overview of what we cover in this walkthrough:**
+
+- Caching in API Gateway
+- About [Apache APISIX API Gateway](https://apisix.apache.org/docs/apisix/getting-started/)
+- Run the demo project [apisix-dotnet-docker](https://github.com/Boburmirzo/apisix-dotnet-docker)
+- Configure the [Proxy Cache](https://apisix.apache.org/docs/apisix/plugins/proxy-cache/) plugin
+- Validate Proxy Caching
+
+## Improve performance with caching
+
+When you are building an API, you want to keep it simple and fast. Once the concurrent need to read the same data increases, you'll face a few issues where you might be considering introducing **caching**:
+
+- There is latency on some API requests which is noticeably affecting the user's experience.
+- Fetching data from a database takes more time to respond.
+- Availability of your API is threatened by the API's high throughput.
+- There are some network failures in getting frequently accessed information from your API.
+
+## Caching in API Gateway
+
+[Caching](https://en.wikipedia.org/wiki/Cache_(computing)) is capable of storing and retrieving network requests and their corresponding responses. Caching happens at different levels in a web application:
+
+- Edge caching or CDN
+- Database caching
+- Server caching (API caching)
+- Browser caching
+
+**Reverse Proxy Caching** is yet another caching mechanism that is usually implemented inside **API Gateway**. It can reduce the number of calls made to your endpoint and also improve the latency of requests to your API by caching a response from the upstream. If the API Gateway cache has a fresh copy of the requested resource, it uses that copy to satisfy the request directly instead of making a request to the endpoint. If the cached data is not found, the request travels to the intended upstream services (backend services).
+ +## Apache APISIX API Gateway Proxy Caching + +With the help of Apache APISIX, you can enable API caching with [proxy-cache](https://apisix.apache.org/docs/apisix/plugins/proxy-cache/) plugin to cache your API endpoint's responses and enhance the performance. It can be used together with other Plugins too and currently supports disk-based caching. The data to be cached can be filtered with _responseCodes_, _requestModes_, or more complex methods using the _noCache_ and _cacheByPass_ attributes. You can specify cache expiration time or a memory capacity in the plugin configuration as well. Please, refer to other `proxy-cache` plugin's [attributes](https://apisix.apache.org/docs/apisix/plugins/proxy-cache/). + +With all this in mind, we'll look next at an example of using `proxy-cache` plugin offered by Apache APISIX and apply it for ASP.NET Core Web API with a single endpoint. + +## Run the demo project + +Until now, I assume that you have the demo project [apisix-dotnet-docker](https://github.com/Boburmirzo/apisix-dotnet-docker) is up and running. You can see the complete source code on **Github** and the instruction on how to build a multi-container **APISIX** via **Docker CLI**. + +In the **ASP.NET Core project**, there is a simple API to get all products list from the service layer in [ProductsController.cs](https://github.com/Boburmirzo/apisix-dotnet-docker/blob/main/ProductApi/Controllers/ProductsController.cs) file. + +Let's assume that this product list is usually updated only once a day and the endpoint receives repeated billions of requests every day to fetch the product list partially or all of them. In this scenario, using API caching technique with `proxy-cache` plugin might be really helpful. For the demo purpose, we only enable caching for `GET` method. + +> Ideally, `GET` requests should be cacheable by default - until a special condition arises. 
+ +## Configure the Proxy Cache Plugin + +Now let's start with adding `proxy-cache` plugin to Apache APISIX declarative configuration file `config.yaml` in the project. Because in the current project, we have not registered yet the plugin we are going to use for this demo. We appended `proxy-cache` plugin's name to the end of plugins list: + +``` yaml +plugins: + - http-logger + - ip-restriction + … + - proxy-cache +``` + +You can add your cache configuration in the same file if you need to specify values like _disk_size, memory_size_ as shown below: + +``` yaml +proxy_cache: + cache_ttl: 10s # default caching time if the upstream doesn't specify the caching time + zones: + - name: disk_cache_one # name of the cache. Admin can specify which cache to use in the Admin API by name + memory_size: 50m # size of shared memory, used to store the cache index + disk_size: 1G # size of disk, used to store the cache data + disk_path: "/tmp/disk_cache_one" # path to store the cache data + cache_levels: "1:2" # hierarchy levels of the cache +``` + +Next, we can directly run `apisix reload` command to reload the latest plugin code without restarting Apache APISIX. See the command to reload the newly added plugin: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +``` shell +curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H "X-API-KEY: $admin_key" -X PUT +``` + +Then, we run two more curl commands to configure an Upstream and Route for the `/api/products` endpoint. 
The following command creates a sample upstream (that's our API Server): + +``` shell +curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "type": "roundrobin", + "nodes": { + "productapi:80": 1 + } +}' +``` + +Next, we will add a new route with caching ability by setting `proxy-cache` plugin in `plugins` property and giving a reference to the upstream service by its unique id to forward requests to the API server: + +``` shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d '{ + "name": "Route for API Caching", + "methods": [ + "GET" + ], + "uri": "/api/products", + "plugins": { + "proxy-cache": { + "cache_key": [ + "$uri", + "-cache-id" + ], + "cache_bypass": [ + "$arg_bypass" + ], + "cache_method": [ + "GET" + ], + "cache_http_status": [ + 200 + ], + "hide_cache_headers": true, + "no_cache": [ + "$arg_test" + ] + } + }, + "upstream_id": 1 +}' +``` + +As you can see in the above configuration, we defined some plugin attributes that we want to cache only successful responses from the `GET` method of API. + +## Validate Proxy Caching + +Finally, we can test the proxy caching if it is working as it is expected. + +We will send multiple requests to the `/api/products` path and we should receive `HTTP 200 OK` response each time. However, the `Apisix-Cache-Status` in the response shows _MISS_ meaning that the response has not cached yet when the request hits the route for the first time. Now, if you make another request, you will see that you get a cached response with the caching indicator as _HIT_. 
+
+Now we can make an initial request:
+
+``` shell
+curl http://localhost:9080/api/products -i
+```
+
+The response looks like below:
+
+``` shell
+HTTP/1.1 200 OK
+…
+Apisix-Cache-Status: MISS
+```
+
+When you do the next call to the service, the route responds to the request with a cached response since it was already cached by the previous request:
+
+``` shell
+HTTP/1.1 200 OK
+…
+Apisix-Cache-Status: HIT
+```
+
+Or if you try again to hit the endpoint after the time-to-live (TTL) period for the cache ends, you will get:
+
+``` shell
+HTTP/1.1 200 OK
+…
+Apisix-Cache-Status: EXPIRED
+```
+
+Excellent! We enabled caching for our API endpoint.
+
+### Additional test case
+
+Optionally, you can also add some delay in the Product controller code and measure response time properly with and without cache:
+
+``` c#
+    [HttpGet]
+    public IActionResult GetAll()
+    {
+        Console.Write("The delay starts.\n");
+        System.Threading.Thread.Sleep(5000);
+        Console.Write("The delay ends.");
+        return Ok(_productsService.GetAll());
+    }
+```
+
+The `curl` command to check response time would be:
+
+```shell
+curl -i 'http://localhost:9080/api/products' -s -o /dev/null -w "Response time: %{time_starttransfer} seconds\n"
+```
+
+## What's next
+
+As we learned, it is easy to configure and quick to set up API response caching for our ASP.NET Core WEB API with the help of Apache APISIX. It can reduce significantly the number of calls made to your endpoint and also improve the latency of requests to your API. There are other numerous built-in plugins available in Apache APISIX, you can check them on [Plugin Hub page](https://apisix.apache.org/plugins) and use them per your need.
+
+## Recommended content
+
+You can refer to [Expose API](./expose-api.md) to learn about how to expose your first API.
+
+You can refer to [Protect API](./protect-api.md) to protect your API.
diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/client-to-apisix-mtls.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/client-to-apisix-mtls.md new file mode 100644 index 0000000..303bc00 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/client-to-apisix-mtls.md @@ -0,0 +1,334 @@ +--- +title: Configure mTLS for client to APISIX +keywords: + - mTLS + - API Gateway + - Apache APISIX +description: This article describes how to configure mutual authentication (mTLS) between the client and Apache APISIX. +--- + + + +mTLS is a method for mutual authentication. Suppose in your network environment, only trusted clients are required to access the server. In that case, you can enable mTLS to verify the client's identity and ensure the server API's security. This article mainly introduces how to configure mutual authentication (mTLS) between the client and Apache APISIX. + +## Configuration + +This example includes the following procedures: + +1. Generate certificates; +2. Configure the certificate in APISIX; +3. Create and configure routes in APISIX; +4. Test verification. + +To make the test results clearer, the examples mentioned in this article pass some information about the client credentials upstream, including: `serial`, `fingerprint` and `common name`. + +### Generate certificates + +We need to generate three test certificates: the root, server, and client. Just use the following command to generate the test certificates we need via `OpenSSL`. 
+ +```shell +# For ROOT CA +openssl genrsa -out ca.key 2048 +openssl req -new -sha256 -key ca.key -out ca.csr -subj "/CN=ROOTCA" +openssl x509 -req -days 36500 -sha256 -extensions v3_ca -signkey ca.key -in ca.csr -out ca.cer + +# For server certificate +openssl genrsa -out server.key 2048 +# Note: The `test.com` in the CN value is the domain name/hostname we want to test +openssl req -new -sha256 -key server.key -out server.csr -subj "/CN=test.com" +openssl x509 -req -days 36500 -sha256 -extensions v3_req -CA ca.cer -CAkey ca.key -CAserial ca.srl -CAcreateserial -in server.csr -out server.cer + +# For client certificate +openssl genrsa -out client.key 2048 +openssl req -new -sha256 -key client.key -out client.csr -subj "/CN=CLIENT" +openssl x509 -req -days 36500 -sha256 -extensions v3_req -CA ca.cer -CAkey ca.key -CAserial ca.srl -CAcreateserial -in client.csr -out client.cer + +# Convert client certificate to pkcs12 for Windows usage (optional) +openssl pkcs12 -export -clcerts -in client.cer -inkey client.key -out client.p12 +``` + +### Configure the certificate in APISIX + +Use the `curl` command to request APISIX Admin API to set up SSL for specific SNI. + +:::note + +Note that the newline character in the certificate needs to be replaced with its escape character `\n`. + +::: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/ssls/1' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "sni": "test.com", + "cert": "", + "key": "", + "client": { + "ca": "" + } +}' +``` + +- `sni`: Specify the domain name (CN) of the certificate. When the client tries to handshake with APISIX via TLS, APISIX will match the SNI data in `ClientHello` with this field and find the corresponding server certificate for handshaking. +- `cert`: The server certificate. +- `key`: The private key of the server certificate. +- `client.ca`: The CA (certificate authority) file to verfiy the client certificate. 
For demonstration purposes, the same `CA` is used here. + +### Configure the route in APISIX + +Use the `curl` command to request the APISIX Admin API to create a route. + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/1' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "uri": "/anything", + "plugins": { + "proxy-rewrite": { + "headers": { + "X-Ssl-Client-Fingerprint": "$ssl_client_fingerprint", + "X-Ssl-Client-Serial": "$ssl_client_serial", + "X-Ssl-Client-S-DN": "$ssl_client_s_dn" + } + } + }, + "upstream": { + "nodes": { + "httpbin.org":1 + }, + "type":"roundrobin" + } +}' +``` + +APISIX automatically handles the TLS handshake based on the SNI and the SSL resource created in the previous step, so we do not need to specify the hostname in the route (but it is possible to specify the hostname if you need it). + +Also, in the `curl` command above, we enabled the [proxy-rewrite](../plugins/proxy-rewrite.md) plugin, which will dynamically update the request header information. The source of the variable values in the example are the `NGINX` variables, and you can find them here: [http://nginx.org/en/docs/http/ngx_http_ssl_module.html#variables](http://nginx.org/en/docs/http/ngx_http_ssl_module.html#variables). + +### Test + +Since we are using the domain `test.com` as the test domain, we have to add the test domain to your DNS or local `hosts` file before we can start the verification. + +1. If we don't use `hosts` and just want to test the results, then you can do so directly using the following command. + +``` +curl --resolve "test.com:9443:127.0.0.1" https://test.com:9443/anything -k --cert ./client.cer --key ./client.key +``` + +2. If you need to modify `hosts`, please read the following example (for Ubuntu). 
+ +- Modify the `/etc/hosts` file + + ```shell + # 127.0.0.1 localhost + 127.0.0.1 test.com + ``` + +- Verify that the test domain name is valid + + ```shell + ping test.com + + PING test.com (127.0.0.1) 56(84) bytes of data. + 64 bytes from localhost.localdomain (127.0.0.1): icmp_seq=1 ttl=64 time=0.028 ms + 64 bytes from localhost.localdomain (127.0.0.1): icmp_seq=2 ttl=64 time=0.037 ms + 64 bytes from localhost.localdomain (127.0.0.1): icmp_seq=3 ttl=64 time=0.036 ms + 64 bytes from localhost.localdomain (127.0.0.1): icmp_seq=4 ttl=64 time=0.031 ms + ^C + --- test.com ping statistics --- + 4 packets transmitted, 4 received, 0% packet loss, time 3080ms + rtt min/avg/max/mdev = 0.028/0.033/0.037/0.003 ms + ``` + +- Test results + + ```shell + curl https://test.com:9443/anything -k --cert ./client.cer --key ./client.key + ``` + + You will then receive the following response body. + + ```shell + { + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "test.com", + "User-Agent": "curl/7.81.0", + "X-Amzn-Trace-Id": "Root=1-63256343-17e870ca1d8f72dc40b2c5a9", + "X-Forwarded-Host": "test.com", + "X-Ssl-Client-Fingerprint": "c1626ce3bca723f187d04e3757f1d000ca62d651", + "X-Ssl-Client-S-Dn": "CN=CLIENT", + "X-Ssl-Client-Serial": "5141CC6F5E2B4BA31746D7DBFE9BA81F069CF970" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1", + "url": "http://test.com/anything" + } + ``` + +Since we configured the [proxy-rewrite](../plugins/proxy-rewrite.md) plugin in the example, we can see that the response body contains the request body received upstream, containing the correct data. + +## MTLS bypass based on regular expression matching against URI + +APISIX allows configuring an URI whitelist to bypass MTLS. +If the URI of a request is in the whitelist, then the client certificate will not be checked. 
+Note that other URIs of the associated SNI will get HTTP 400 response +instead of alert error in the SSL handshake phase, if the client certificate is missing or invalid. + +### Timing diagram + +![skip mtls](../../../assets/images/skip-mtls.png) + +### Example + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. Configure route and ssl via admin API + +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "nodes": { + "httpbin.org": 1 + } + } +}' + +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert": "'"$( GET /uuid HTTP/2 +> Host: admin.apisix.dev:9443 +> user-agent: curl/7.68.0 +> accept: */* +> +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* old SSL session ID is stale, removing +* Connection state changed (MAX_CONCURRENT_STREAMS == 128)! +< HTTP/2 400 +< date: Fri, 21 Apr 2023 07:53:23 GMT +< content-type: text/html; charset=utf-8 +< content-length: 229 +< server: APISIX/3.2.0 +< + +400 Bad Request + +

400 Bad Request

+
openresty
+

Powered by APISIX.

+ +* Connection #0 to host admin.apisix.dev left intact +``` + +3. Although the client certificate is missing, but the URI is in the whitelist, +you get successful response. + +```bash +curl https://admin.apisix.dev:9443/anything/foobar -i \ +--resolve 'admin.apisix.dev:9443:127.0.0.1' --cacert t/certs/mtls_ca.crt +HTTP/2 200 +content-type: application/json +content-length: 416 +date: Fri, 21 Apr 2023 07:58:28 GMT +access-control-allow-origin: * +access-control-allow-credentials: true +server: APISIX/3.2.0 +... +``` + +## Conclusion + +If you don't want to use curl or test on windows, you can read this gist for more details. [APISIX mTLS for client to APISIX](https://gist.github.com/bzp2010/6ce0bf7c15c191029ed54724547195b4). + +For more information about the mTLS feature of Apache APISIX, you can read [Mutual TLS Authentication](../mtls.md). diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/expose-api.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/expose-api.md new file mode 100644 index 0000000..cab143a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/expose-api.md @@ -0,0 +1,123 @@ +--- +title: Expose API +keywords: + - API Gateway + - Apache APISIX + - Expose Service +description: This article describes how to publish services through the API Gateway Apache APISIX. +--- + + + +This article will guide you through APISIX's upstream, routing, and service concepts and introduce how to publish your services through APISIX. + +## Concept introduction + +### Upstream + +[Upstream](../terminology/upstream.md) is a virtual host abstraction that performs load balancing on a given set of service nodes according to the configured rules. + +The role of the Upstream is to load balance the service nodes according to the configuration rules, and Upstream information can be directly configured to the Route or Service. 
+
+When multiple routes or services refer to the same upstream, you can create an upstream object and use the upstream ID in the Route or Service to reference the upstream to reduce maintenance pressure.
+
+### Route
+
+[Routes](../terminology/route.md) match the client's request based on defined rules, load and execute the corresponding plugins, and forward the request to the specified Upstream.
+
+### Service
+
+A [Service](../terminology/service.md) is an abstraction of an API (which can also be understood as a set of Route abstractions). It usually corresponds to an upstream service abstraction.
+
+## Prerequisites
+
+Please make sure you have [installed Apache APISIX](../installation-guide.md) before doing the following.
+
+## Expose your service
+
+1. Create an Upstream.
+
+Create an Upstream service containing `httpbin.org` that you can use for testing. This is a return service that will return the parameters we passed in the request.
+
+```
+curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" \
+-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d '
+{
+  "type": "roundrobin",
+  "nodes": {
+    "httpbin.org:80": 1
+  }
+}'
+```
+
+In this command, we specify the Admin API Key of Apache APISIX as `edd1c9f034335f136f87ad84b625c8f1`, use `roundrobin` as the load balancing mechanism, and set `httpbin.org:80` as the upstream service. To bind this upstream to a route, `upstream_id` needs to be set to `1` here. Here you can specify multiple upstreams under `nodes` to achieve load balancing.
+
+For more information, please refer to [Upstream](../terminology/upstream.md).
+
+2. Create a Route.
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes/1" \
+-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d '
+{
+  "methods": ["GET"],
+  "host": "example.com",
+  "uri": "/anything/*",
+  "upstream_id": "1"
+}'
+```
+
+:::note
+
+Adding an `upstream` object to your route can achieve the above effect.
+ +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "methods": ["GET"], + "host": "example.com", + "uri": "/anything/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +::: + +3. Test + +After creating the Route, you can test the Service with the following command: + +``` +curl -i -X GET "http://127.0.0.1:9080/anything/get?foo1=bar1&foo2=bar2" -H "Host: example.com" +``` + +APISIX will forward the request to `http://httpbin.org:80/anything/get?foo1=bar1&foo2=bar2`. + +## More Tutorials + +You can refer to [Protect API](./protect-api.md) to protect your API. + +You can also use APISIX's [Plugin](../terminology/plugin.md) to achieve more functions. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/health-check.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/health-check.md new file mode 100644 index 0000000..cf344a9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/health-check.md @@ -0,0 +1,240 @@ +--- +title: Health Check +keywords: + - APISIX + - API Gateway + - Health Check +description: This article describes how to use the health check feature of API Gateway Apache APISIX to check the health status of upstream nodes. +--- + + + +## Description + +This article mainly introduces the health check function of Apache APISIX. The health check function can proxy requests to healthy nodes when the upstream node fails or migrates, avoiding the problem of service unavailability to the greatest extent. The health check function of APISIX is implemented using [lua-resty-healthcheck](https://github.com/api7/lua-resty-healthcheck), which is divided into active check and passive check. + +## Active check + +Active health check mainly means that APISIX actively detects the survivability of upstream nodes through preset probe types. 
APISIX supports three probe types: `HTTP`, `HTTPS`, and `TCP`. + +When N consecutive probes sent to healthy node `A` fail, the node will be marked as unhealthy, and the unhealthy node will be ignored by APISIX's load balancer and cannot receive requests; if For an unhealthy node, if M consecutive probes are successful, the node will be re-marked as healthy and can be proxied. + +## Passive check + +Passive health check refers to judging whether the corresponding upstream node is healthy by judging the response status of the request forwarded from APISIX to the upstream node. Compared with the active health check, the passive health check method does not need to initiate additional probes, but it cannot sense the node status in advance, and there may be a certain amount of failed requests. + +If `N` consecutive requests to a healthy node A fail, the node will be marked as unhealthy. + +:::note + +Since unhealthy nodes cannot receive requests, nodes cannot be re-marked as healthy using the passive health check strategy alone, so combining the active health check strategy is usually necessary. + +::: + +:::tip + +- We only start the health check when the upstream is hit by a request. There won't be any health check if an upstream is configured but isn't in used. +- If there is no healthy node can be chosen, we will continue to access the upstream. + +::: + +### Configuration instructions + +| Name | Configuration type | Value type | Valid values | Default | Description | +| ----------------------------------------------- | ------------------------------- | ---------- | -------------------- | --------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | +| upstream.checks.active.type | Active check | string | `http` `https` `tcp` | http | The type of active check. 
| +| upstream.checks.active.timeout | Active check | integer | | 1 | The timeout period of the active check (unit: second). | +| upstream.checks.active.concurrency | Active check | integer | | 10 | The number of targets to be checked at the same time during the active check. | +| upstream.checks.active.http_path | Active check | string | | / | The HTTP request path that is actively checked. | +| upstream.checks.active.host | Active check | string | | ${upstream.node.host} | The hostname of the HTTP request actively checked. | +| upstream.checks.active.port | Active check | integer | `1` to `65535` | ${upstream.node.port} | The host port of the HTTP request that is actively checked. | +| upstream.checks.active.https_verify_certificate | Active check | boolean | | true | Active check whether to check the SSL certificate of the remote host when HTTPS type checking is used. | +| upstream.checks.active.req_headers | Active check | array | | [] | Active check When using HTTP or HTTPS type checking, set additional request header information. | +| upstream.checks.active.healthy.interval | Active check (healthy node) | integer | `>= 1` | 1 | Active check (healthy node) check interval (unit: second) | +| upstream.checks.active.healthy.http_statuses | Active check (healthy node) | array | `200` to `599` | [200, 302] | Active check (healthy node) HTTP or HTTPS type check, the HTTP status code of the healthy node. | +| upstream.checks.active.healthy.successes | Active check (healthy node) | integer | `1` to `254` | 2 | Active check (healthy node) determine the number of times a node is healthy. 
| +| upstream.checks.active.unhealthy.interval | Active check (unhealthy node) | integer | `>= 1` | 1 | Active check (unhealthy node) check interval (unit: second) | +| upstream.checks.active.unhealthy.http_statuses | Active check (unhealthy node) | array | `200` to `599` | [429, 404, 500, 501, 502, 503, 504, 505] | Active check (unhealthy node) HTTP or HTTPS type check, the HTTP status code of the non-healthy node. | +| upstream.checks.active.unhealthy.http_failures | Active check (unhealthy node) | integer | `1` to `254` | 5 | Active check (unhealthy node) HTTP or HTTPS type check, determine the number of times that the node is not healthy. | +| upstream.checks.active.unhealthy.tcp_failures | Active check (unhealthy node) | integer | `1` to `254` | 2 | Active check (unhealthy node) TCP type check, determine the number of times that the node is not healthy. | +| upstream.checks.active.unhealthy.timeouts | Active check (unhealthy node) | integer | `1` to `254` | 3 | Active check (unhealthy node) to determine the number of timeouts for unhealthy nodes. | +| upstream.checks.passive.type | Passive check | string | `http` `https` `tcp` | http | The type of passive check. | +| upstream.checks.passive.healthy.http_statuses | Passive check (healthy node) | array | `200` to `599` | [200, 201, 202, 203, 204, 205, 206, 207, 208, 226, 300, 301, 302, 303, 304, 305, 306, 307, 308] | Passive check (healthy node) HTTP or HTTPS type check, the HTTP status code of the healthy node. | +| upstream.checks.passive.healthy.successes | Passive check (healthy node) | integer | `0` to `254` | 5 | Passive checks (healthy node) determine the number of times a node is healthy. | +| upstream.checks.passive.unhealthy.http_statuses | Passive check (unhealthy node) | array | `200` to `599` | [429, 500, 503] | Passive check (unhealthy node) HTTP or HTTPS type check, the HTTP status code of the non-healthy node. 
| +| upstream.checks.passive.unhealthy.tcp_failures | Passive check (unhealthy node) | integer | `0` to `254` | 2 | Passive check (unhealthy node) When TCP type is checked, determine the number of times that the node is not healthy. | +| upstream.checks.passive.unhealthy.timeouts | Passive check (unhealthy node) | integer | `0` to `254` | 7 | Passive checks (unhealthy node) determine the number of timeouts for unhealthy nodes. | +| upstream.checks.passive.unhealthy.http_failures | Passive check (unhealthy node) | integer | `0` to `254` | 5 | Passive check (unhealthy node) The number of times that the node is not healthy during HTTP or HTTPS type checking. | + +### Configuration example + +You can enable health checks in routes via the Admin API: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1970": 1 + }, + "type": "roundrobin", + "retries": 2, + "checks": { + "active": { + "timeout": 5, + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 2, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + }, + "req_headers": ["User-Agent: curl/7.29.0"] + }, + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [500], + "http_failures": 3, + "tcp_failures": 3 + } + } + } + } +}' +``` + +If APISIX detects an unhealthy node, the following logs will be output in the error log: + +```shell +enabled healthcheck passive while logging request +failed to receive status line 
from 'nil (127.0.0.1:1980)': closed
+unhealthy TCP increment (1/2) for '(127.0.0.1:1980)'
+failed to receive status line from 'nil (127.0.0.1:1980)': closed
+unhealthy TCP increment (2/2) for '(127.0.0.1:1980)'
+```
+
+:::tip
+
+To observe the above log information, you need to adjust the error log level to `info`.
+
+:::
+
+The health check status can be fetched via `GET /v1/healthcheck` in [Control API](../control-api.md).
+
+```shell
+
+curl http://127.0.0.1:9090/v1/healthcheck/upstreams/healthycheck -s | jq .
+
+```
+
+## Health Check Status
+
+APISIX provides comprehensive health check information, with particular emphasis on the `status` and `counter` parameters for effective health monitoring. In the APISIX context, nodes exhibit four states: `healthy`, `unhealthy`, `mostly_unhealthy`, and `mostly_healthy`. The `mostly_healthy` status indicates that the current node is considered healthy, but during health checks, the node's health status is not consistently successful. The `mostly_unhealthy` status indicates that the current node is considered unhealthy, but during health checks, the node's health detection is not consistently unsuccessful. The transition of a node's state depends on the success or failure of the current health check, along with the recording of four key metrics in the `counter`: `tcp_failure`, `http_failure`, `success`, and `timeout_failure`.
+ +To retrieve health check information, you can use the following curl command: + +```shell + curl -i http://127.0.0.1:9090/v1/healthcheck +``` + +Response Example: + +```json +[ + { + "nodes": {}, + "name": "/apisix/routes/1", + "type": "http" + }, + { + "nodes": [ + { + "port": 1970, + "hostname": "127.0.0.1", + "status": "healthy", + "ip": "127.0.0.1", + "counter": { + "tcp_failure": 0, + "http_failure": 0, + "success": 0, + "timeout_failure": 0 + } + }, + { + "port": 1980, + "hostname": "127.0.0.1", + "status": "healthy", + "ip": "127.0.0.1", + "counter": { + "tcp_failure": 0, + "http_failure": 0, + "success": 0, + "timeout_failure": 0 + } + } + ], + "name": "/apisix/routes/example-hc-route", + "type": "http" + } +] +``` + +### State Transition Diagram + +![image](../../../assets/images/health_check_node_state_diagram.png) + +Note that all nodes start with the `healthy` status without any initial probes, and the counter only resets and updates with a state change. Hence, when nodes are `healthy` and all subsequent checks are successful, the `success` counter is not updated and remains zero. + +### Counter Information + +In the event of a health check failure, the `success` count in the counter will be reset to zero. Upon a successful health check, the `tcp_failure`, `http_failure`, and `timeout_failure` data will be reset to zero. + +| Name | Description | Purpose | +|----------------|----------------------------------------|--------------------------------------------------------------------------------------------------------------------------| +| success | Number of successful health checks | When `success` exceeds the configured `healthy.successes` value, the node transitions to a `healthy` state. | +| tcp_failure | Number of TCP health check failures | When `tcp_failure` exceeds the configured `unhealthy.tcp_failures` value, the node transitions to an `unhealthy` state. 
| +| http_failure | Number of HTTP health check failures | When `http_failure` exceeds the configured `unhealthy.http_failures` value, the node transitions to an `unhealthy` state. | +| timeout_failure | Number of health check timeouts | When `timeout_failure` exceeds the configured `unhealthy.timeouts` value, the node transitions to an `unhealthy` state. | diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/keycloak-oidc.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/keycloak-oidc.md new file mode 100644 index 0000000..64e2ecf --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/keycloak-oidc.md @@ -0,0 +1,467 @@ +--- +title: Set Up SSO with Keycloak (OIDC) +keywords: + - APISIX + - API Gateway + - OIDC + - Keycloak +description: This article describes how to integrate APISIX with Keycloak using the authorization code grant, client credentials grant, and password grant, using the openid-connect Plugin. +--- + + + +[OpenID Connect (OIDC)](https://openid.net/connect/) is a simple identity layer on top of the [OAuth 2.0 protocol](https://www.rfc-editor.org/rfc/rfc6749). It allows clients to verify the identity of end users based on the authentication performed by the identity provider, as well as to obtain basic profile information about end users in an interoperable and REST-like manner. With APISIX and [Keycloak](https://www.keycloak.org/), you can implement OIDC-based authentication processes to protect your APIs and enable single sign-on (SSO). + +[Keycloak](https://www.keycloak.org/) is an open-source identity and access management solution for modern applications and services. Keycloak supports single sign-on (SSO), which enables services to interface with Keycloak through protocols such as OIDC and OAuth 2.0. In addition, Keycloak also supports delegating authentication to third party identity providers such as Facebook and Google. 
+ +This tutorial will show you how to integrate APISIX with Keycloak using [authorization code grant](#implement-authorization-code-grant), [client credentials grant](#implement-client-credentials-grant), and [password grant](#implement-password-grant), using the [`openid-connect`](/hub/openid-connect) Plugin. + +## Configure Keycloak + +Start a Keycloak instance named `apisix-quickstart-keycloak` with the administrator name `quickstart-admin` and password `quickstart-admin-pass` in [development mode](https://www.keycloak.org/server/configuration#_starting_keycloak_in_development_mode) in Docker. The exposed port is mapped to `8080` on the host machine: + +```shell +docker run -d --name "apisix-quickstart-keycloak" \ + -e 'KEYCLOAK_ADMIN=quickstart-admin' \ + -e 'KEYCLOAK_ADMIN_PASSWORD=quickstart-admin-pass' \ + -p 8080:8080 \ + quay.io/keycloak/keycloak:18.0.2 start-dev +``` + +Keycloak provides an easy-to-use web UI to help the administrator manage all resources, such as clients, roles, and users. + +Navigate to `http://localhost:8080` in browser to access the Keycloak web page, then click __Administration Console__: + +![web-ui](https://static.api7.ai/uploads/2023/03/30/ItcwYPIx_web-ui.png) + +Enter the administrator’s username `quickstart-admin` and password `quickstart-admin-pass` and sign in: + +![admin-signin](https://static.api7.ai/uploads/2023/03/30/6W3pjzE1_admin-signin.png) + +You need to maintain the login status to configure Keycloak during the following steps. + +### Create a Realm + +_Realms_ in Keycloak are workspaces to manage resources such as users, credentials, and roles. The resources in different realms are isolated from each other. You need to create a realm named `quickstart-realm` for APISIX. 
+ +In the left menu, hover over **Master**, and select __Add realm__ in the dropdown: + +![create-realm](https://static.api7.ai/uploads/2023/03/30/S1Xvqliv_create-realm.png) + +Enter the realm name `quickstart-realm` and click __Create__ to create it: + +![add-realm](https://static.api7.ai/uploads/2023/03/30/jwb7QU8k_add-realm.png) + +### Create a Client + +_Clients_ in Keycloak are entities that request Keycloak to authenticate a user. More often, clients are applications that want to use Keycloak to secure themselves and provide a single sign-on solution. APISIX is equivalent to a client that is responsible for initiating authentication requests to Keycloak, so you need to create its corresponding client named `apisix-quickstart-client`. + +Click __Clients__ > __Create__ to open the __Add Client__ page: + +![create-client](https://static.api7.ai/uploads/2023/03/30/qLom0axN_create-client.png) + +Enter __Client ID__ as `apisix-quickstart-client`, then select __Client Protocol__ as `openid-connect` and __Save__: + +![add-client](https://static.api7.ai/uploads/2023/03/30/X5on2r7x_add-client.png) + +The client `apisix-quickstart-client` is created. After redirecting to the detailed page, select `confidential` as the __Access Type__: + +![config-client](https://static.api7.ai/uploads/2023/03/30/v70c8y9F_config-client.png) + +When the user login is successful during the SSO, Keycloak will carry the state and code to redirect the client to the addresses in __Valid Redirect URIs__. To simplify the operation, enter wildcard `*` to consider any URI valid: + +![client-redirect](https://static.api7.ai/uploads/2023/03/30/xLxcyVkn_client-redirect.png) + +If you are implementing the [authorization code grant with PKCE](#implement-authorization-code-grant), configure the PKCE challenge method in the client's advanced settings: + +
+PKCE keycloak configuration +
+
+If you are implementing [client credentials grant](#implement-client-credentials-grant), enable service accounts for the client:
+
+![enable-service-account](https://static.api7.ai/uploads/2023/12/29/h1uNtghd_sa.png)
+
+Select __Save__ to apply custom configurations.
+
+### Create a User
+
+Users in Keycloak are entities that are able to log into the system. They can have attributes associated with themselves, such as username, email, and address.
+
+If you are only implementing [client credentials grant](#implement-client-credentials-grant), you can [skip this section](#obtain-the-oidc-configuration).
+
+Click __Users__ > __Add user__ to open the __Add user__ page:
+
+![create-user](https://static.api7.ai/uploads/2023/03/30/onQEp23L_create-user.png)
+
+Enter the __Username__ as `quickstart-user` and select __Save__:
+
+![add-user](https://static.api7.ai/uploads/2023/03/30/EKhuhgML_add-user.png)
+
+Click on __Credentials__, then set the __Password__ as `quickstart-user-pass`. Switch __Temporary__ to `OFF` to turn off the restriction, so that you do not need to change the password the first time you log in:
+
+![user-pass](https://static.api7.ai/uploads/2023/03/30/rQKEAEnh_user-pass.png)
+
+## Obtain the OIDC Configuration
+
+In this section, you will obtain the key OIDC configuration from Keycloak and define them as shell variables. Steps after this section will use these variables to configure the OIDC by shell commands.
+
+:::info
+
+Open a separate terminal to follow the steps and define related shell variables. Then steps after this section could use the defined variables directly.
+
+:::
+
+### Get Discovery Endpoint
+
+Click __Realm Settings__, then right-click __OpenID Endpoints Configuration__ and copy the link.
+ +![get-discovery](https://static.api7.ai/uploads/2023/03/30/526lbJbg_get-discovery.png) + +The link should be the same as the following: + +```text +http://localhost:8080/realms/quickstart-realm/.well-known/openid-configuration +``` + +Configuration values exposed with this endpoint are required during OIDC authentication. Update the address with your host IP and save to environment variables: + +```shell +export KEYCLOAK_IP=192.168.42.145 # replace with your host IP +export OIDC_DISCOVERY=http://${KEYCLOAK_IP}:8080/realms/quickstart-realm/.well-known/openid-configuration +``` + +### Get Client ID and Secret + +Click on __Clients__ > `apisix-quickstart-client` > __Credentials__, and copy the client secret from __Secret__: + +![client-ID](https://static.api7.ai/uploads/2023/03/30/MwYmU20v_client-id.png) + +![client-secret](https://static.api7.ai/uploads/2023/03/30/f9iOG8aN_client-secret.png) + +Save the OIDC client ID and secret to environment variables: + +```shell +export OIDC_CLIENT_ID=apisix-quickstart-client +export OIDC_CLIENT_SECRET=bSaIN3MV1YynmtXvU8lKkfeY0iwpr9cH # replace with your value +``` + +## Implement Authorization Code Grant + +The authorization code grant is used by web and mobile applications. The flow starts by authorization server displaying a login page in browser where users could key in their credentials. During the process, a short-lived authorization code is exchanged for an access token, which APISIX stores in browser session cookies and will be sent with every request visiting the upstream resource server. 
+ +To implement authorization code grant, create a Route with `openid-connect` Plugin as such: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "bearer_only": false, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +Alternatively, if you would like to implement authorization code grant with PKCE, create a Route with `openid-connect` Plugin as such: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "bearer_only": false, + "use_pkce": true, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +### Verify with Valid Credentials + +Navigate to `http://127.0.0.1:9080/anything/test` in browser. The request will be redirected to a login page: + +![test-sign-on](https://static.api7.ai/uploads/2023/03/30/i38u1x9a_validate-sign.png) + +Log in with the correct username `quickstart-user` and password `quickstart-user-pass`. If successful, the request will be forwarded to `httpbin.org` and you should see a response similar to the following: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "text/html..." + ... 
+ }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 59.71.244.81", + "url": "http://127.0.0.1/anything/test" +} +``` + +### Verify with Invalid Credentials + +Sign in with the wrong credentials. You should see an authentication failure: + +![test-sign-failed](https://static.api7.ai/uploads/2023/03/31/YOuSYX1r_validate-sign-failed.png) + +## Implement Client Credentials Grant + +In client credentials grant, clients obtain access tokens without any users involved. It is typically used in machine-to-machine (M2M) communications. + +To implement client credentials grant, create a Route with `openid-connect` Plugin to use the JWKS endpoint of the identity provider to verify the token. The endpoint would be obtained from the discovery document. + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "use_jwks": true, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +Alternatively, if you would like to use the introspection endpoint to verify the token, create the Route as such: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "bearer_only": true, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +The introspection endpoint will be obtained from the discovery document. 
+ +### Verify With Valid Access Token + +Obtain an access token for the Keycloak server at the [token endpoint](https://www.keycloak.org/docs/latest/securing_apps/#token-endpoint): + +```shell +curl -i "http://$KEYCLOAK_IP:8080/realms/quickstart-realm/protocol/openid-connect/token" -X POST \ + -d 'grant_type=client_credentials' \ + -d 'client_id='$OIDC_CLIENT_ID'' \ + -d 'client_secret='$OIDC_CLIENT_SECRET'' +``` + +The expected response is similar to the following: + +```text +{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJoT3ludlBPY2d6Y3VWWnYtTU42bXZKMUczb0dOX2d6MFo3WFl6S2FSa1NBIn0.eyJleHAiOjE3MDM4MjU1NjQsImlhdCI6MTcwMzgyNTI2NCwianRpIjoiMWQ4NWE4N2UtZDFhMC00NThmLThiMTItNGZiYWM2ODA5YmYwIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMS44Mzo4MDgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwiYXVkIjoiYWNjb3VudCIsInN1YiI6IjE1OGUzOWFlLTk0YjAtNDI3Zi04ZGU3LTU3MTRhYWYwOGYzOSIsInR5cCI6IkJlYXJlciIsImF6cCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoiZW1haWwgcHJvZmlsZSIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwiY2xpZW50SG9zdCI6IjE3Mi4xNy4wLjEiLCJjbGllbnRJZCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsInByZWZlcnJlZF91c2VybmFtZSI6InNlcnZpY2UtYWNjb3VudC1hcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJjbGllbnRBZGRyZXNzIjoiMTcyLjE3LjAuMSJ9.TltzSXqrJuVID7aGrb35jn-oc07U_-jugSn-3jKz4A44LwtAsME_8b3qkmR4boMOIht_5pF6bnnp70MFAlg6JKu4_yIQDxF_GAHjnZXEO8OCKhtIKwXm2w-hnnJVIhIdGkIVkbPP0HfILuar_m0hpa53VpPBGYR-OS4pyh0KTUs8MB22xAEqyz9zjCm6SX9vXCqgeVkSpRW2E8NaGEbAdY25uY-ZC4dI_pON87Ey5e8GdD6HQLXQlGIOdCDi3N7k0HDoD9TZRv2bMRPfy4zVYm1ZlClIuF79A-ZBwr0c-XYuq7t6EY0gPGEXB-s0SaKlrIU5S9JBeVXRzYvqAih41g","expires_in":300,"refresh_expires_in":0,"token_type":"Bearer","not-before-policy":0,"scope":"email profile"} +``` + +Save the access token to an environment variable: + 
+```shell +# replace with your access token +export ACCESS_TOKEN="eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJoT3ludlBPY2d6Y3VWWnYtTU42bXZKMUczb0dOX2d6MFo3WFl6S2FSa1NBIn0.eyJleHAiOjE3MDM4MjU1NjQsImlhdCI6MTcwMzgyNTI2NCwianRpIjoiMWQ4NWE4N2UtZDFhMC00NThmLThiMTItNGZiYWM2ODA5YmYwIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMS44Mzo4MDgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwiYXVkIjoiYWNjb3VudCIsInN1YiI6IjE1OGUzOWFlLTk0YjAtNDI3Zi04ZGU3LTU3MTRhYWYwOGYzOSIsInR5cCI6IkJlYXJlciIsImF6cCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoiZW1haWwgcHJvZmlsZSIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwiY2xpZW50SG9zdCI6IjE3Mi4xNy4wLjEiLCJjbGllbnRJZCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsInByZWZlcnJlZF91c2VybmFtZSI6InNlcnZpY2UtYWNjb3VudC1hcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJjbGllbnRBZGRyZXNzIjoiMTcyLjE3LjAuMSJ9.TltzSXqrJuVID7aGrb35jn-oc07U_-jugSn-3jKz4A44LwtAsME_8b3qkmR4boMOIht_5pF6bnnp70MFAlg6JKu4_yIQDxF_GAHjnZXEO8OCKhtIKwXm2w-hnnJVIhIdGkIVkbPP0HfILuar_m0hpa53VpPBGYR-OS4pyh0KTUs8MB22xAEqyz9zjCm6SX9vXCqgeVkSpRW2E8NaGEbAdY25uY-ZC4dI_pON87Ey5e8GdD6HQLXQlGIOdCDi3N7k0HDoD9TZRv2bMRPfy4zVYm1ZlClIuF79A-ZBwr0c-XYuq7t6EY0gPGEXB-s0SaKlrIU5S9JBeVXRzYvqAih41g" +``` + +Send a request to the route with the valid access token: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer $ACCESS_TOKEN" +``` + +An `HTTP/1.1 200 OK` response verifies that the request to the upstream resource was authorized. + +### Verify With Invalid Access Token + +Send a request to the Route with invalid access token: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer invalid-access-token" +``` + +An `HTTP/1.1 401 Unauthorized` response verifies that the OIDC Plugin rejects requests with invalid access token. 
+ +### Verify without Access Token + +Send a request to the Route without access token: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" +``` + +An `HTTP/1.1 401 Unauthorized` response verifies that the OIDC Plugin rejects requests without access token. + +## Implement Password Grant + +Password grant is a legacy approach to exchange user credentials for an access token. + +To implement password grant, create a Route with `openid-connect` Plugin to use the JWKS endpoint of the identity provider to verify the token. The endpoint would be obtained from the discovery document. + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "use_jwks": true, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +### Verify With Valid Access Token + +Obtain an access token for the Keycloak server at the [token endpoint](https://www.keycloak.org/docs/latest/securing_apps/#token-endpoint): + +```shell +OIDC_USER=quickstart-user +OIDC_PASSWORD=quickstart-user-pass +curl -i "http://$KEYCLOAK_IP:8080/realms/quickstart-realm/protocol/openid-connect/token" -X POST \ + -d 'grant_type=password' \ + -d 'client_id='$OIDC_CLIENT_ID'' \ + -d 'client_secret='$OIDC_CLIENT_SECRET'' \ + -d 'username='$OIDC_USER'' \ + -d 'password='$OIDC_PASSWORD'' +``` + +The expected response is similar to the following: + +```text 
+{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJ6U3FFaXN6VlpuYi1sRWMzZkp0UHNpU1ZZcGs4RGN3dXI1Mkx5V05aQTR3In0.eyJleHAiOjE2ODAxNjA5NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiMzQ5MTc4YjQtYmExZC00ZWZjLWFlYTUtZGY2MzJiMDJhNWY5IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJhY2NvdW50Iiwic3ViIjoiMTg4MTVjM2EtNmQwNy00YTY2LWJjZjItYWQ5NjdmMmIwMTFmIiwidHlwIjoiQmVhcmVyIiwiYXpwIjoiYXBpc2l4LXF1aWNrc3RhcnQtY2xpZW50Iiwic2Vzc2lvbl9zdGF0ZSI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoicHJvZmlsZSBlbWFpbCIsInNpZCI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwicHJlZmVycmVkX3VzZXJuYW1lIjoicXVpY2tzdGFydC11c2VyIn0.uD_7zfZv5182aLXu9-YBzBDK0nr2mE4FWb_4saTog2JTqFTPZZa99Gm8AIDJx2ZUcZ_ElkATqNUZ4OpWmL2Se5NecMw3slJReewjD6xgpZ3-WvQuTGpoHdW5wN9-Rjy8ungilrnAsnDA3tzctsxm2w6i9KISxvZrzn5Rbk-GN6fxH01VC5eekkPUQJcJgwuJiEiu70SjGnm21xDN4VGkNRC6jrURoclv3j6AeOqDDIV95kA_MTfBswDFMCr2PQlj5U0RTndZqgSoxwFklpjGV09Azp_jnU7L32_Sq-8coZd0nj5mSdbkJLJ8ZDQDV_PP3HjCP7EHdy4P6TyZ7oGvjw","expires_in":300,"refresh_expires_in":1800,"refresh_token":"eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICI0YjFiNTQ3Yi0zZmZjLTQ5YzQtYjE2Ni03YjdhNzIxMjk1ODcifQ.eyJleHAiOjE2ODAxNjI0NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiYzRjNjNlMTEtZTdlZS00ZmEzLWJlNGYtNDMyZWQ4ZmY5OTQwIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJodHRwOi8vMTkyLjE2OC40Mi4xNDU6ODA4MC9yZWFsbXMvcXVpY2tzdGFydC1yZWFsbSIsInN1YiI6IjE4ODE1YzNhLTZkMDctNGE2Ni1iY2YyLWFkOTY3ZjJiMDExZiIsInR5cCI6IlJlZnJlc2giLCJhenAiOiJhcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJzZXNzaW9uX3N0YXRlIjoiYjE2YjI2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2Iiwic2NvcGUiOiJwcm9maWxlIGVtYWlsIiwic2lkIjoiYjE2YjI
2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2In0.8xYP4bhDg1U9B5cTaEVD7B4oxNp8wwAYEynUne_Jm78","token_type":"Bearer","not-before-policy":0,"session_state":"b16b262e-1056-4515-a455-f25e077ccb76","scope":"profile email"} +``` + +Save the access token and refresh token to environment variables. The refresh token will be used in the [refresh token step](#refresh-token). + +```shell +# replace with your access token +export ACCESS_TOKEN="eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJ6U3FFaXN6VlpuYi1sRWMzZkp0UHNpU1ZZcGs4RGN3dXI1Mkx5V05aQTR3In0.eyJleHAiOjE2ODAxNjA5NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiMzQ5MTc4YjQtYmExZC00ZWZjLWFlYTUtZGY2MzJiMDJhNWY5IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJhY2NvdW50Iiwic3ViIjoiMTg4MTVjM2EtNmQwNy00YTY2LWJjZjItYWQ5NjdmMmIwMTFmIiwidHlwIjoiQmVhcmVyIiwiYXpwIjoiYXBpc2l4LXF1aWNrc3RhcnQtY2xpZW50Iiwic2Vzc2lvbl9zdGF0ZSI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoicHJvZmlsZSBlbWFpbCIsInNpZCI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwicHJlZmVycmVkX3VzZXJuYW1lIjoicXVpY2tzdGFydC11c2VyIn0.uD_7zfZv5182aLXu9-YBzBDK0nr2mE4FWb_4saTog2JTqFTPZZa99Gm8AIDJx2ZUcZ_ElkATqNUZ4OpWmL2Se5NecMw3slJReewjD6xgpZ3-WvQuTGpoHdW5wN9-Rjy8ungilrnAsnDA3tzctsxm2w6i9KISxvZrzn5Rbk-GN6fxH01VC5eekkPUQJcJgwuJiEiu70SjGnm21xDN4VGkNRC6jrURoclv3j6AeOqDDIV95kA_MTfBswDFMCr2PQlj5U0RTndZqgSoxwFklpjGV09Azp_jnU7L32_Sq-8coZd0nj5mSdbkJLJ8ZDQDV_PP3HjCP7EHdy4P6TyZ7oGvjw" +export 
REFRESH_TOKEN="eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICI0YjFiNTQ3Yi0zZmZjLTQ5YzQtYjE2Ni03YjdhNzIxMjk1ODcifQ.eyJleHAiOjE2ODAxNjI0NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiYzRjNjNlMTEtZTdlZS00ZmEzLWJlNGYtNDMyZWQ4ZmY5OTQwIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJodHRwOi8vMTkyLjE2OC40Mi4xNDU6ODA4MC9yZWFsbXMvcXVpY2tzdGFydC1yZWFsbSIsInN1YiI6IjE4ODE1YzNhLTZkMDctNGE2Ni1iY2YyLWFkOTY3ZjJiMDExZiIsInR5cCI6IlJlZnJlc2giLCJhenAiOiJhcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJzZXNzaW9uX3N0YXRlIjoiYjE2YjI2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2Iiwic2NvcGUiOiJwcm9maWxlIGVtYWlsIiwic2lkIjoiYjE2YjI2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2In0.8xYP4bhDg1U9B5cTaEVD7B4oxNp8wwAYEynUne_Jm78" +``` + +Send a request to the route with the valid access token: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer $ACCESS_TOKEN" +``` + +An `HTTP/1.1 200 OK` response verifies that the request to the upstream resource was authorized. + +### Verify With Invalid Access Token + +Send a request to the Route with invalid access token: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer invalid-access-token" +``` + +An `HTTP/1.1 401 Unauthorized` response verifies that the OIDC Plugin rejects requests with invalid access token. + +### Verify without Access Token + +Send a request to the Route without access token: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" +``` + +An `HTTP/1.1 401 Unauthorized` response verifies that the OIDC Plugin rejects requests without access token. 
+ +### Refresh Token + +To refresh the access token, send a request to the Keycloak token endpoint as such: + +```shell +curl -i "http://$KEYCLOAK_IP:8080/realms/quickstart-realm/protocol/openid-connect/token" -X POST \ + -d 'grant_type=refresh_token' \ + -d 'client_id='$OIDC_CLIENT_ID'' \ + -d 'client_secret='$OIDC_CLIENT_SECRET'' \ + -d 'refresh_token='$REFRESH_TOKEN'' +``` + +You should see a response similar to the following, with the new access token and refresh token, which you can use for subsequent requests and token refreshes: + +```text +{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJTdnVwLXlPMHhDdTJBVi1za2pCZ0h6SHZNaG1mcDVDQWc0NHpYb2QxVTlNIn0.eyJleHAiOjE3MzAyNzQ3NDUsImlhdCI6MTczMDI3NDQ0NSwianRpIjoiMjk2Mjk5MWUtM2ExOC00YWFiLWE0NzAtODgxNWEzNjZjZmM4IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMTUyLjU6ODA4MC9yZWFsbXMvcXVpY2tzdGFydC1yZWFsbSIsImF1ZCI6ImFjY291bnQiLCJzdWIiOiI2ZWI0ZTg0Yy00NmJmLTRkYzUtOTNkMC01YWM5YzE5MWU0OTciLCJ0eXAiOiJCZWFyZXIiLCJhenAiOiJhcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJzZXNzaW9uX3N0YXRlIjoiNTU2ZTQyYjktMjE2Yi00NTEyLWE5ZjAtNzE3ZTAyYTQ4MjZhIiwiYWNyIjoiMSIsInJlYWxtX2FjY2VzcyI6eyJyb2xlcyI6WyJkZWZhdWx0LXJvbGVzLXF1aWNrc3RhcnQtcmVhbG0iLCJvZmZsaW5lX2FjY2VzcyIsInVtYV9hdXRob3JpemF0aW9uIl19LCJyZXNvdXJjZV9hY2Nlc3MiOnsiYWNjb3VudCI6eyJyb2xlcyI6WyJtYW5hZ2UtYWNjb3VudCIsIm1hbmFnZS1hY2NvdW50LWxpbmtzIiwidmlldy1wcm9maWxlIl19fSwic2NvcGUiOiJlbWFpbCBwcm9maWxlIiwic2lkIjoiNTU2ZTQyYjktMjE2Yi00NTEyLWE5ZjAtNzE3ZTAyYTQ4MjZhIiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJxdWlja3N0YXJ0LXVzZXIifQ.KLqn1LQdazoPBqLLR856C35XpqbMO9I7WFt3KrDxZF1N8vwv4AvZYWI_2rsbdjCakh9JmPgyYRgEGufYLiDBsqy9CrMVejAIJPYsJIonIXBCp5Ysu92ODJuqtTKuuJ6K7dam7fisBFfCBbVvGspnZ3p0caedpOaF_kSd-F8ARHKVsmkuX3_ucDrP3UctjEXHezefTY4YHjNMB9wuMDPXX2vXt2BsOasnznsIHHHX-ZH8JY6eEfWPtfx0qAED6lVZICT6Rqj_j5-Cf9ogzFtLyy_XvtG9BbHME2B8AXYpxdzqxOxmVVbZdrB8elfmFjs1R3vUn2r3xA9hO_znZo_IoQ","expires_in":300,"refresh_expires_in":1800,"refresh_token":"eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICIwYWYwZTAwYy
0xMThjLTRkNDktYmIwMS1iMDIwNDE3MmFjMzIifQ.eyJleHAiOjE3MzAyNzYyNDUsImlhdCI6MTczMDI3NDQ0NSwianRpIjoiZGQyZTJmYTktN2Y3Zi00MjM5LWEwODAtNWQyZDFiZTdjNzk4IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMTUyLjU6ODA4MC9yZWFsbXMvcXVpY2tzdGFydC1yZWFsbSIsImF1ZCI6Imh0dHA6Ly8xOTIuMTY4LjE1Mi41OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJzdWIiOiI2ZWI0ZTg0Yy00NmJmLTRkYzUtOTNkMC01YWM5YzE5MWU0OTciLCJ0eXAiOiJSZWZyZXNoIiwiYXpwIjoiYXBpc2l4LXF1aWNrc3RhcnQtY2xpZW50Iiwic2Vzc2lvbl9zdGF0ZSI6IjU1NmU0MmI5LTIxNmItNDUxMi1hOWYwLTcxN2UwMmE0ODI2YSIsInNjb3BlIjoiZW1haWwgcHJvZmlsZSIsInNpZCI6IjU1NmU0MmI5LTIxNmItNDUxMi1hOWYwLTcxN2UwMmE0ODI2YSJ9.Uad4BVuojHfyxqedFT5BHliWjIqVDbjM-Xeme0G2AAg","token_type":"Bearer","not-before-policy":0,"session_state":"556e42b9-216b-4512-a9f0-717e02a4826a","scope":"email profile"} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/manage-api-consumers.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/manage-api-consumers.md new file mode 100644 index 0000000..eea93d0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/manage-api-consumers.md @@ -0,0 +1,264 @@ +--- +title: Manage API Consumers +keywords: + - API Gateway + - Apache APISIX + - Rate Limit + - Consumer + - Consumer Group +description: This tutorial explains how to manage your single or multiple API consumers with Apache APISIX. +--- + + + +This tutorial explains how to manage your single or multiple API consumers with Apache APISIX. + +Nowadays [APIs](https://en.wikipedia.org/wiki/API) connect multiple systems, internal services, and third-party applications easily and securely. _API consumers_ are probably the most important stakeholders for API providers because they interact the most with the APIs and the developer portal. This post explains how to manage your single or multiple API consumers with an open-source API Management solution such as [Apache APISIX](https://apisix.apache.org/). 
+ +![Manage API Consumers](https://static.apiseven.com/2022/11/29/6385b565b4c11.png) + +## API Consumers + +API consumers use an API without integrating it into an APP developed for it. In other words, API consumers are _the users of APIs_. This means, for example, a marketing department uses a [Facebook API](https://developers.facebook.com/docs/) to analyze social media responses to specific actions. It does this with individual, irregular requests to the API provided, as needed. + +An [API Management](https://en.wikipedia.org/wiki/API_management) solution should know who the consumer of the API is to configure different rules for different consumers. + +## Apache APISIX Consumers + +In Apache APISIX, the [Consumer object](https://apisix.apache.org/docs/apisix/terminology/consumer/) is the most common way for API consumers to access APIs published through its [API Gateway](https://apisix.apache.org/docs/apisix/terminology/api-gateway/). Consumer concept is extremely useful when you have different consumers requesting the same API and you need to execute various [Plugins](https://apisix.apache.org/docs/apisix/terminology/plugin/) and [Upstream](https://apisix.apache.org/docs/apisix/terminology/upstream/) configurations based on the consumer. + +By publishing APIs through **Apache APISIX API Gateway**, you can easily secure API access using consumer keys or sometimes it can be referred to as subscription keys. Developers who need to consume the published APIs must include a valid subscription key in `HTTP` requests when calling those APIs. Without a valid subscription key, the calls are rejected immediately by the API gateway and not forwarded to the back-end services. + +Consumers can be associated with various scopes: per Plugin, all APIs, or an individual API. There are many use cases for consumer objects in the API Gateway that you get with the combination of its plugins: + +1. Enable different authentication methods for different consumers. 
It can be useful when consumers are trying to access the API by using various authentication mechanisms such as [API key](https://apisix.apache.org/docs/apisix/plugins/key-auth/), [Basic](https://apisix.apache.org/docs/apisix/plugins/basic-auth/), or [JWT](https://apisix.apache.org/docs/apisix/plugins/jwt-auth/)-based auth.
+2. Restrict access to API resources for specific consumers.
+3. Route requests to the corresponding backend service based on the consumer.
+4. Define rate limiting on the amount of data clients can consume.
+5. Analyze data usage for an individual and a subset of consumers.
+
+## Apache APISIX Consumer example
+
+Let's look at some examples of configuring the rate-limiting policy for a single consumer and a group of consumers with the help of [key-auth](https://apisix.apache.org/docs/apisix/plugins/key-auth/) authentication key (API Key) and [limit-count](https://apisix.apache.org/docs/apisix/plugins/limit-count/) plugins. For the demo case, we can leverage [the sample project](https://github.com/Boburmirzo/apisix-api-consumers-management) built on [ASP.NET Core WEB API](https://learn.microsoft.com/en-us/aspnet/core/?view=aspnetcore-7.0) with a single `GET` endpoint (retrieves all products list). You can find in [README file](https://github.com/Boburmirzo/apisix-api-consumers-management#readme) all instructions on how to run the sample app.
+
+### Enable rate-limiting for a single consumer
+
+Up to now, I assume that the sample project is up and running. To use the consumer object along with the other two plugins, we need to follow a few easy steps:
+
+- Create a new Consumer.
+- Specify the authentication plugin key-auth and limit count for the consumer.
+- Create a new Route, and set a routing rule (if necessary).
+- Enable key-auth plugin configuration for the created route.
+ +The above steps can be achieved by running simple two [curl commands](https://en.wikipedia.org/wiki/CURL) against APISIX [Admin API](https://apisix.apache.org/docs/apisix/admin-api/). + +The first `cmd` creates a **new Consumer** with API Key based authentication enabled where the API consumer can only make 2 requests against the Product API within 60 seconds. + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +``` shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username":"consumer1", + "plugins":{ + "key-auth":{ + "key":"auth-one" + }, + "limit-count":{ + "count":2, + "time_window":60, + "rejected_code":403, + "rejected_msg":"Requests are too many, please try again later or upgrade your subscription plan.", + "key":"remote_addr" + } + } +}' +``` + +Then, we define our **new Route and Upstream** so that all incoming requests to the gateway endpoint `/api/products` will be forwarded to our example product backend service after a successful authentication process. + +``` shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "name": "Route for consumer request rate limiting", + "methods": [ + "GET" + ], + "uri": "/api/products", + "plugins": { + "key-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "productapi:80": 1 + } + } +}' +``` + +Apache APISIX will handle the first two requests as usual, but a third request in the same period will return a `403` HTTP code. 
+ +``` shell +curl http://127.0.0.1:9080/api/products -H 'apikey: auth-one' -i +``` + +Sample output after calling the API 3 times within 60 sec: + +``` shell +HTTP/1.1 403 Forbidden +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/2.13.1 + +{"error_msg":"Requests are too many, please try again later or upgrade your subscription plan."} +``` + +Indeed, after reaching the threshold, the subsequent requests are not allowed by APISIX. + +### Enable rate-limiting for consumer groups + +In Apache APISIX, [Consumer group](https://apisix.apache.org/docs/apisix/terminology/consumer-group/) object is used to manage the visibility of backend services to developers. Backend services are first made visible to groups, and then developers in those groups can view and subscribe to the products that are associated with the groups. + +With consumer groups, you can specify any number of rate-limiting tiers and apply them to a group of consumers, instead of managing each consumer individually. + +Typical scenarios can be different pricing models for your API Monetization like API Consumers with the basic plan are allowed to make 50 API calls per minute or in another use case, you can enable specific APIs for Admins, Developers, and Guests based on their roles in the system. + +You can create, update, delete and manage your groups using the Apache APISIX Admin REST API [Consumer Group entity](https://apisix.apache.org/docs/apisix/admin-api/#consumer-group). + +#### Consumer groups example + +For the sake of the demo, let’s create two consumer groups for the basic and premium plans respectively. We can add one or two consumers for each group and control the traffic from consumer groups with the help of the `rate-limiting` plugin. + +To use consumer groups with rate limiting, you need to: + +- Create one or more consumer groups with a limit-count plugin enabled. +- Create consumers and assign consumers to groups. 
+
+The two curl commands below create consumer groups named `basic_plan` and `premium_plan`:
+
+Create a Consumer Group Basic Plan.
+
+``` shell
+curl http://127.0.0.1:9180/apisix/admin/consumer_groups/basic_plan -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+  "plugins": {
+    "limit-count": {
+      "count": 2,
+      "time_window": 60,
+      "rejected_code": 403,
+      "group": "basic_plan"
+    }
+  }
+}'
+```
+
+Create a Consumer Group Premium Plan.
+
+``` shell
+curl http://127.0.0.1:9180/apisix/admin/consumer_groups/premium_plan -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+  "plugins": {
+    "limit-count": {
+      "count": 200,
+      "time_window": 60,
+      "rejected_code": 403,
+      "group": "premium_plan"
+    }
+  }
+}'
+```
+
+In the above steps, we set up the rate limiting config for the Basic plan to allow only 2 requests per 60 seconds, while the Premium plan allows 200 API requests within the same time window.
+
+Create and add first consumer to the Basic group.
+
+``` shell
+curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+  "username": "consumer1",
+  "plugins": {
+    "key-auth": {
+      "key": "auth-one"
+    }
+  },
+  "group_id": "basic_plan"
+}'
+```
+
+Create and add second consumer to the Premium group.
+
+``` shell
+curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+  "username": "consumer2",
+  "plugins": {
+    "key-auth": {
+      "key": "auth-two"
+    }
+  },
+  "group_id": "premium_plan"
+}'
+```
+
+Create and add third consumer to the Premium group.
+ +``` shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "consumer3", + "plugins": { + "key-auth": { + "key": "auth-three" + } + }, + "group_id": "premium_plan" +}' +``` + +Afterward, we can easily check that the first consumer `Consumer1` in the Basic Plan group will get a `403 HTTP status error` after hitting the 2 API calls per a minute, while the other two consumers in the Premium Plan group can request as many times as until they reach the limit. + +You can run below cmds by changing auth key for each consumer in the request header: + +``` shell +curl -i http://127.0.0.1:9080/api/products -H 'apikey: auth-one' +``` + +``` shell +curl -i http://127.0.0.1:9080/api/products -H 'apikey: auth-two' +``` + +``` shell +curl -i http://127.0.0.1:9080/api/products -H 'apikey: auth-three' +``` + +Note that you can also add or remove a consumer from any consumer group and enable other built-in plugins. + +## More Tutorials + +Read our other [tutorials](./expose-api.md) to learn more about API Management. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/monitor-api-health-check.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/monitor-api-health-check.md new file mode 100644 index 0000000..84edad3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/monitor-api-health-check.md @@ -0,0 +1,192 @@ +--- +title: Monitor API Health Check with Prometheus +keywords: + - API Health Check + - Monitoring with Prometheus + - API Gateway +description: In this tutorial, we'll guide you on how to enable and monitor API health checks using APISIX and Prometheus. +--- + + + +[APISIX](https://apisix.apache.org/) has a [health check](https://apisix.apache.org/docs/apisix/tutorials/health-check/) mechanism, which proactively checks the health status of the upstream nodes in your system. 
Also, APISIX integrates with [Prometheus](https://prometheus.io/) through its [plugin](https://apisix.apache.org/docs/apisix/plugins/prometheus/) that exposes upstream nodes (multiple instances of a backend API service that APISIX manages) health check metrics on the Prometheus metrics endpoint typically, on URL path **`/apisix/prometheus/metrics`**. + +In this tutorial, we'll guide you on how to **enable and monitor API health checks** using APISIX and Prometheus. + +## Prerequisite(s) + +- Before you start, it is good to have a basic understanding of APISIX. Familiarity with [API gateway](https://apisix.apache.org/docs/apisix/terminology/api-gateway/), and its key concepts such as [routes](https://docs.api7.ai/apisix/key-concepts/routes), [upstream](https://docs.api7.ai/apisix/key-concepts/upstreams), [Admin API](https://apisix.apache.org/docs/apisix/admin-api/), [plugins](https://docs.api7.ai/apisix/key-concepts/plugins), and HTTP protocol will also be beneficial. +- [Docker](https://docs.docker.com/get-docker/) is used to install the containerized etcd and APISIX. +- Install [cURL](https://curl.se/) to send requests to the services for validation. + +## Start the APISIX demo project + +This project leverages the pre-defined [Docker Compose configuration](https://github.com/apache/apisix-docker/blob/master/example/docker-compose.yml) file to set up, deploy and run APISIX, etcd, Prometheus, and other services with a single command. First, clone the [apisix-docker](https://github.com/apache/apisix-docker) repo on GitHub and open it in your favorite editor, navigate to `/example` folder, and start the project by simply running `docker compose up` from the folder. + +When you start the project, Docker downloads any images it needs to run. You can see the full list of services in [docker-compose.yaml](https://github.com/apache/apisix-docker/blob/master/example/docker-compose.yml) file. 
+ +## Add health check API endpoints in upstream + +To check API health periodically, APISIX needs an HTTP path of the health endpoint of the upstream service. So, you need first to add `/health` endpoint for your backend service. From there, you inspect the most relevant metrics for that service such as memory usage, database connectivity, response duration, and more. Assume that we have two backend REST API services web1 and web2 running using the demo project and each has its **own health check** endpoint at URL path `/health`. At this point, you do not need to make additional configurations. In reality, you can replace them with your backend services. + +> The simplest and standardized way to validate the status of a service is to define a new [health check](https://datatracker.ietf.org/doc/html/draft-inadarei-api-health-check) endpoint like `/health` or `/status` + +## Setting Up Health Checks in APISIX + +This process involves checking the operational status of the 'upstream' nodes. APISIX provides two types of health checks: **Active checks** and **Passive Checks** respectively. Read more about Health Checks and how to enable them [here](https://apisix.apache.org/docs/apisix/tutorials/health-check/). Use the [Admin API](https://apisix.apache.org/docs/apisix/admin-api/) to create an Upstream object. 
Here is an example of creating an [Upstream](https://apisix.apache.org/docs/apisix/terminology/upstream/) object with two nodes (Per each backend service we defined) and configuring the health check parameters in the upstream object: + +```bash +curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "nodes":{ + "web1:80":1, + "web2:80":1 + }, + "checks":{ + "active":{ + "timeout":5, + "type":"http", + "http_path":"/health", + "healthy":{ + "interval":2, + "successes":1 + }, + "unhealthy":{ + "interval":1, + "http_failures":2 + } + } + } +}' +``` + +This example configures an active health check on the **`/health`** endpoint of the node. It considers the node healthy after **one successful health check** and unhealthy **after two failed health checks**. + +> Note that sometimes you might need the IP addresses of upstream nodes, not their domains (`web1` and `web2`) if you are running services outside docker network. Health check will be started only if the number of nodes (resolved IPs) is bigger than 1. + +## Enable the Prometheus Plugin + +Create a global rule to enable the `prometheus` plugin on all routes by adding `"prometheus": {}` in the plugins option. APISIX gathers internal runtime metrics and exposes them through port `9091` and URI path `/apisix/prometheus/metrics` by default that Prometheus can scrape. It is also possible to customize the export port and **URI path**, **add** **extra labels, the frequency of these scrapes, and other parameters** by configuring them in the Prometheus configuration `/prometheus_conf/prometheus.yml`file. 
+ +```bash +curl "http://127.0.0.1:9180/apisix/admin/global_rules" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "id":"rule-for-metrics", + "plugins":{ + "prometheus":{ + } + } +}' +``` + +## Create a Route + +Create a [Route](https://apisix.apache.org/docs/apisix/terminology/route/) object to route incoming requests to upstream nodes: + +```bash +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "name":"backend-service-route", + "methods":[ + "GET" + ], + "uri":"/", + "upstream_id":"1" +}' +``` + +## Send validation requests to the route + +To generate some metrics, you try to send few requests to the route we created in the previous step: + +```bash +curl -i -X GET "http://localhost:9080/" +``` + +If you run the above requests a couple of times, you can see from responses that APISIX routes some requests to `node1` and others to `node2`. That’s how Gateway load balancing works! + +```bash +HTTP/1.1 200 OK +Content-Type: text/plain; charset=utf-8 +Content-Length: 10 +Connection: keep-alive +Date: Sat, 22 Jul 2023 10:16:38 GMT +Server: APISIX/3.3.0 + +hello web2 + +... + +HTTP/1.1 200 OK +Content-Type: text/plain; charset=utf-8 +Content-Length: 10 +Connection: keep-alive +Date: Sat, 22 Jul 2023 10:16:39 GMT +Server: APISIX/3.3.0 + +hello web1 +``` + +## Collecting health check data with the Prometheus plugin + +Once the health checks and route are configured in APISIX, you can employ Prometheus to monitor health checks. APISIX **automatically exposes health check metrics data** for your APIs if the health check parameter is enabled for upstream nodes. 
You will see metrics in the response after fetching them from APISIX: + +```bash +curl -i http://127.0.0.1:9091/apisix/prometheus/metrics +``` + +Example Output: + +```bash +# HELP apisix_http_requests_total The total number of client requests since APISIX started +# TYPE apisix_http_requests_total gauge +apisix_http_requests_total 119740 +# HELP apisix_http_status HTTP status codes per service in APISIX +# TYPE apisix_http_status counter +apisix_http_status{code="200",route="1",matched_uri="/",matched_host="",service="",consumer="",node="172.27.0.5"} 29 +apisix_http_status{code="200",route="1",matched_uri="/",matched_host="",service="",consumer="",node="172.27.0.7"} 12 +# HELP apisix_upstream_status Upstream status from health check +# TYPE apisix_upstream_status gauge +apisix_upstream_status{name="/apisix/upstreams/1",ip="172.27.0.5",port="443"} 0 +apisix_upstream_status{name="/apisix/upstreams/1",ip="172.27.0.5",port="80"} 1 +apisix_upstream_status{name="/apisix/upstreams/1",ip="172.27.0.7",port="443"} 0 +apisix_upstream_status{name="/apisix/upstreams/1",ip="172.27.0.7",port="80"} 1 +``` + +Health check data is represented with metrics label `apisix_upstream_status`. It has attributes like upstream `name`, `ip` and `port`. A value of 1 represents healthy and 0 means the upstream node is unhealthy. + +## Visualize the data in the Prometheus dashboard + +Navigate to http://localhost:9090/ where the Prometheus instance is running in Docker and type **Expression** `apisix_upstream_status` in the search bar. You can also see the output of the health check statuses of upstream nodes on the **Prometheus dashboard** in the table or graph view: + +![Visualize the data in Prometheus dashboard](https://static.apiseven.com/uploads/2023/07/20/OGBtqbDq_output.png) + +## Next Steps + +You have now learned how to set up and monitor API health checks with Prometheus and APISIX. 
APISIX Prometheus plugin is configured to connect [Grafana](https://grafana.com/) automatically to visualize metrics. Keep exploring the data and customize the [Grafana dashboard](https://grafana.com/grafana/dashboards/11719-apache-apisix/) by adding a panel that shows the number of active health checks. + +### Related resources + +- [Monitoring API Metrics: How to Ensure Optimal Performance of Your API?](https://api7.ai/blog/api7-portal-monitor-api-metrics) +- [Monitoring Microservices with Prometheus and Grafana](https://api7.ai/blog/introduction-to-monitoring-microservices) + +### Recommended content + +- [Implementing resilient applications with API Gateway (Health Check)](https://dev.to/apisix/implementing-resilient-applications-with-api-gateway-health-check-338c) diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/observe-your-api.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/observe-your-api.md new file mode 100644 index 0000000..b828908 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/observe-your-api.md @@ -0,0 +1,267 @@ +--- +title: Observe APIs +keywords: + - API gateway + - Apache APISIX + - Observability + - Monitor + - Plugins +description: Apache APISIX Observability Plugins and take a look at how to set up these plugins. +--- + + + +In this guide, we can leverage the power of some [Apache APISIX](https://apisix.apache.org/) Observability Plugins and take a look at how to set up these plugins, how to use them to understand API behavior, and later solve problems that impact our users. + +## API Observability + +Nowadays **API Observability** is already a part of every API development as it addresses many problems related to API consistency, reliability, and the ability to quickly iterate on new API features. When you design for full-stack observability, you get everything you need to find issues and catch breaking changes. 
+ +API observability can help every team in your organization: + +- Sales and growth teams to monitor your API usage, free trials, observe expansion opportunities and ensure that API serves the correct data. + +- Engineering teams to monitor and troubleshoot API issues. + +- Product teams to understand API usage and business value. + +- Security teams to detect and protect from API threats. + +![API observability in every team](https://static.apiseven.com/2022/09/14/6321ceff5548e.jpg) + +## A central point for observation + +We know that **an API gateway** offers a central control point for incoming traffic to a variety of destinations but it can also be a central point for observation as well since it is uniquely qualified to know about all the traffic moving between clients and our service networks. + +The core of observability breaks down into _three key areas_: structured logs, metrics, and traces. Let’s break down each pillar of API observability and learn how with Apache APISIX Plugins we can simplify these tasks and provide a solution that you can use to better understand API usage. + +![Observability of three key areas](https://static.apiseven.com/2022/09/14/6321cf14c555a.jpg) + +## Prerequisites + +Before enabling our plugins we need to install Apache APISIX, create a route, an upstream, and map the route to the upstream. You can simply follow the [getting started guide](https://apisix.apache.org/docs/apisix/getting-started) provided on the website. + +## Logs + +**Logs** are also easy to instrument and a trivial step of API observability; they can be used to inspect API calls in real-time for debugging, auditing, and recording time-stamped events that happened over time.
There are several logger plugins Apache APISIX provides such as: + +- [http-logger](https://apisix.apache.org/docs/apisix/plugins/http-logger/) + +- [skywalking-logger](https://apisix.apache.org/docs/apisix/plugins/skywalking-logger/) + +- [tcp-logger](https://apisix.apache.org/docs/apisix/plugins/tcp-logger) + +- [kafka-logger](https://apisix.apache.org/docs/apisix/plugins/kafka-logger) + +- [rocketmq-logger](https://apisix.apache.org/docs/apisix/plugins/rocketmq-logger) + +- [udp-logger](https://apisix.apache.org/docs/apisix/plugins/udp-logger) + +- [clickhouse-logger](https://apisix.apache.org/docs/apisix/plugins/clickhouse-logger) + +- [error-logger](https://apisix.apache.org/docs/apisix/plugins/error-log-logger) + +- [google-cloud-logging](https://apisix.apache.org/docs/apisix/plugins/google-cloud-logging) + +And you can see the [full list](../plugins/http-logger.md) on the official website of Apache APISIX. Now for demo purposes, let's choose a simple but mostly used _http-logger_ plugin that is capable of sending API Log data requests to HTTP/HTTPS servers or sends as JSON objects to Monitoring tools. We can assume that a route and an upstream are created. You can learn how to set up them in the **[Getting started with Apache APISIX](https://youtu.be/dUOjJkb61so)** video tutorial. Also, you can find all command-line examples on the GitHub page [apisix-observability-plugins](https://boburmirzo.github.io/apisix-observability-plugins/) + +You can generate a mock HTTP server at [mockbin.com](https://mockbin.org/) to record and view the logs. Note that we also bind the route to an upstream (You can refer to this documentation to learn about more [core concepts of Apache APISIX](https://apisix.apache.org/docs/apisix/architecture-design/apisix)). + +The following is an example of how to enable the http-logger for a specific route. 
+ +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell + +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": "http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61" + } + }, + "upstream_id": "1", + "uri": "/get" +}' + +``` + +:::note + +In the `http-logger` plugin settings, you can just put your mock server URI address like below: + +```json +{ + "uri": "http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61" +} +``` + +::: + +Once we get a successful response from the APISIX server, we can send a request to this _get_ endpoint to generate logs. + +```shell + +curl -i http://127.0.0.1:9080/get + +``` + +Then if you click and navigate to our [mock server link](http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61/log), we can see the recent logs that were sent: + +![http-logger-plugin-test-screenshot](https://static.apiseven.com/2022/09/14/6321d1d83eb7a.png) + +## Metrics + +**Metrics** are a numeric representation of data measured over intervals of time. You can also aggregate this data into daily or weekly frequency and run queries against a distributed system like [Elasticsearch](https://www.elastic.co/). Or sometimes based on metrics you trigger alerts to take any action later. Once API metrics are collected, you can track them with metrics tracking tools such as [Prometheus](https://prometheus.io/). + +Apache APISIX API Gateway also offers [prometheus-plugin](https://apisix.apache.org/docs/apisix/plugins/prometheus/) to fetch your API metrics and expose them in Prometheus. Behind the scenes, Apache APISIX downloads the Grafana dashboard meta, imports it to [Grafana](https://grafana.com/), and fetches real-time metrics from the Prometheus plugin.
+ +Let’s enable prometheus-plugin for our route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/get", + "plugins": { + "prometheus": {} + }, + "upstream_id": "1" +}' +``` + +We fetch the metric data from the specified URL `/apisix/prometheus/metrics`. + +```shell +curl -i http://127.0.0.1:9091/apisix/prometheus/metrics +``` + +You will get a response with Prometheus metrics something like below: + +```text +HTTP/1.1 200 OK +Server: openresty +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive + +# HELP apisix_batch_process_entries batch process remaining entries +# TYPE apisix_batch_process_entries gauge +apisix_batch_process_entries{name="http logger",route_id="1",server_addr="172.19.0.8"} 0 +# HELP apisix_etcd_modify_indexes Etcd modify index for APISIX keys +# TYPE apisix_etcd_modify_indexes gauge +apisix_etcd_modify_indexes{key="consumers"} 17819 +apisix_etcd_modify_indexes{key="global_rules"} 17832 +apisix_etcd_modify_indexes{key="max_modify_index"} 20028 +apisix_etcd_modify_indexes{key="prev_index"} 18963 +apisix_etcd_modify_indexes{key="protos"} 0 +apisix_etcd_modify_indexes{key="routes"} 20028 +... +``` + +And we can also check the status of our endpoint at the Prometheus dashboard by pointing to this URL `http://localhost:9090/targets` + +![plugin-orchestration-configure-rule-screenshot](https://static.apiseven.com/2022/09/14/6321d30b32024.png) + +As you can see, the metrics endpoint exposed by Apache APISIX is up and running. + +Now you can query metrics for `apisix_http_status` to see what HTTP requests are handled by API Gateway and what was the outcome. + +![prometheus-plugin-dashboard-query-http-status-screenshot](https://static.apiseven.com/2022/09/14/6321d30aed3b2.png) + +In addition to this, you can view the Grafana dashboard running in your local instance.
Go to `http://localhost:3000/` + +![prometheus-plugin-grafana-dashboard-screenshot](https://static.apiseven.com/2022/09/14/6321d30bba97c.png) + +You can also check two other plugins for metrics: + +- [Node status Plugin](../plugins/node-status.md) + +- [Datadog Plugin](../plugins/datadog.md) + +## Tracing + +The third pillar is **tracing**: distributed tracing allows you to understand the life of a request as it traverses your service network and allows you to answer questions like what service has this request touched and how much latency was introduced. Traces enable you to further explore which logs to look at for a particular session or related set of API calls. + +[Zipkin](https://zipkin.io/) is an open-source distributed tracing system. The [APISIX plugin](https://apisix.apache.org/docs/apisix/plugins/zipkin) is supported to collect tracing data and report it to a Zipkin Collector based on the [Zipkin API specification](https://zipkin.io/pages/instrumenting.html). + +Here’s an example to enable the `zipkin` plugin on the specified route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": [ + "GET" + ], + "uri": "/get", + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9411/api/v2/spans", + "sample_ratio": 1 + } + }, + "upstream_id": "1" +}' +``` + +We can test our example by simply running the following curl command: + +```shell +curl -i http://127.0.0.1:9080/get +``` + +As you can see, some additional trace identifiers (like traceId, spanId, parentId) were appended to the headers: + +```text +"X-B3-Parentspanid": "61bd3f4046a800e7", +"X-B3-Sampled": "1", +"X-B3-Spanid": "855cd5465957f414", +"X-B3-Traceid": "e18985df47dab632d62083fd96626692", +``` + +Then you can use a browser to access `http://127.0.0.1:9411/zipkin`, and see traces on the Web UI of Zipkin. + +> Note that you need to run the Zipkin instance in order to use the Zipkin Web UI.
For example, by using docker you can simply run it: +>`docker run -d -p 9411:9411 openzipkin/zipkin` + +![Zipkin plugin output 1](https://static.apiseven.com/2022/09/14/6321dc27f3d33.png) + +![Zipkin plugin output 2](https://static.apiseven.com/2022/09/14/6321dc284049c.png) + +As you noticed, the recent traces were exposed in the above pictures. + +You can also check two other plugins for tracing: + +- [Skywalking-plugin](../plugins/skywalking.md) + +- [Opentelemetry-plugin](../plugins/opentelemetry.md) + +## Summary + +As we learned, API Observability is a sort of framework for managing your applications in an API world and Apache APISIX API Gateway plugins can help when observing modern API-driven applications by integrating with several observability platforms. So, you can make your development work focused on core business features instead of building a custom integration for observability tools. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/protect-api.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/protect-api.md new file mode 100644 index 0000000..9ef3770 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/protect-api.md @@ -0,0 +1,132 @@ +--- +title: Protect API +keywords: + - API Gateway + - Apache APISIX + - Rate Limit + - Protect API +description: This article describes how to secure your API with the rate limiting plugin for API Gateway Apache APISIX. +--- + + + +This article describes how to secure your API with the rate limiting plugin for API Gateway Apache APISIX. + +## Concept introduction + +### Plugin + +This represents the configuration of the plugins that are executed during the HTTP request/response lifecycle. A [Plugin](../terminology/plugin.md) configuration can be bound directly to a Route, a Service, a Consumer or a Plugin Config.
+ +:::note + +If [Route](../terminology/route.md), [Service](../terminology/service.md), [Plugin Config](../terminology/plugin-config.md) or [Consumer](../terminology/consumer.md) are all bound to the same plugin, only one plugin configuration will take effect. The priority of plugin configurations is described in [plugin execution order](../terminology/plugin.md#plugins-execution-order). At the same time, there are various stages involved in the plugin execution process. See [plugin execution lifecycle](../terminology/plugin.md#plugins-execution-order). + +::: + +## Preconditions + +Before following this tutorial, ensure you have [exposed the service](./expose-api.md). + +## Protect your API + +We can use rate limits to limit our API services to ensure the stable operation of API services and avoid system crashes caused by some sudden traffic. We can restrict as follows: + +1. Limit the request rate; +2. Limit the number of requests per unit time; +3. Delay request; +4. Reject client requests; +5. Limit the rate of response data. + +APISIX provides several plugins for traffic and rate limiting, including [limit-conn](../plugins/limit-conn.md), [limit-count](../plugins/limit-count.md), [limit-req](../plugins/limit-req.md) and other plugins. + +- The `limit-conn` Plugin limits the number of concurrent requests to your services. +- The `limit-req` Plugin limits the number of requests to your service using the leaky bucket algorithm. +- The `limit-count` Plugin limits the number of requests to your service by a given count per time. + +Next, we will use the `limit-count` plugin as an example to show you how to protect your API with a rate limit plugin: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. Create a Route.
+ +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream_id": "1" +}' +``` + +In the above configuration, a Route with ID `1` is created using the upstream made in [Expose Service](./expose-api.md), and the `limit-count` plugin is enabled. The plugin only allows the client to access the upstream service `2` times within `60` seconds. If more than two times, the `503` error code will be returned. + +2. Test + +```shell +curl http://127.0.0.1:9080/index.html +``` + +After using the above command to access three times in a row, the following error will appear: + +``` + +503 Service Temporarily Unavailable + +

503 Service Temporarily Unavailable

+
openresty
+ + +``` + +If the above result is returned, the `limit-count` plugin has taken effect and protected your API. + +## More Traffic plugins + +In addition to providing plugins for limiting current and speed, APISIX also offers many other plugins to meet the needs of actual scenarios: + +- [proxy-cache](../plugins/proxy-cache.md): This plugin provides the ability to cache backend response data. It can be used with other plugins. The plugin supports both disk and memory-based caching. Currently, the data to be cached can be specified according to the response code and request mode, and more complex caching strategies can also be configured through the no_cache and cache_bypass attributes. +- [request-validation](../plugins/request-validation.md): This plugin is used to validate requests forwarded to upstream services in advance. +- [proxy-mirror](../plugins/proxy-mirror.md): This plugin provides the ability to mirror client requests. Traffic mirroring is copying the real online traffic to the mirroring service, so that the online traffic or request content can be analyzed in detail without affecting the online service. +- [api-breaker](../plugins/api-breaker.md): This plugin implements an API circuit breaker to help us protect upstream business services. +- [traffic-split](../plugins/traffic-split.md): You can use this plugin to gradually guide the percentage of traffic between upstreams to achieve blue-green release and grayscale release. +- [request-id](../plugins/request-id.md): The plugin adds a `unique` ID to each request proxy through APISIX for tracking API requests. +- [proxy-control](../plugins/proxy-control.md): This plugin can dynamically control the behavior of NGINX proxy. +- [client-control](../plugins/client-control.md): This plugin can dynamically control how NGINX handles client requests by setting an upper limit on the client request body size. 
+ +## More Tutorials + +You can refer to the [Observe API](./observe-your-api.md) document to monitor APISIX, collect logs, and track. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/websocket-authentication.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/websocket-authentication.md new file mode 100644 index 0000000..f77d466 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/tutorials/websocket-authentication.md @@ -0,0 +1,129 @@ +--- +title: WebSocket Authentication +keywords: + - API Gateway + - Apache APISIX + - WebSocket + - Authentication +description: This article is a guide on how to configure authentication for WebSocket connections. +--- + + + +Apache APISIX supports [WebSocket](https://en.wikipedia.org/wiki/WebSocket) traffic, but the WebSocket protocol doesn't handle authentication. This article guides you on how to configure authentication for WebSocket connections using Apache APISIX. + +## WebSocket Protocol + +To establish a WebSocket connection, the client sends a WebSocket handshake request, for which the server returns a WebSocket handshake response as shown below: + +```text title="Client request" +GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw== +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 +Origin: http://example.com +``` + +```text title="Server response" +HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: HSmrc0sMlYUkAGmm5OPpG2HaGWk= +Sec-WebSocket-Protocol: chat +``` + +The handshake workflow is shown below: + +![Websocket Handshake Workflow](https://static.apiseven.com/2022/12/06/638eda2e2415f.png) + +## WebSocket Authentication + +APISIX supports several authentication methods like [basic-auth](https://apisix.apache.org/docs/apisix/plugins/basic-auth/), [key-auth](https://apisix.apache.org/docs/apisix/plugins/key-auth/), 
and [jwt-auth](https://apisix.apache.org/docs/apisix/plugins/jwt-auth/). + +While establishing connections from the client to server in the _handshake_ phase, APISIX first checks its authentication information before choosing to forward the request or deny it. + +## Prerequisites + +Before you move on, make sure you have: + +1. A WebSocket server as the Upstream. This article uses [Postman's public echo service](https://blog.postman.com/introducing-postman-websocket-echo-service/): `wss://ws.postman-echo.com/raw`. +2. APISIX 3.0 installed. + +## Configuring Authentication + +### Create a Route + +First we will create a Route to the Upstream echo service. + +Since the Upstream uses wss protocol, the scheme is set to `https`. We should also set `enable_websocket` to `true`. + +In this tutorial, we will use the [key-auth](https://apisix.apache.org/docs/apisix/plugins/key-auth/) Plugin. This would work similarly for other authentication methods: + +```shell +curl --location --request PUT 'http://127.0.0.1:9180/apisix/admin/routes/1' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "uri": "/*", + "methods": ["GET"], + "enable_websocket": true, + "upstream": { + "type": "roundrobin", + "nodes": { + "ws.postman-echo.com:443": 1 + }, + "scheme": "https" + }, + "plugins": { + "key-auth": {} + } +}' +``` + +### Create a Consumer + +We will now create a [Consumer](https://apisix.apache.org/docs/apisix/terminology/consumer/) and add a key `this_is_the_key`. A user would now need to use this key configured in the Consumer object to access the API. 
+ +```sh +curl --location --request PUT 'http://127.0.0.1:9180/apisix/admin/consumers/jack' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "this_is_the_key" + } + } +}' +``` + +## Testing the Route + +Now, if you try to connect `ws://127.0.0.1:9080/raw` without the `apikey` header or an incorrect key, APISIX will return a `401 Unauthorized`. + +![Connect without Key](https://static.apiseven.com/2022/12/06/638ef6db9dd4b.png) + +To authenticate, you can add the header `apikey` with the value `this_is_the_key`: + +![Connect with key](https://static.apiseven.com/2022/12/06/638efac7c42b6.png) diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/upgrade-guide-from-2.15.x-to-3.0.0.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/upgrade-guide-from-2.15.x-to-3.0.0.md new file mode 100644 index 0000000..de51e45 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/upgrade-guide-from-2.15.x-to-3.0.0.md @@ -0,0 +1,454 @@ +--- +title: Upgrade Guide +keywords: + - APISIX + - APISIX Upgrade Guide + - APISIX Version Upgrade +description: Guide for upgrading APISIX from version 2.15.x to 3.0.0. +--- + + + +This document guides you in upgrading APISIX from version 2.15.x to 3.0.0. + +:::note + +Upgrading to version 3.0.0 is a major change and it is recommended that you first upgrade to version 2.15.x before you upgrade to 3.0.0. + +::: + +## Changelog + +Please refer to the [3.0.0-beta](https://github.com/apache/apisix/blob/master/CHANGELOG.md#300-beta) and [3.0.0](https://github.com/apache/apisix/blob/master/CHANGELOG.md#300) changelogs for a complete list of incompatible changes and major updates. + +## Deployments + +From 3.0.0, we no longer support the Alpine-based images of APISIX. 
You can use the [Debian or CentOS-based images](https://hub.docker.com/r/apache/apisix/tags?page=1&ordering=last_updated) instead. + +In addition to the Docker images, we also provide: + +1. RPM packages for CentOS 7 and CentOS 8 supporting both AMD64 and ARM64 architectures. +2. DEB packages for Debian 11 (bullseye) supporting both AMD64 and ARM64 architectures. + +See the [installation guide](/installation-guide.md) for more details. + +3.0.0 also introduces multiple deployment modes. The following modes are supported: + +1. [Traditional](./deployment-modes.md#traditional): As the name implies, this is the original deployment mode where one instance of APISIX acts as the control plane and the data plane. Use this deployment mode to keep your deployment similar to older versions. +2. [Decoupled](./deployment-modes.md#decoupled): In this mode, the data plane and the control plane are separated. You can deploy an instance of APISIX either as a control plane or a data plane. +3. [Standalone](./deployment-modes.md#standalone): Using this mode will disable etcd as the configuration center and use a static configuration file instead. You can use this to manage APISIX configuration declaratively or for using other configuration centers. + +## Dependencies + +All Docker images and binary packages (RPM, DEB) already come with all the necessary dependencies for APISIX. + +Some features might require additional Nginx modules in OpenResty and require you to [build a custom OpenResty distribution (APISIX-Base)](https://github.com/api7/apisix-build-tools). + +To run APISIX on a native OpenResty instance use [OpenResty version 1.19.3.2](https://openresty.org/en/download.html#legacy-releases) and above. + +## Configurations + +There are some major changes to the configuration file in APISIX. You need to update your configuration file (`conf/config.yaml`) to reflect these changes. See the `conf/config-default.yaml` file for the complete changes.
+ +The following attributes in the configuration have been moved: + +1. `config_center` is replaced by `config_provider` and moved under `deployment`. +2. `etcd` is moved under `deployment`. +3. The following Admin API configuration attributes are moved to the `admin` attribute under `deployment`: + 1. `admin_key` + 2. `enable_admin_cors` + 3. `allow_admin` + 4. `admin_listen` + 5. `https_admin` + 6. `admin_api_mtls` + 7. `admin_api_version` + +The following attributes in the configuration have been replaced: + +1. `enable_http2` and `listen_port` under `apisix.ssl` are replaced by `apisix.ssl.listen`. i.e., the below configuration: + + ```yaml title="conf/config.yaml" + ssl: + enable_http2: true + listen_port: 9443 + ``` + + changes to: + + ```yaml title="conf/config.yaml" + ssl: + listen: + - port: 9443 + enable_http2: true + ``` + +2. `nginx_config.http.lua_shared_dicts` is replaced by `nginx_config.http.custom_lua_shared_dict`. i.e., the below configuration: + + ```yaml title="conf/config.yaml" + nginx_config: + http: + lua_shared_dicts: + my_dict: 1m + ``` + + changes to: + + ```yaml title="conf/config.yaml" + nginx_config: + http: + custom_lua_shared_dict: + my_dict: 1m + ``` + + This attribute declares custom shared memory blocks. + +3. `etcd.health_check_retry` is replaced by `deployment.etcd.startup_retry`. So this configuration: + + ```yaml title="conf/config.yaml" + etcd: + health_check_retry: 2 + ``` + + changes to: + + ```yaml title="conf/config.yaml" + deployment: + etcd: + startup_retry: 2 + ``` + + This attribute is to configure the number of retries when APISIX tries to connect to etcd. + +4. `apisix.port_admin` is replaced by `deployment.admin.admin_listen`. 
So your previous configuration: + + ```yaml title="conf/config.yaml" + apisix: + port_admin: 9180 + ``` + + Should be changed to: + + ```yaml title="conf/config.yaml" + deployment: + apisix: + admin_listen: + ip: 127.0.0.1 # replace with the actual IP exposed + port: 9180 + ``` + + This attribute configures the Admin API listening port. + +5. `apisix.real_ip_header` is replaced by `nginx_config.http.real_ip_header`. + +6. `enable_cpu_affinity` is set to `false` by default instead of `true`. This is because Nginx's `worker_cpu_affinity` does not count against the cgroup when APISIX is deployed in containers. In such scenarios, it can affect APISIX's behavior when multiple instances are bound to a single CPU. + +## Data Compatibility + +In 3.0.0, the data structures holding route, upstream, and plugin configuration have been modified and is not fully compatible with 2.15.x. You won't be able to connect an instance of APISIX 3.0.0 to an etcd cluster used by APISIX 2.15.x. + +To ensure compatibility, you can try one of the two ways mentioned below: + +1. Backup the incompatible data (see [etcdctl snapshot](https://etcd.io/docs/v3.5/op-guide/maintenance/#snapshot-backup)) in etcd and clear it. Convert the backed up data to be compatible with 3.0.0 as mentioned in the below examples and reconfigure it through the Admin API of 3.0.0 instance. +2. Use custom scripts to convert the data structure in etcd to be compatible with 3.0.0. + +The following changes have been made in version 3.0.0: + +1. `disable` attribute of a plugin has been moved under `_meta`. It enables or disables the plugin. For example, this configuration to disable the `limit-count` plugin: + + ```json + { + "plugins":{ + "limit-count":{ + ... // plugin configuration + "disable":true + } + } + } + ``` + + should be changed to: + + ```json + { + "plugins":{ + "limit-count":{ + ... // plugin configuration + "_meta":{ + "disable":true + } + } + } + } + ``` + +2. 
`service_protocol` in route has been replaced with `upstream.scheme`. For example, this configuration: + + ```json + { + "uri": "/hello", + "service_protocol": "grpc", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + } + ``` + + Should be changed to: + + ```json + { + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "scheme": "grpc", + "nodes": { + "127.0.0.1:1980": 1 + } + } + } + ``` + +3. `audience` field from the [authz-keycloak](./plugins/authz-keycloak.md) plugin has been replaced with `client_id`. So this configuration: + + ```json + { + "plugins":{ + "authz-keycloak":{ + ... // plugin configuration + "audience":"Client ID" + } + } + } + ``` + + should be changed to: + + ```json + { + "plugins":{ + "authz-keycloak":{ + ... // plugin configuration + "client_id":"Client ID" + } + } + } + ``` + +4. `upstream` attribute from the [mqtt-proxy](./plugins/mqtt-proxy.md) plugin has been moved outside the plugin conference and referenced in the plugin. The configuration below: + + ```json + { + "remote_addr": "127.0.0.1", + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4, + "upstream": { + "ip": "127.0.0.1", + "port": 1980 + } + } + } + } + ``` + + changes to: + + ```json + { + "remote_addr": "127.0.0.1", + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + } + ] + } + } + ``` + +5. `max_retry_times` and `retry_interval` fields from the [syslog](./plugins/syslog.md) plugin are replaced `max_retry_count` and `retry_delay` respectively. The configuration below: + + ```json + { + "plugins":{ + "syslog":{ + "max_retry_times":1, + "retry_interval":1, + ... // other configuration + } + } + } + ``` + + changes to: + + ```json + { + "plugins":{ + "syslog":{ + "max_retry_count":1, + "retry_delay":1, + ... 
// other configuration + } + } + } + ``` + +6. `scheme` attribute has been removed from the [proxy-rewrite](./plugins/proxy-rewrite.md) plugin and has been added to the upstream. The configuration below: + + ```json + { + "plugins":{ + "proxy-rewrite":{ + "scheme":"https", + ... // other configuration + } + }, + "upstream":{ + "nodes":{ + "127.0.0.1:1983":1 + }, + "type":"roundrobin" + }, + "uri":"/hello" + } + ``` + + changes to: + + ```json + { + "plugins":{ + "proxy-rewrite":{ + ... // other configuration + } + }, + "upstream":{ + "scheme":"https", + "nodes":{ + "127.0.0.1:1983":1 + }, + "type":"roundrobin" + }, + "uri":"/hello" + } + ``` + +## API + +Changes have been made to the Admin API to make it easier to use and be more RESTful. + +The following changes have been made: + +1. The `count`, `action`, and `node` fields in the response body when querying resources (single and list) are removed and the fields in `node` are moved up to the root of the response body. For example, if you query the `/apisix/admin/routes/1` endpoint of the Admin API in version 2.15.x, you get the response: + + ```json + { + "count":1, + "action":"get", + "node":{ + "key":"\/apisix\/routes\/1", + "value":{ + ... // content + } + } + } + ``` + + In 3.0.0, this response body is changes to: + + ```json + { + "key":"\/apisix\/routes\/1", + "value":{ + ... // content + } + } + ``` + +2. When querying list resources, the `dir` field is removed from the response body, a `list` field to store the data of the list resources and a `total` field to show the total number of list resources are added. For example, if you query the `/apisix/admin/routes` endpoint of the Admin API in version 2.15.x, you get the response: + + ```json + { + "action":"get", + "count":2, + "node":{ + "key":"\/apisix\/routes", + "nodes":[ + { + "key":"\/apisix\/routes\/1", + "value":{ + ... // content + } + }, + { + "key":"\/apisix\/routes\/2", + "value":{ + ... 
// content + } + } + ], + "dir":true + } + } + ``` + + In 3.0.0, the response body is: + + ```json + { + "list":[ + { + "key":"\/apisix\/routes\/1", + "value":{ + ... // content + } + + }, + { + "key":"\/apisix\/routes\/2", + "value":{ + ... // content + } + } + ], + "total":2 + } + ``` + +3. The endpoint to SSL resource is changed from `/apisix/admin/ssl/{id}` to `/apisix/admin/ssls/{id}`. + +4. The endpoint to Proto resource is changed from `/apisix/admin/proto/{id}` to `/apisix/admin/protos/{id}`. + +5. Admin API port is set to `9180` by default. diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/wasm.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/wasm.md new file mode 100644 index 0000000..306d0b8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/wasm.md @@ -0,0 +1,122 @@ +--- +title: Wasm +--- + + + +APISIX supports Wasm plugins written with [Proxy Wasm SDK](https://github.com/proxy-wasm/spec#sdks). + +Currently, only a few APIs are implemented. Please follow [wasm-nginx-module](https://github.com/api7/wasm-nginx-module) to know the progress. + +## Programming model + +The plugin supports the following concepts from Proxy Wasm: + +``` + Wasm Virtual Machine +┌────────────────────────────────────────────────────────────────┐ +│ Your Plugin │ +│ │ │ +│ │ 1: 1 │ +│ │ 1: N │ +│ VMContext ────────── PluginContext │ +│ ╲ 1: N │ +│ ╲ │ +│ ╲ HttpContext │ +│ (Http stream) │ +└────────────────────────────────────────────────────────────────┘ +``` + +* All plugins run in the same Wasm VM, like the Lua plugin in the Lua VM +* Each plugin has its own VMContext (the root ctx) +* Each configured route/global rules has its own PluginContext (the plugin ctx). +For example, if we have a service configuring with Wasm plugin, and two routes inherit from it, +there will be two plugin ctxs. +* Each HTTP request which hits the configuration will have its own HttpContext (the HTTP ctx). 
+For example, if we configure both global rules and route, the HTTP request will +have two HTTP ctxs, one for the plugin ctx from global rules and the other for the +plugin ctx from route. + +## How to use + +First of all, we need to define the plugin in `config.yaml`: + +```yaml +wasm: + plugins: + - name: wasm_log # the name of the plugin + priority: 7999 # priority + file: t/wasm/log/main.go.wasm # the path of `.wasm` file + http_request_phase: access # default to "access", can be one of ["access", "rewrite"] +``` + +That's all. Now you can use the wasm plugin as a regular plugin. + +For example, enable this plugin on the specified route: + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "wasm_log": { + "conf": "blahblah" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +Attributes below can be configured in the plugin: + +| Name | Type | Requirement | Default | Valid | Description | +| --------------------------------------| ------------| -------------- | -------- | --------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| conf | string or object | required | | != "" and != {} | the plugin ctx configuration which can be fetched via Proxy Wasm SDK | + +Here is the mapping between Proxy Wasm callbacks and APISIX's phases: + +* `proxy_on_configure`: run once there is not PluginContext for the new configuration. +For example, when the first request hits the route which has Wasm plugin configured. 
+* `proxy_on_http_request_headers`: run in the access/rewrite phase, depends on the configuration of `http_request_phase`. +* `proxy_on_http_request_body`: run in the same phase of `proxy_on_http_request_headers`. To run this callback, we need to set property `wasm_process_req_body` to non-empty value in `proxy_on_http_request_headers`. See `t/wasm/request-body/main.go` as an example. +* `proxy_on_http_response_headers`: run in the header_filter phase. +* `proxy_on_http_response_body`: run in the body_filter phase. To run this callback, we need to set property `wasm_process_resp_body` to non-empty value in `proxy_on_http_response_headers`. See `t/wasm/response-rewrite/main.go` as an example. + +## Example + +We have reimplemented some Lua plugin via Wasm, under `t/wasm/` of this repo: + +* fault-injection +* forward-auth +* response-rewrite diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/xrpc.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/xrpc.md new file mode 100644 index 0000000..f3b3c3d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/xrpc.md @@ -0,0 +1,211 @@ +--- +title: xRPC +--- + + + +## What is xRPC + +APISIX supports proxy TCP protocols, but there are times when a pure TCP protocol proxy is not enough. It would be helpful if you had an application-specific proxy, such as Redis Proxy, Kafka Proxy, etc. In addition, some features must be coded and decoded for that protocol before they can be implemented. + +Therefore, Apache APISIX implements an L4 protocol extension framework called xRPC that allows developers to customize application-specific protocols. Based on xRPC, developers can codec requests and responses through Lua code and then implement fault injection, log reporting, dynamic routing, and other functions based on understanding the protocol content. + +Based on the xRPC framework, APISIX can provide a proxy implementation of several major application protocols. 
In addition, users can also support their own private TCP-based application protocols based on this framework, giving them precise granularity and higher-level 7-layer control similar to HTTP protocol proxies. + +## How to use + +Currently, the steps for users to use xRPC are relatively simple and can be handled quickly in just two steps. + +1. First, enable the corresponding protocol in `conf/config.yaml`. + +```yaml +xrpc: + protocols: + - name: redis +``` + +2. Then specify the protocol in the relevant `stream_routes`. + +```json +{ + ... + "protocol": { + "name": "redis", + "conf": { + "faults": [ + { "delay": 5, "key": "bogus_key", "commands":["GET", "MGET"]} + ] + } + } +} +``` + +The TCP connection that hits that `stream_route` is then handled according to that protocol. + +## Configuration + +| Name | Type | Required | Default | Description | +|-------------|--------|----------|---------|-------------------------------------------------| +| name | string | True | | the protocol name | +| conf | | False | | the application-specific protocol configuration | +| superior_id | ID | False | | the ID of the superior stream route | + +## Scenarios + +### Fault injection + +Taking Redis protocol as an example, after decoding the RESP protocol of Redis, we can know the command and parameters of the current request and then get the corresponding content according to the configuration, encode it using RESP protocol, and return it to the client. + +Suppose the user uses the following routing configuration. + +```json +{ + ... + "protocol": { + "name": "redis", + "conf": { + "faults": [ + { "delay": 5, "key": "bogus_key", "commands":["GET", "MGET"]} + ] + } + } +} +``` + +Then when the command is "GET" or "MGET", and the operation key contains "bogus_key", it will get "delay" according to the configuration: "5" parameter, and the corresponding operation will be performed with a delay of 5 seconds. 
+ +Since xRPC requires developers to codec the protocol when customizing it, the same operation can be applied to other protocols. + +### Dynamic Routing + +In the process of proxy RPC protocol, there are often different RPC calls that need to be forwarded to different upstream requirements. Therefore, the xRPC framework has built-in support for dynamic routing. + +To solve this problem, the concept of superior and subordinate is used in xRPC routing, as shown in the following two examples. + +```json +# /stream_routes/1 +{ + "sni": "a.test.com", + "protocol": { + "name": "xx", + "conf": { + ... + } + }, + "upstream_id": "1" +} +``` + +```json +# /stream_routes/2 +{ + "protocol": { + "name": "xx", + "superior_id": "1", + "conf": { + ... + } + }, + "upstream_id": "2" +} +``` + +One specifies the `superior_id`, whose corresponding value is the ID of another route; the other specifies that the route with the `superior_id` is a subordinate route, subordinate to the one with the `superior_id`. Only the superior route is involved in matching at the entry point. The subordinate route is then matched by the specific protocol when the request is decoded. + +For example, for the Dubbo RPC protocol, the subordinate route is matched based on the service_name and other parameters configured in the route and the actual service_name brought in the request. If the match is successful, the configuration above the subordinate route is used, otherwise, the configuration of the superior route is still used. In the above example, if the match for route 2 is successful, it will be forwarded to upstream 2; otherwise, it will still be forwarded to upstream 1. + +### Log Reporting + +xRPC supports logging-related functions. You can use this feature to filter requests that require attention, such as high latency, excessive transfer content, etc. 
+ +Each logger item configuration parameter will contain + +- name: the Logger plugin name, +- filter: the prerequisites for the execution of the logger plugin(e.g., request processing time exceeding a given value), +- conf: the configuration of the logger plugin itself. + + The following configuration is an example: + +```json +{ + ... + "protocol": { + "name": "redis", + "logger": { + { + "name": "syslog", + "filter": [ + ["rpc_time", ">=", 0.01] + ], + "conf": { + "host": "127.0.0.1", + "port": 8125, + } + } + } + } +} +``` + +This configuration means that when the `rpc_time` is greater than 0.01 seconds, xRPC reports the request log to the log server via the `syslog` plugin. `conf` is the configuration of the logging server required by the `syslog` plugin. + +Unlike standard TCP proxies, which only execute a logger when the connection is closed, xRPC executes a logger at the end of each 'request'. + +The protocol itself defines the granularity of the specific request, and the xRPC extension code implements the request's granularity. + +For example, in the Redis protocol, the execution of a command is considered a request. + +### Dynamic metrics + +xRPC also supports gathering metrics on the fly and exposing them via Prometheus. + +To know how to enable Prometheus metrics for TCP and collect them, please refer to [prometheus](./plugins/prometheus.md). + +To get the protocol-specific metrics, you need to: + +1. Make sure the Prometheus is enabled for TCP +2. Add the metric field to the specific route and ensure the `enable` is true: + +```json +{ + ... + "protocol": { + "name": "redis", + "metric": { + "enable": true + } + } +} +``` + +Different protocols will have different metrics. Please refer to the `Metrics` section of their own documentation. + +## How to write your own protocol + +Assuming that your protocol is named `my_proto`, you need to create a directory that can be introduced by `require "apisix.stream.xrpc.protocols.my_proto"`. 
+Inside this directory you need to have two files, `init.lua`, which implements the methods required by the xRPC framework, and `schema.lua`, which implements the schema checks for the protocol configuration. + +For a concrete implementation, you can refer to the existing protocols at: + +* https://github.com/apache/apisix/tree/master/apisix/stream/xrpc/protocols +* https://github.com/apache/apisix/tree/master/t/xrpc/apisix/stream/xrpc/protocols + +To know what methods are required to be implemented and how the xRPC framework works, please refer to: +https://github.com/apache/apisix/tree/master/apisix/stream/xrpc/runner.lua diff --git a/CloudronPackages/APISIX/apisix-source/docs/en/latest/xrpc/redis.md b/CloudronPackages/APISIX/apisix-source/docs/en/latest/xrpc/redis.md new file mode 100644 index 0000000..72c07ce --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/en/latest/xrpc/redis.md @@ -0,0 +1,132 @@ +--- +title: redis +keywords: + - Apache APISIX + - API Gateway + - xRPC + - redis +description: This document contains information about the Apache APISIX xRPC implementation for Redis. +--- + + + +## Description + +The Redis protocol support allows APISIX to proxy Redis commands, and provide various features according to the content of the commands, including: + +* [Redis protocol](https://redis.io/docs/reference/protocol-spec/) codec +* Fault injection according to the commands and key + +:::note + +This feature requires APISIX to be run on [APISIX-Runtime](../FAQ.md#how-do-i-build-the-apisix-runtime-environment). + +It also requires the data sent from clients are well-formed and sane. Therefore, it should only be used in deployments where both the downstream and upstream are trusted. + +::: + +## Granularity of the request + +Like other protocols based on the xRPC framework, the Redis implementation here also has the concept of `request`. + +Each Redis command is considered a request. 
However, messages pushed from the server to a subscriber won't be considered requests.
+|----------------------------------------------|---------------|----------|-----------------------------------------------|--------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| commands | array[string]        | True    |                                               | ["get", "mget"]  | Commands fault is restricted to | +| key | string        | False    |                                               | "blahblah"  | Key fault is restricted to | +| delay | number        | True    |                                               | 0.1  | Duration of the delay in seconds | + +## Metrics + +* `apisix_redis_commands_total`: Total number of requests for a specific Redis command. + + | Labels | Description | + | ------------- | -------------------- | + | route | matched stream route ID | + | command | the Redis command | + +* `apisix_redis_commands_latency_seconds`: Latency of requests for a specific Redis command. + + | Labels | Description | + | ------------- | -------------------- | + | route | matched stream route ID | + | command | the Redis command | + +## Example usage + +:::note +You can fetch the `admin_key` from `config.yaml` and save to an environment variable with the following command: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +Assumed the APISIX is proxying TCP on port `9101`, and the Redis is listening on port `6379`. 
+ +Let's create a Stream Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ +    "upstream": { +        "type": "none", +        "nodes": { +            "127.0.0.1:6379": 1 +        } +    }, +    "protocol": { +        "name": "redis", +        "conf": { +            "faults": [{ +                "commands": ["get", "ping"], +                "delay": 5 +            }] +        } +    } +} +' +``` + +Once you have configured the stream route, as shown above, you can make a request to it: + +```shell +redis-cli -p 9101 +``` + +``` +127.0.0.1:9101> ping +PONG +(5.00s) +``` + +You can notice that there is a 5 seconds delay for the ping command. diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/CHANGELOG.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/CHANGELOG.md new file mode 100644 index 0000000..4151f95 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/CHANGELOG.md @@ -0,0 +1,1593 @@ +--- +title: 版本发布 +--- + + + +## Table of Contents + +- [3.8.0](#380) +- [3.7.0](#370) +- [3.6.0](#360) +- [3.5.0](#350) +- [3.4.0](#340) +- [3.3.0](#330) +- [3.2.1](#321) +- [3.2.0](#320) +- [3.1.0](#310) +- [3.0.0](#300) +- [3.0.0-beta](#300-beta) +- [2.15.3](#2153) +- [2.15.2](#2152) +- [2.15.1](#2151) +- [2.15.0](#2150) +- [2.14.1](#2141) +- [2.14.0](#2140) +- [2.13.3](#2133) +- [2.13.2](#2132) +- [2.13.1](#2131) +- [2.13.0](#2130) +- [2.12.1](#2121) +- [2.12.0](#2120) +- [2.11.0](#2110) +- [2.10.5](#2105) +- [2.10.4](#2104) +- [2.10.3](#2103) +- [2.10.2](#2102) +- [2.10.1](#2101) +- [2.10.0](#2100) +- [2.9.0](#290) +- [2.8.0](#280) +- [2.7.0](#270) +- [2.6.0](#260) +- [2.5.0](#250) +- [2.4.0](#240) +- [2.3.0](#230) +- [2.2.0](#220) +- [2.1.0](#210) +- [2.0.0](#200) +- [1.5.0](#150) +- [1.4.1](#141) +- [1.4.0](#140) +- [1.3.0](#130) +- [1.2.0](#120) +- [1.1.0](#110) +- [1.0.0](#100) +- [0.9.0](#090) +- [0.8.0](#080) +- [0.7.0](#070) +- [0.6.0](#060) + +## 3.8.0 + +### 
Core + +- :sunrise: 支持使用 lua-resty-events 模块以提高性能: + - [#10550](https://github.com/apache/apisix/pull/10550) + - [#10558](https://github.com/apache/apisix/pull/10558) +- :sunrise: 将 OpenSSL 1.1.1 升级到 OpenSSL 3:[#10724](https://github.com/apache/apisix/pull/10724) + +### Plugins + +- :sunrise: 添加 jwe-decryp 插件:[#10252](https://github.com/apache/apisix/pull/10252) +- :sunrise: response-rewrite 插件使用 filters.regex 选项时支持 brotli:[#10733](https://github.com/apache/apisix/pull/10733) +- :sunrise: 添加多重认证插件:[#10482](https://github.com/apache/apisix/pull/10482) +- :sunrise: 在 `openid-connect` 插件中添加 `required scopes` 配置属性:[#10493](https://github.com/apache/apisix/pull/10493) +- :sunrise: cors 插件支持 Timing-Allow-Origin 头:[#9365](https://github.com/apache/apisix/pull/9365) +- :sunrise: 添加 brotli 插件:[#10515](https://github.com/apache/apisix/pull/10515) +- :sunrise: body-transformer 插件增强:[#10496](https://github.com/apache/apisix/pull/10496) +- :sunrise: limit-count 插件设置 redis_cluster_nodes 的最小长度为 1:[#10612](https://github.com/apache/apisix/pull/10612) +- :sunrise: 允许通过环境变量配置 limit-count 插件:[#10607](https://github.com/apache/apisix/pull/10607) + +### Bugfixes + +- 修复:upstream nodes 为数组类型时,port 应为可选字段:[#10477](https://github.com/apache/apisix/pull/10477) +- 修复:fault-injection 插件中变量提取不正确:[#10485](https://github.com/apache/apisix/pull/10485) +- 修复:所有消费者应共享同一计数器 (limit-count):[#10541](https://github.com/apache/apisix/pull/10541) +- 修复:在向 opa 插件发送路由时安全地删除上游:[#10552](https://github.com/apache/apisix/pull/10552) +- 修复:缺少 etcd init_dir 和无法列出资源:[#10569](https://github.com/apache/apisix/pull/10569) +- 修复:Forward-auth 请求体过大:[#10589](https://github.com/apache/apisix/pull/10589) +- 修复:永不退出的定时器导致的内存泄漏:[#10614](https://github.com/apache/apisix/pull/10614) +- 修复:如果在 proxy-rewrite 插件中解析的值为 nil,则不调用 add_header:[#10619](https://github.com/apache/apisix/pull/10619) +- 修复:频繁遍历 etcd 所有的键,导致 cpu 使用率高:[#10671](https://github.com/apache/apisix/pull/10671) +- 修复:对于 prometheus 的 upstream_status 
指标,mostly_healthy 是健康的:[#10639](https://github.com/apache/apisix/pull/10639) +- 修复:在 zipkin 中避免在日志阶段获取 nil 值:[#10666](https://github.com/apache/apisix/pull/10666) +- 修复:启用 openid-connect 插件而没有 redirect_uri 导致 500 错误:[#7690](https://github.com/apache/apisix/pull/7690) +- 修复:为没有 end_session_endpoint 的 ODIC 添加 redirect_after_logout_uri:[#10653](https://github.com/apache/apisix/pull/10653) +- 修复:当 content-encoding 为 gzip 时,response-rewrite 的 filters.regex 不适用:[#10637](https://github.com/apache/apisix/pull/10637) +- 修复:prometheus 指标的泄漏:[#10655](https://github.com/apache/apisix/pull/10655) +- 修复:Authz-keycloak 添加返回详细错误:[#10691](https://github.com/apache/apisix/pull/10691) +- 修复:服务发现未正确更新上游节点:[#10722](https://github.com/apache/apisix/pull/10722) +- 修复:apisix 重启失败:[#10696](https://github.com/apache/apisix/pull/10696) + +## 3.7.0 + +### Change + +- :warning: 创建核心资源时不允许传入 `create_time` 和 `update_time`:[#10232](https://github.com/apache/apisix/pull/10232) +- :warning: 从 SSL schema 中移除自包含的信息字段 `exptime`、`validity_start` 和 `validity_end`:[10323](https://github.com/apache/apisix/pull/10323) +- :warning: 在 opentelemetry 插件的属性中,将 `route` 替换为 `apisix.route_name`,将 `service` 替换为 `apisix.service_name`,以遵循 span 名称和属性的标准:[#10393](https://github.com/apache/apisix/pull/10393) + +### Core + +- :sunrise: 添加令牌以支持 Consul 的访问控制:[#10278](https://github.com/apache/apisix/pull/10278) +- :sunrise: 支持在 stream_route 中配置 `service_id` 引用 service 资源:[#10298](https://github.com/apache/apisix/pull/10298) +- :sunrise: 使用 `apisix-runtime` 作为 apisix 运行时: + - [#10415](https://github.com/apache/apisix/pull/10415) + - [#10427](https://github.com/apache/apisix/pull/10427) + +### Plugins + +- :sunrise: 为 authz-keycloak 添加测试,使用 apisix secrets:[#10353](https://github.com/apache/apisix/pull/10353) +- :sunrise: 向 openid-connect 插件添加授权参数:[#10058](https://github.com/apache/apisix/pull/10058) +- :sunrise: 支持在 zipkin 插件中设置变量:[#10361](https://github.com/apache/apisix/pull/10361) +- :sunrise: 支持 Nacos ak/sk 
认证:[#10445](https://github.com/apache/apisix/pull/10445) + +### Bugfixes + +- 修复:获取健康检查目标状态失败时使用警告日志: + - [#10156](https://github.com/apache/apisix/pull/10156) +- 修复:更新上游时应保留健康检查的状态: + - [#10312](https://github.com/apache/apisix/pull/10312) + - [#10307](https://github.com/apache/apisix/pull/10307) +- 修复:在插件配置模式中添加 name 字段以保持一致性:[#10315](https://github.com/apache/apisix/pull/10315) +- 修复:优化 upstream_schema 中的 tls 定义和错误的变量:[#10269](https://github.com/apache/apisix/pull/10269) +- 修复(consul):无法正常退出:[#10342](https://github.com/apache/apisix/pull/10342) +- 修复:请求头 `Content-Type: application/x-www-form-urlencoded;charset=utf-8` 会导致 var 条件 `post_arg_xxx` 匹配失败:[#10372](https://github.com/apache/apisix/pull/10372) +- 修复:在 Mac 上安装失败:[#10403](https://github.com/apache/apisix/pull/10403) +- 修复(log-rotate):日志压缩超时导致数据丢失:[#8620](https://github.com/apache/apisix/pull/8620) +- 修复(kafka-logger):从 required_acks 枚举值中移除 0:[#10469](https://github.com/apache/apisix/pull/10469) + +## 3.6.0 + +### Change + +- :warning: 移除 `etcd.use_grpc`,不再支持使用 gRPC 协议与 etcd 进行通信:[#10015](https://github.com/apache/apisix/pull/10015) +- :warning: 移除 conf server,数据平面不再支持与控制平面进行通信,需要从 `config_provider: control_plane` 调整为 `config_provider: etcd`:[#10012](https://github.com/apache/apisix/pull/10012) +- :warning: 严格验证核心资源的输入:[#10233](https://github.com/apache/apisix/pull/10233) + +### Core + +- :sunrise: 支持配置访问日志的缓冲区大小:[#10225](https://github.com/apache/apisix/pull/10225) +- :sunrise: 支持在 DNS 发现服务中允许配置 `resolv_conf` 来使用本地 DNS 解析器:[#9770](https://github.com/apache/apisix/pull/9770) +- :sunrise: 安装不再依赖 Rust:[#10121](https://github.com/apache/apisix/pull/10121) +- :sunrise: 在 xRPC 中添加 Dubbo 协议支持:[#9660](https://github.com/apache/apisix/pull/9660) + +### Plugins + +- :sunrise: 在 `traffic-split` 插件中支持 HTTPS:[#9115](https://github.com/apache/apisix/pull/9115) +- :sunrise: 在 `ext-plugin` 插件中支持重写请求体:[#9990](https://github.com/apache/apisix/pull/9990) +- :sunrise: 在 `opentelemetry` 插件中支持设置 NGINX 
变量:[#8871](https://github.com/apache/apisix/pull/8871) +- :sunrise: 在 `chaitin-waf` 插件中支持 UNIX sock 主机模式:[#10161](https://github.com/apache/apisix/pull/10161) + +### Bugfixes + +- 修复 GraphQL POST 请求路由匹配异常:[#10198](https://github.com/apache/apisix/pull/10198) +- 修复 `apisix.yaml` 中多行字符串数组的错误:[#10193](https://github.com/apache/apisix/pull/10193) +- 修复在 proxy-cache 插件中缺少 cache_zone 时提供错误而不是 nil panic:[#10138](https://github.com/apache/apisix/pull/10138) + +## 3.5.0 + +### Change + +- :warning: request-id 插件移除雪花算法:[#9715](https://github.com/apache/apisix/pull/9715) +- :warning: 不再兼容 OpenResty 1.19 版本,需要将其升级到 1.21+ 版本:[#9913](https://github.com/apache/apisix/pull/9913) +- :warning: 删除配置项 `apisix.stream_proxy.only`,L4/L7 代理需要通过配置项 `apesix.proxy_mode` 来启用:[#9607](https://github.com/apache/apisix/pull/9607) +- :warning: admin-api 的 `/apisix/admin/plugins?all=true` 接口标记为弃用:[#9580](https://github.com/apache/apisix/pull/9580) +- :warning: ua-restriction 插件不允许同时启用黑名单和白名单:[#9841](https://github.com/apache/apisix/pull/9841) + +### Core + +- :sunrise: 支持根据 host 级别动态设置 TLS 协议版本:[#9903](https://github.com/apache/apisix/pull/9903) +- :sunrise: 支持强制删除资源:[#9810](https://github.com/apache/apisix/pull/9810) +- :sunrise: 支持从 yaml 中提取环境变量:[#9855](https://github.com/apache/apisix/pull/9855) +- :sunrise: admin-api 新增 schema validate API 校验资源配置:[#10065](https://github.com/apache/apisix/pull/10065) + +### Plugins + +- :sunrise: 新增 chaitin-waf 插件:[#9838](https://github.com/apache/apisix/pull/9838) +- :sunrise: file-logger 支持设置 var 变量:[#9712](https://github.com/apache/apisix/pull/9712) +- :sunrise: mock 插件支持添加响应头:[#9720](https://github.com/apache/apisix/pull/9720) +- :sunrise: proxy-rewrite 插件支持正则匹配 URL 编码:[#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: google-cloud-logging 插件支持 client_email 配置:[#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: opa 插件支持向上游发送 OPA server 返回的头:[#9710](https://github.com/apache/apisix/pull/9710) +- :sunrise: openid-connect 
插件支持配置代理服务器:[#9948](https://github.com/apache/apisix/pull/9948) + +### Bugfixes + +- 修复 log-rotate 插件使用自定义名称时,max_kept 配置不起作用:[#9749](https://github.com/apache/apisix/pull/9749) +- 修复 limit_conn 在 stream 模式下非法使用 http 变量:[#9816](https://github.com/apache/apisix/pull/9816) +- 修复 loki-logger 插件在获取 log_labels 时会索引空值:[#9850](https://github.com/apache/apisix/pull/9850) +- 修复使用 limit-count 插件时,当请求被拒绝后,X-RateLimit-Reset 不应设置为 0:[#9978](https://github.com/apache/apisix/pull/9978) +- 修复 nacos 插件在运行时索引一个空值:[#9960](https://github.com/apache/apisix/pull/9960) +- 修复 etcd 在同步数据时,如果密钥有特殊字符,则同步异常:[#9967](https://github.com/apache/apisix/pull/9967) +- 修复 tencent-cloud-cls 插件 DNS 解析失败:[#9843](https://github.com/apache/apisix/pull/9843) +- 修复执行 reload 或 quit 命令时 worker 未退出:[#9909](https://github.com/apache/apisix/pull/9909) +- 修复在 traffic-split 插件中 upstream_id 有效性验证:[#10008](https://github.com/apache/apisix/pull/10008) + +## 3.4.0 + +### Core + +- :sunrise: 支持路由级别的 MTLS [#9322](https://github.com/apache/apisix/pull/9322) +- :sunrise: 支持全局规则的 id schema [#9517](https://github.com/apache/apisix/pull/9517) +- :sunrise: 支持使用单个长连接来监视 etcd 的所有资源 [#9456](https://github.com/apache/apisix/pull/9456) +- :sunrise: 支持 ssl 标签的最大长度为 256 [#9301](https://github.com/apache/apisix/pull/9301) + +### Plugins + +- :sunrise: 支持 proxy_rewrite 插件的多个正则表达式匹配 [#9194](https://github.com/apache/apisix/pull/9194) +- :sunrise: 添加 loki-logger 插件 [#9399](https://github.com/apache/apisix/pull/9399) +- :sunrise: 允许用户为 prometheus 插件配置 DEFAULT_BUCKETS [#9673](https://github.com/apache/apisix/pull/9673) + +### Bugfixes + +- 修复 (body-transformer):xml2lua 将空表替换为空字符串 [#9669](https://github.com/apache/apisix/pull/9669) +- 修复:opentelemetry 和 grpc-transcode 插件无法同时启用 [#9606](https://github.com/apache/apisix/pull/9606) +- 修复 (skywalking-logger, error-log-logger):支持在 skywalking service_instance_name 中使用 $hostname [#9401](https://github.com/apache/apisix/pull/9401) +- 修复 (admin):修复 secrets 不支持通过 PATCH 更新属性 
[#9510](https://github.com/apache/apisix/pull/9510) +- 修复 (http-logger):默认请求路径应为'/' [#9472](https://github.com/apache/apisix/pull/9472) +- 修复:syslog 插件不起作用 [#9425](https://github.com/apache/apisix/pull/9425) +- 修复:splunk-hec-logging 的日志格式错误 [#9478](https://github.com/apache/apisix/pull/9478) +- 修复:etcd 复用 cli 并启用 keepalive [#9420](https://github.com/apache/apisix/pull/9420) +- 修复:upstream key 添加 mqtt_client_id 支持 [#9450](https://github.com/apache/apisix/pull/9450) +- 修复:body-transformer 插件总是返回原始 body [#9446](https://github.com/apache/apisix/pull/9446) +- 修复:当 consumer 使用 wolf-rbac 插件时,consumer 中的其他插件无效 [#9298](https://github.com/apache/apisix/pull/9298) +- 修复:当 host 是域名时,总是解析域名 [#9332](https://github.com/apache/apisix/pull/9332) +- 修复:response-rewrite 插件不能只添加一个字符 [#9372](https://github.com/apache/apisix/pull/9372) +- 修复:consul 支持只获取 health endpoint [#9204](https://github.com/apache/apisix/pull/9204) + +## 3.3.0 + +### Change + +- 默认路由从 `radixtree_uri` 修改为 `radixtree_host_uri`: [#9047](https://github.com/apache/apisix/pull/9047) +- CORS 插件将会在 `allow_origin` 不为 `*` 时默认添加 `Vary: Origin` 响应头:[#9010](https://github.com/apache/apisix/pull/9010) + +### Core + +- :sunrise: 支持将路由证书存储在 secrets manager 中:[#9247](https://github.com/apache/apisix/pull/9247) +- :sunrise: 支持通过配置绕过 Admin API 身份验证:[#9147](https://github.com/apache/apisix/pull/9147) + +### Plugins + +- :sunrise: fault-injection 插件支持请求头注入:[#9039](https://github.com/apache/apisix/pull/9039) +- :sunrise: 提供在其他插件中引用 proxy-rewrite 插件中路由改写捕捉到的变量支持:[#9112](https://github.com/apache/apisix/pull/9112) +- :sunrise: limit-count 插件提供 `username` 与 `ssl` redis 认证方式:[#9185](https://github.com/apache/apisix/pull/9185) + +### Bugfixes + +- 修复 etcd 数据同步异常:[#8493](https://github.com/apache/apisix/pull/8493) +- 修复在 `core.request.add_header` 中的无效缓存:[#8824](https://github.com/apache/apisix/pull/8824) +- 修复由健康检查引起的高 CPU 和内存占用:[#9015](https://github.com/apache/apisix/pull/9015) +- 仅当 `allow_origins_by_regex` 不为 `nil` 
时生效:[#9028](https://github.com/apache/apisix/pull/9028) +- 在删除 upstream 时,检查 `traffic-split` 插件中的引用:[#9044](https://github.com/apache/apisix/pull/9044) +- 修复启动时无法连接到 etcd 的问题:[#9077](https://github.com/apache/apisix/pull/9077) +- 修复域节点的健康检查泄漏问题:[#9090](https://github.com/apache/apisix/pull/9090) +- 禁止非 `127.0.0.0/24` 的用户在没有 admin_key 的情况下访问 Admin API: [#9146](https://github.com/apache/apisix/pull/9146) +- 确保 hold_body_chunk 函数对每个插件设置独立缓冲区,避免数据污染:[#9266](https://github.com/apache/apisix/pull/9266) +- 确保 batch-requests 插件能够在尾部响应头存在时能够正确读取:[#9289](https://github.com/apache/apisix/pull/9289) +- 确保 `proxy-rewrite` 改写 `ngx.var.uri`: [#9309](https://github.com/apache/apisix/pull/9309) + +## 3.2.1 + +**这是一个 LTS 维护版本,您可以在 `release/3.2` 分支中看到 CHANGELOG。** + +## 3.2.0 + +### Change + +- 废弃了 jwt-auth 内单独的 Vault 配置。用户能用密钥来实现同样的功能:[#8660](https://github.com/apache/apisix/pull/8660) + +### Core + +- :sunrise: 支持通过环境变量来配置密钥的 Vault token:[#8866](https://github.com/apache/apisix/pull/8866) +- :sunrise: 支持四层上的服务发现: + - [#8583](https://github.com/apache/apisix/pull/8583) + - [#8593](https://github.com/apache/apisix/pull/8593) + - [#8584](https://github.com/apache/apisix/pull/8584) + - [#8640](https://github.com/apache/apisix/pull/8640) + - [#8633](https://github.com/apache/apisix/pull/8633) + - [#8696](https://github.com/apache/apisix/pull/8696) + - [#8826](https://github.com/apache/apisix/pull/8826) + +### Plugin + +- :sunrise: 新增 RESTful 请求转 graphQL 的插件:[#8959](https://github.com/apache/apisix/pull/8959) +- :sunrise: 支持在每个日志插件上设置日志格式: + - [#8806](https://github.com/apache/apisix/pull/8806) + - [#8643](https://github.com/apache/apisix/pull/8643) +- :sunrise: 新增请求体/响应体转换插件:[#8766](https://github.com/apache/apisix/pull/8766) +- :sunrise: 支持发送错误日志到 Kafka:[#8693](https://github.com/apache/apisix/pull/8693) +- :sunrise: limit-count 插件支持 X-RateLimit-Reset:[#8578](https://github.com/apache/apisix/pull/8578) +- :sunrise: limit-count 插件支持设置 TLS 来访问 Redis 
集群:[#8558](https://github.com/apache/apisix/pull/8558) +- :sunrise: consumer-restriction 插件支持通过 consumer_group_id 来做权限控制:[#8567](https://github.com/apache/apisix/pull/8567) + +### Bugfix + +- 修复 Host 和 SNI 不匹配时,mTLS 失效的问题:[#8967](https://github.com/apache/apisix/pull/8967) +- 如果 URI 参数部分不来自于用户配置,proxy-rewrite 插件应当对其转义:[#8888](https://github.com/apache/apisix/pull/8888) +- Admin API PATCH 操作成功后应返回 200 状态码:[#8855](https://github.com/apache/apisix/pull/8855) +- 修复特定条件下,etcd 同步失败之后的 reload 不生效:[#8736](https://github.com/apache/apisix/pull/8736) +- 修复 Consul 服务发现得到的节点不全的问题:[#8651](https://github.com/apache/apisix/pull/8651) +- 修复 grpc-transcode 插件对 Map 数据的转换问题:[#8731](https://github.com/apache/apisix/pull/8731) +- 外部插件应当可以设置 content-type 响应头:[#8588](https://github.com/apache/apisix/pull/8588) +- 插件热加载时,如果 request-id 插件中初始化 snowflake 生成器出错,可能遗留多余的计时器:[#8556](https://github.com/apache/apisix/pull/8556) +- 插件热加载时,关闭 grpc-transcode 的 proto 同步器:[#8557](https://github.com/apache/apisix/pull/8557) + +## 3.1.0 + +### Core + +- :sunrise: 支持通过 gRPC 来同步 etcd 的配置: + - [#8485](https://github.com/apache/apisix/pull/8485) + - [#8450](https://github.com/apache/apisix/pull/8450) + - [#8411](https://github.com/apache/apisix/pull/8411) +- :sunrise: 支持在插件中配置加密字段: + - [#8487](https://github.com/apache/apisix/pull/8487) + - [#8403](https://github.com/apache/apisix/pull/8403) +- :sunrise: 支持使用 secret 资源将部分字段放到 Vault 或环境变量中: + - [#8448](https://github.com/apache/apisix/pull/8448) + - [#8421](https://github.com/apache/apisix/pull/8421) + - [#8412](https://github.com/apache/apisix/pull/8412) + - [#8394](https://github.com/apache/apisix/pull/8394) + - [#8390](https://github.com/apache/apisix/pull/8390) +- :sunrise: 允许在 stream 子系统中以域名的形式配置上游:[#8500](https://github.com/apache/apisix/pull/8500) +- :sunrise: 支持 Consul 服务发现:[#8380](https://github.com/apache/apisix/pull/8380) + +### Plugin + +- :sunrise: 优化 prometheus 采集的资源占用:[#8434](https://github.com/apache/apisix/pull/8434) +- :sunrise: 增加便于调试的 
inspect 插件: [#8400](https://github.com/apache/apisix/pull/8400) +- :sunrise: jwt-auth 插件支持对上游隐蔽认证的参数:[#8206](https://github.com/apache/apisix/pull/8206) +- :sunrise: proxy-rewrite 插件支持新增请求头的同时不覆盖现有同名请求头:[#8336](https://github.com/apache/apisix/pull/8336) +- :sunrise: grpc-transcode 插件支持将 grpc-status-details-bin 响应头设置到响应体中:[#7639](https://github.com/apache/apisix/pull/7639) +- :sunrise: proxy-mirror 插件支持设置前缀:[#8261](https://github.com/apache/apisix/pull/8261) + +### Bugfix + +- 修复某些情况下,配置在 service 对象下的插件无法及时生效的问题:[#8482](https://github.com/apache/apisix/pull/8482) +- 修复因连接池复用,http 和 grpc 共用同一个上游节点时偶发 502 的问题:[#8364](https://github.com/apache/apisix/pull/8364) +- file-logger 在写日志时,应避免缓冲区造成的日志截断:[#7884](https://github.com/apache/apisix/pull/7884) +- log-rotate 插件的 max_kept 参数应对压缩文件生效:[#8366](https://github.com/apache/apisix/pull/8366) +- 修复 openid-connect 插件中当 use_jwks 为 true 时没有设置 userinfo 的问题:[#8347](https://github.com/apache/apisix/pull/8347) +- 修复无法在 proxy-rewrite 插件中修改 x-forwarded-host 的问题:[#8200](https://github.com/apache/apisix/pull/8200) +- 修复某些情况下,禁用 v3 admin API 导致响应体丢失:[#8349](https://github.com/apache/apisix/pull/8349) +- zipkin 插件中,即使存在 reject 的 sampling decision,也要传递 trace ID:[#8099](https://github.com/apache/apisix/pull/8099) +- 修复插件配置中的 `_meta.filter` 无法使用上游响应后才赋值的变量和 APISIX 中自定义变量的问题: + - [#8162](https://github.com/apache/apisix/pull/8162) + - [#8256](https://github.com/apache/apisix/pull/8256) + +## 3.0.0 + +### Change + +- 默认关闭 `enable_cpu_affinity`,避免在容器部署场景中该配置影响 APISIX 的行为:[#8074](https://github.com/apache/apisix/pull/8074) + +### Core + +- :sunrise: 新增 Consumer Group 实体,用于管理多个 Consumer:[#7980](https://github.com/apache/apisix/pull/7980) +- :sunrise: 支持配置 DNS 解析域名类型的顺序:[#7935](https://github.com/apache/apisix/pull/7935) +- :sunrise: 支持配置多个 `key_encrypt_salt` 进行轮转:[#7925](https://github.com/apache/apisix/pull/7925) + +### Plugin + +- :sunrise: 新增 ai 插件,根据场景动态优化 APISIX 的执行路径: + - [#8102](https://github.com/apache/apisix/pull/8102) + - 
[#8113](https://github.com/apache/apisix/pull/8113) + - [#8120](https://github.com/apache/apisix/pull/8120) + - [#8128](https://github.com/apache/apisix/pull/8128) + - [#8130](https://github.com/apache/apisix/pull/8130) + - [#8149](https://github.com/apache/apisix/pull/8149) + - [#8157](https://github.com/apache/apisix/pull/8157) +- :sunrise: openid-connect 插件支持设置 `session_secret`,解决多个 worker 间 `session_secret` 不一致的问题:[#8068](https://github.com/apache/apisix/pull/8068) +- :sunrise: kafka-logger 插件支持设置 sasl 相关配置:[#8050](https://github.com/apache/apisix/pull/8050) +- :sunrise: proxy-mirror 插件支持设置域名作为 host:[#7861](https://github.com/apache/apisix/pull/7861) +- :sunrise: kafka-logger 插件新增 brokers 属性,支持不同 broker 设置相同 host:[#7999](https://github.com/apache/apisix/pull/7999) +- :sunrise: ext-plugin-post-resp 插件支持获取上游响应体:[#7947](https://github.com/apache/apisix/pull/7947) +- :sunrise: 新增 cas-auth 插件,支持 CAS 认证:[#7932](https://github.com/apache/apisix/pull/7932) + +### Bugfix + +- workflow 插件的条件表达式应该支持操作符:[#8121](https://github.com/apache/apisix/pull/8121) +- 修复禁用 prometheus 插件时 batch processor 加载问题:[#8079](https://github.com/apache/apisix/pull/8079) +- APISIX 启动时,如果存在旧的 conf server 的 sock 文件则删除:[#8022](https://github.com/apache/apisix/pull/8022) +- 没有编译 gRPC-client-nginx-module 模块时禁用 core.grpc:[#8007](https://github.com/apache/apisix/pull/8007) + +## 3.0.0-beta + +这里我们使用 `2.99.0` 作为源代码中的版本号,而不是代码名称 +`3.0.0-beta`,有两个原因。 + +1. 避免在一些程序试图比较版本时出现意外的错误,因为 `3.0.0-beta` 包含 `3.0.0` 并且比它长。 +2. 
一些软件包系统可能不允许在版本号后面有一个后缀。 + +### Change + +#### 移动 config_center、etcd 和 Admin API 的配置到 deployment 下面 + +我们调整了下静态配置文件里面的配置,所以你需要同步更新下 config.yaml 里面的配置了: + +- `config_center` 功能改由 `deployment` 下面的 `config_provider` 实现: [#7901](https://github.com/apache/apisix/pull/7901) +- `etcd` 字段整体搬迁到 `deployment` 下面: [#7860](https://github.com/apache/apisix/pull/7860) +- 以下的 Admin API 配置移动到 `deployment` 下面的 `admin` 字段:[#7823](https://github.com/apache/apisix/pull/7823) + - admin_key + - enable_admin_cors + - allow_admin + - admin_listen + - https_admin + - admin_api_mtls + - admin_api_version + +具体可以参考最新的 config-default.yaml。 + +#### 移除多个已废弃的配置 + +借着 3.0 新版本的机会,我们把许多之前标记为 deprecated 的配置清理出去。 + +在静态配置中,我们移除了以下若干字段: + +- 移除 `apisix.ssl` 中的 `enable_http2` 和 `listen_port`:[#7717](https://github.com/apache/apisix/pull/7717) +- 移除 `apisix.port_admin`: [#7716](https://github.com/apache/apisix/pull/7716) +- 移除 `etcd.health_check_retry`: [#7676](https://github.com/apache/apisix/pull/7676) +- 移除 `nginx_config.http.lua_shared_dicts`: [#7677](https://github.com/apache/apisix/pull/7677) +- 移除 `apisix.real_ip_header`: [#7696](https://github.com/apache/apisix/pull/7696) + +在动态配置中,我们做了以下调整: + +- 将插件配置的 `disable` 移到 `_meta` 下面:[#7707](https://github.com/apache/apisix/pull/7707) +- 从 Route 里面移除了 `service_protocol`:[#7701](https://github.com/apache/apisix/pull/7701) + +此外还有具体插件级别上的改动: + +- authz-keycloak 中移除了 `audience` 字段: [#7683](https://github.com/apache/apisix/pull/7683) +- mqtt-proxy 中移除了 `upstream` 字段:[#7694](https://github.com/apache/apisix/pull/7694) +- error-log-logger 中把 tcp 相关配置放到 `tcp` 字段下面:[#7700](https://github.com/apache/apisix/pull/7700) +- syslog 中移除了 `max_retry_times` 和 `retry_interval` 字段: [#7699](https://github.com/apache/apisix/pull/7699) +- proxy-rewrite 中移除了 `scheme` 字段: [#7695](https://github.com/apache/apisix/pull/7695) + +#### 新的 Admin API 响应格式 + +我们在以下若干个 PR 中调整了 Admin API 的响应格式: + +- [#7630](https://github.com/apache/apisix/pull/7630) +- 
[#7622](https://github.com/apache/apisix/pull/7622) + +新的响应格式展示如下: + +返回单个配置: + +```json +{ + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 +} +``` + +返回多个配置: + +```json +{ + "list": [ + { + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 + }, + { + "modifiedIndex": 2685163, + "value": { + "id": "2", + ... + }, + "key": "/apisix/routes/2", + "createdIndex": 2685163 + } + ], + "total": 2 +} +``` + +#### 其他 + +- Admin API 的端口改为 9180:[#7806](https://github.com/apache/apisix/pull/7806) +- 我们只支持 OpenResty 1.19.3.2 及以上的版本:[#7625](https://github.com/apache/apisix/pull/7625) +- 调整了 Plugin Config 对象的优先级,同名插件配置的优先级由 Consumer > Plugin Config > Route > Service 变成 Consumer > Route > Plugin Config > Service: [#7614](https://github.com/apache/apisix/pull/7614) + +### Core + +- 集成 grpc-client-nginx-module 到 APISIX: [#7917](https://github.com/apache/apisix/pull/7917) +- k8s 服务发现支持配置多个集群:[#7895](https://github.com/apache/apisix/pull/7895) + +### Plugin + +- 支持在 opentelemetry 插件里注入指定前缀的 header:[#7822](https://github.com/apache/apisix/pull/7822) +- 新增 openfunction 插件:[#7634](https://github.com/apache/apisix/pull/7634) +- 新增 elasticsearch-logger 插件:[#7643](https://github.com/apache/apisix/pull/7643) +- response-rewrite 插件支持增加响应体:[#7794](https://github.com/apache/apisix/pull/7794) +- log-rotate 支持指定最大大小来切割日志:[#7749](https://github.com/apache/apisix/pull/7749) +- 新增 workflow 插件: + - [#7760](https://github.com/apache/apisix/pull/7760) + - [#7771](https://github.com/apache/apisix/pull/7771) +- 新增 Tencent Cloud Log Service 插件:[#7593](https://github.com/apache/apisix/pull/7593) +- jwt-auth 支持 ES256 算法: [#7627](https://github.com/apache/apisix/pull/7627) +- ldap-auth 内部实现,由 lualdap 换成 lua-resty-ldap:[#7590](https://github.com/apache/apisix/pull/7590) +- prometheus 插件内的 http request metrics 支持通过变量来设置额外的 
labels:[#7549](https://github.com/apache/apisix/pull/7549) +- clickhouse-logger 插件支持指定多个 clickhouse endpoints: [#7517](https://github.com/apache/apisix/pull/7517) + +### Bugfix + +- gRPC 代理设置 :authority 请求头为配置的上游 Host: [#7939](https://github.com/apache/apisix/pull/7939) +- response-rewrite 写入空 body 时有可能导致 APISIX 无法响应该请求:[#7836](https://github.com/apache/apisix/pull/7836) +- 修复同时使用 Plugin Config 和 Consumer,有一定概率发生插件配置没有更新的问题:[#7965](https://github.com/apache/apisix/pull/7965) +- 日志切割时,只 reopen 一次日志文件:[#7869](https://github.com/apache/apisix/pull/7869) +- 默认不应开启被动健康检查: [#7850](https://github.com/apache/apisix/pull/7850) +- zipkin 插件即使不进行 sample,也要向上游传递 trace IDs: [#7833](https://github.com/apache/apisix/pull/7833) +- 将 opentelemetry 的 span kind 更正为 server: [#7830](https://github.com/apache/apisix/pull/7830) +- limit-count 插件中,同样配置的不同路由不应该共享同一个计数器:[#7750](https://github.com/apache/apisix/pull/7750) +- 修复偶发的移除 clean_handler 时抛异常的问题: [#7648](https://github.com/apache/apisix/pull/7648) +- 允许配置上游节点时直接使用 IPv6 字面量: [#7594](https://github.com/apache/apisix/pull/7594) +- wolf-rbac 插件调整对错误的响应方式: + - [#7561](https://github.com/apache/apisix/pull/7561) + - [#7497](https://github.com/apache/apisix/pull/7497) +- 当代理到上游之前发生 500 错误时,代理到上游之后运行的插件不应被跳过 [#7703](https://github.com/apache/apisix/pull/7703) +- 当 consumer 上绑定了多个插件且该插件定义了 rewrite 方法时,避免抛出异常 [#7531](https://github.com/apache/apisix/pull/7531) +- 升级 lua-resty-etcd 到 1.8.3。该版本修复了若干问题。 [#7565](https://github.com/apache/apisix/pull/7565) + +## 2.15.3 + +**这是一个 LTS 维护版本,您可以在 `release/2.15` 分支中看到 CHANGELOG。** + +## 2.15.2 + +**这是一个 LTS 维护版本,您可以在 `release/2.15` 分支中看到 CHANGELOG。** + +## 2.15.1 + +**这是一个 LTS 维护版本,您可以在 `release/2.15` 分支中看到 CHANGELOG。** + +## 2.15.0 + +### Change + +- grpc 状态码 OUT_OF_RANGE 如今会在 grpc-transcode 插件中作为 http 状态码 400: [#7419](https://github.com/apache/apisix/pull/7419) +- 重命名 `etcd.health_check_retry` 配置项为 `startup_retry`。 [#7304](https://github.com/apache/apisix/pull/7304) +- 移除 
`upstream.enable_websocket`。该配置已于 2020 年标记成已过时。 [#7222](https://github.com/apache/apisix/pull/7222) + +### Core + +- 支持动态启用插件 [#7453](https://github.com/apache/apisix/pull/7453) +- 支持动态指定插件执行顺序 [#7273](https://github.com/apache/apisix/pull/7273) +- 支持 Upstream 对象从 SSL 对象中引用证书 [#7221](https://github.com/apache/apisix/pull/7221) +- 允许在插件中使用自定义错误 [#7128](https://github.com/apache/apisix/pull/7128) +- xRPC Redis 代理增加 metrics: [#7183](https://github.com/apache/apisix/pull/7183) +- 引入 deployment role 概念来简化 APISIX 的部署: + - [#7405](https://github.com/apache/apisix/pull/7405) + - [#7417](https://github.com/apache/apisix/pull/7417) + - [#7392](https://github.com/apache/apisix/pull/7392) + - [#7365](https://github.com/apache/apisix/pull/7365) + - [#7249](https://github.com/apache/apisix/pull/7249) + +### Plugin + +- prometheus 指标中提供 ngx.shared.dict 统计信息 [#7412](https://github.com/apache/apisix/pull/7412) +- 允许在 proxy-rewrite 插件中使用客户端发过来的原始 URL [#7401](https://github.com/apache/apisix/pull/7401) +- openid-connect 插件支持 PKCE: [#7370](https://github.com/apache/apisix/pull/7370) +- sls-logger 插件支持自定义日志格式 [#7328](https://github.com/apache/apisix/pull/7328) +- kafka-logger 插件支持更多的 Kafka 客户端配置 [#7266](https://github.com/apache/apisix/pull/7266) +- openid-connect 插件支持暴露 refresh token [#7220](https://github.com/apache/apisix/pull/7220) +- 移植 prometheus 插件到 stream 子系统 [#7174](https://github.com/apache/apisix/pull/7174) + +### Bugfix + +- Kubernetes 服务发现在重试时应当清除上一次尝试时遗留的状态 [#7506](https://github.com/apache/apisix/pull/7506) +- redirect 插件禁止同时启用冲突的 http_to_https 和 append_query_string 配置 [#7433](https://github.com/apache/apisix/pull/7433) +- 默认配置下,http-logger 不再发送空 Authorization 头 [#7444](https://github.com/apache/apisix/pull/7444) +- 修复 limit-count 插件不能同时配置 group 和 disable 的问题 [#7384](https://github.com/apache/apisix/pull/7384) +- 让 request-id 插件优先执行,这样 tracing 插件可以用到 request id [#7281](https://github.com/apache/apisix/pull/7281) +- 更正 grpc-transcode 插件中对 repeated Message 的处理。 
[#7231](https://github.com/apache/apisix/pull/7231) +- 允许 proxy-cache 插件 cache key 出现缺少的值。 [#7168](https://github.com/apache/apisix/pull/7168) +- 减少 chash 负载均衡节点权重过大时额外的内存消耗。 [#7103](https://github.com/apache/apisix/pull/7103) +- proxy-cache 插件 method 不匹配时不应该返回缓存结果。 [#7111](https://github.com/apache/apisix/pull/7111) +- 上游 keepalive 应考虑 TLS 参数: + - [#7054](https://github.com/apache/apisix/pull/7054) + - [#7466](https://github.com/apache/apisix/pull/7466) +- 重定向插件在将 HTTP 重定向到 HTTPS 时设置了正确的端口。 + - [#7065](https://github.com/apache/apisix/pull/7065) + +## 2.14.1 + +### Bugfix + +- `real_ip_from` 中配置 "unix: " 不应该导致 batch-requests 插件无法使用 [#7106](https://github.com/apache/apisix/pull/7106) + +## 2.14.0 + +### Change + +- 为了适应 OpenTelemetry 规范的变化,OTLP/HTTP 的默认端口改为 4318: [#7007](https://github.com/apache/apisix/pull/7007) + +### Core + +- 引入一个实验性功能,允许通过 APISIX 订阅 Kafka 消息。这个功能是基于 websocket 上面运行的 pubsub 框架。 + - [#7028](https://github.com/apache/apisix/pull/7028) + - [#7032](https://github.com/apache/apisix/pull/7032) +- 引入一个名为 xRPC 的实验性框架来管理非 HTTP 的 L7 流量。 + - [#6885](https://github.com/apache/apisix/pull/6885) + - [#6901](https://github.com/apache/apisix/pull/6901) + - [#6919](https://github.com/apache/apisix/pull/6919) + - [#6960](https://github.com/apache/apisix/pull/6960) + - [#6965](https://github.com/apache/apisix/pull/6965) + - [#7040](https://github.com/apache/apisix/pull/7040) +- 现在我们支持在代理 Redis traffic 过程中根据命令和键添加延迟,它建立在 xRPC 之上。 + - [#6999](https://github.com/apache/apisix/pull/6999) +- 引入实验性支持,通过 xDS 配置 APISIX。 + - [#6614](https://github.com/apache/apisix/pull/6614) + - [#6759](https://github.com/apache/apisix/pull/6759) +- 增加 `normalize_uri_like_servlet` 配置选项,像 servlet 一样规范化 URI。[#6984](https://github.com/apache/apisix/pull/6984) +- 通过 apisix-seed 实现 Zookeeper 服务发现:[#6751](https://github.com/apache/apisix/pull/6751) + +### Plugin + +- real-ip 插件支持像 `real_ip_recursive` 那样的递归 IP 搜索。[#6988](https://github.com/apache/apisix/pull/6988) +- api-breaker 
插件允许配置响应。[#6949](https://github.com/apache/apisix/pull/6949) +- response-rewrite 插件支持正文过滤器。[#6750](https://github.com/apache/apisix/pull/6750) +- request-id 插件增加了 nanoid 算法来生成 ID:[#6779](https://github.com/apache/apisix/pull/6779) +- file-logger 插件可以缓存和重开 file handler。[#6721](https://github.com/apache/apisix/pull/6721) +- 增加 casdoor 插件。[#6382](https://github.com/apache/apisix/pull/6382) +- authz-keycloak 插件支持 password grant:[#6586](https://github.com/apache/apisix/pull/6586) + +### Bugfix + +- 上游 keepalive 应考虑 TLS 参数:[#7054](https://github.com/apache/apisix/pull/7054) +- 不要将内部错误信息暴露给客户端。 + - [#6982](https://github.com/apache/apisix/pull/6982) + - [#6859](https://github.com/apache/apisix/pull/6859) + - [#6854](https://github.com/apache/apisix/pull/6854) + - [#6853](https://github.com/apache/apisix/pull/6853) + - [#6846](https://github.com/apache/apisix/pull/6846) +- DNS 支持端口为 0 的 SRV 记录:[#6739](https://github.com/apache/apisix/pull/6739) +- 修复客户端 mTLS 在 TLS 会话重用中有时不生效的问题:[#6906](https://github.com/apache/apisix/pull/6906) +- grpc-web 插件不会在响应中覆盖 Access-Control-Allow-Origin 头。[#6842](https://github.com/apache/apisix/pull/6842) +- syslog 插件的默认超时已被纠正。[#6807](https://github.com/apache/apisix/pull/6807) +- 修复 authz-keycloak 插件的 `access_denied_redirect_uri` 的设置有时不生效的问题。[#6794](https://github.com/apache/apisix/pull/6794) +- 正确处理 `USR2` 信号。[#6758](https://github.com/apache/apisix/pull/6758) +- 重定向插件在将 HTTP 重定向到 HTTPS 时设置了正确的端口。 + - [#7065](https://github.com/apache/apisix/pull/7065) + - [#6686](https://github.com/apache/apisix/pull/6686) +- Admin API 拒绝未知的 stream 插件。[#6813](https://github.com/apache/apisix/pull/6813) + +## 2.13.3 + +**这是一个 LTS 维护版本,您可以在 `release/2.13` 分支中看到 CHANGELOG。** + +## 2.13.2 + +**这是一个 LTS 维护版本,您可以在 `release/2.13` 分支中看到 CHANGELOG。** + +## 2.13.1 + +**这是一个 LTS 维护版本,您可以在 `release/2.13` 分支中看到 CHANGELOG。** + +## 2.13.0 + +### Change + +- 更正 syslog 插件的配置 [#6551](https://github.com/apache/apisix/pull/6551) +- server-info 插件使用新方法来上报 DP 面信息 
[#6202](https://github.com/apache/apisix/pull/6202) +- Admin API 返回的空 nodes 应当被编码为数组 [#6384](https://github.com/apache/apisix/pull/6384) +- 更正 prometheus 统计指标 apisix_nginx_http_current_connections{state="total"} [#6327](https://github.com/apache/apisix/pull/6327) +- 不再默认暴露 public API 并移除 plugin interceptor [#6196](https://github.com/apache/apisix/pull/6196) + +### Core + +- :sunrise: 新增 delayed_body_filter 阶段 [#6605](https://github.com/apache/apisix/pull/6605) +- :sunrise: standalone 模式的配置支持环境变量 [#6505](https://github.com/apache/apisix/pull/6505) +- :sunrise: consumer 新增的插件都能被执行 [#6502](https://github.com/apache/apisix/pull/6502) +- :sunrise: 添加配置项来控制是否在 x-upstream-apisix-status 中记录所有状态码 [#6392](https://github.com/apache/apisix/pull/6392) +- :sunrise: 新增 kubernetes 服务发现 [#4880](https://github.com/apache/apisix/pull/4880) +- :sunrise: graphql 路由支持 JSON 类型和 GET 方法 [#6343](https://github.com/apache/apisix/pull/6343) + +### Plugin + +- :sunrise: jwt-auth 支持自定义参数名 [#6561](https://github.com/apache/apisix/pull/6561) +- :sunrise: cors 参数支持通过 plugin metadata 配置 [#6546](https://github.com/apache/apisix/pull/6546) +- :sunrise: openid-connect 支持 post_logout_redirect_uri [#6455](https://github.com/apache/apisix/pull/6455) +- :sunrise: mocking 插件 [#5940](https://github.com/apache/apisix/pull/5940) +- :sunrise: error-log-logger 新增 clickhouse 支持 [#6256](https://github.com/apache/apisix/pull/6256) +- :sunrise: clickhouse 日志插件 [#6215](https://github.com/apache/apisix/pull/6215) +- :sunrise: grpc-transcode 支持处理 .pb 文件 [#6264](https://github.com/apache/apisix/pull/6264) +- :sunrise: loggly 日志插件 [#6113](https://github.com/apache/apisix/pull/6113) +- :sunrise: opentelemetry 日志插件 [#6119](https://github.com/apache/apisix/pull/6119) +- :sunrise: public api 插件 [#6145](https://github.com/apache/apisix/pull/6145) +- :sunrise: CSRF 插件 [#5727](https://github.com/apache/apisix/pull/5727) + +### Bugfix + +- 修复 skywalking,opentelemetry 没有追踪认证失败的问题 
[#6617](https://github.com/apache/apisix/pull/6617) +- log-rotate 切割日志时按整点完成 [#6521](https://github.com/apache/apisix/pull/6521) +- deepcopy 没有复制 metatable [#6623](https://github.com/apache/apisix/pull/6623) +- request-validate 修复对 JSON 里面重复键的处理 [#6625](https://github.com/apache/apisix/pull/6625) +- prometheus 避免重复计算指标 [#6579](https://github.com/apache/apisix/pull/6579) +- 修复 proxy-rewrite 中,当 conf.headers 缺失时,conf.method 不生效的问题 [#6300](https://github.com/apache/apisix/pull/6300) +- 修复 traffic-split 首条规则失败时无法匹配的问题 [#6292](https://github.com/apache/apisix/pull/6292) +- etcd 超时不应触发 resync_delay [#6259](https://github.com/apache/apisix/pull/6259) +- 解决 proto 定义冲突 [#6199](https://github.com/apache/apisix/pull/6199) +- limit-count 配置不变,不应重置计数器 [#6151](https://github.com/apache/apisix/pull/6151) +- Admin API 的 plugin-metadata 和 global-rule 计数有误 [#6155](https://github.com/apache/apisix/pull/6155) +- 解决合并 route 和 service 时 labels 丢失问题 [#6177](https://github.com/apache/apisix/pull/6177) + +## 2.12.1 + +**这是一个 LTS 维护版本,您可以在 `release/2.12` 分支中看到 CHANGELOG。** + +## 2.12.0 + +### Change + +- 重命名 serverless 插件的 "balancer" phase 为 "before_proxy" [#5992](https://github.com/apache/apisix/pull/5992) +- 不再承诺支持 Tengine [#5961](https://github.com/apache/apisix/pull/5961) +- 当 L4 支持 和 Admin API 都启用时,自动开启 HTTP 支持 [#5867](https://github.com/apache/apisix/pull/5867) + +### Core + +- :sunrise: 支持 TLS over TCP upstream [#6030](https://github.com/apache/apisix/pull/6030) +- :sunrise: 支持自定义 APISIX variable [#5941](https://github.com/apache/apisix/pull/5941) +- :sunrise: 支持集成 Vault [#5745](https://github.com/apache/apisix/pull/5745) +- :sunrise: 支持 L4 的 access log [#5768](https://github.com/apache/apisix/pull/5768) +- :sunrise: 支持自定义 http_server_location_configuration_snippet 配置 [#5740](https://github.com/apache/apisix/pull/5740) +- :sunrise: 支持配置文件环境变量中设置默认值 [#5675](https://github.com/apache/apisix/pull/5675) +- :sunrise: 支持在 header_filter 阶段运行 Wasm 代码 
[#5544](https://github.com/apache/apisix/pull/5544) + +### Plugin + +- :sunrise: 支持在 basic-auth 中隐藏 Authorization 请求头 [#6039](https://github.com/apache/apisix/pull/6039) +- :sunrise: 支持动态设置 proxy_request_buffering [#6075](https://github.com/apache/apisix/pull/6075) +- :sunrise: mqtt 支持通过 client id 负载均衡 [#6079](https://github.com/apache/apisix/pull/6079) +- :sunrise: 添加 forward-auth 插件 [#6037](https://github.com/apache/apisix/pull/6037) +- :sunrise: 支持 gRPC-Web 代理 [#5964](https://github.com/apache/apisix/pull/5964) +- :sunrise: limit-count 支持请求间共享计数器 [#5984](https://github.com/apache/apisix/pull/5984) +- :sunrise: limit-count 支持在路由间共享计数器 [#5881](https://github.com/apache/apisix/pull/5881) +- :sunrise: 新增 splunk hec logging 插件 [#5819](https://github.com/apache/apisix/pull/5819) +- :sunrise: 新增 OPA 插件 [#5734](https://github.com/apache/apisix/pull/5734) +- :sunrise: 新增 rocketmq logger 插件 [#5653](https://github.com/apache/apisix/pull/5653) +- :sunrise: mqtt 支持直接使用 route 上配置的 upstream [#5666](https://github.com/apache/apisix/pull/5666) +- :sunrise: ext-plugin 支持获取请求体 [#5600](https://github.com/apache/apisix/pull/5600) +- :sunrise: 新增 aws lambda 插件 [#5594](https://github.com/apache/apisix/pull/5594) +- :sunrise: http/kafka-logger 插件支持记录响应体 [#5550](https://github.com/apache/apisix/pull/5550) +- :sunrise: 新增 Apache OpenWhisk 插件 [#5518](https://github.com/apache/apisix/pull/5518) +- :sunrise: 支持 google cloud logging service [#5538](https://github.com/apache/apisix/pull/5538) + +### Bugfix + +- 同时启用 error-log-logger 和 prometheus 时报告 labels inconsistent 的问题 [#6055](https://github.com/apache/apisix/pull/6055) +- 支持禁止 IPv6 IP 解析 [#6023](https://github.com/apache/apisix/pull/6023) +- 正确处理 MQTT 5 中的 properties [#5916](https://github.com/apache/apisix/pull/5916) +- sls-logger 上报的 timestamp 补上毫秒部分 [#5820](https://github.com/apache/apisix/pull/5820) +- MQTT 中的 client id 可以为空 [#5816](https://github.com/apache/apisix/pull/5816) +- ext-plugin 避免使用过期的 key 
[#5782](https://github.com/apache/apisix/pull/5782) +- 解决 log-rotate 中 reopen log 和压缩中的 race [#5715](https://github.com/apache/apisix/pull/5715) +- 释放 batch-processor 中过期对象 [#5700](https://github.com/apache/apisix/pull/5700) +- 解决被动健康检查时配置被污染的问题 [#5589](https://github.com/apache/apisix/pull/5589) + +## 2.11.0 + +### Change + +- wolf-rbac 插件变更默认端口,并在文档中增加 authType 参数 [#5477](https://github.com/apache/apisix/pull/5477) + +### Core + +- :sunrise: 支持基于 POST 表单的高级路由匹配 [#5409](https://github.com/apache/apisix/pull/5409) +- :sunrise: 初步的 WASM 支持 [#5288](https://github.com/apache/apisix/pull/5288) +- :sunrise: control API 暴露 service 配置 [#5271](https://github.com/apache/apisix/pull/5271) +- :sunrise: control API 暴露 upstream 配置 [#5259](https://github.com/apache/apisix/pull/5259) +- :sunrise: 支持在 etcd 少于半数节点不可用时成功启动 [#5158](https://github.com/apache/apisix/pull/5158) +- :sunrise: 支持 etcd 配置里面自定义 SNI [#5206](https://github.com/apache/apisix/pull/5206) + +### Plugin + +- :sunrise: 新增 Azure-functions 插件 [#5479](https://github.com/apache/apisix/pull/5479) +- :sunrise: kafka-logger 支持动态记录请求体 [#5501](https://github.com/apache/apisix/pull/5501) +- :sunrise: 新增 skywalking-logger 插件 [#5478](https://github.com/apache/apisix/pull/5478) +- :sunrise: 新增 datadog 插件 [#5372](https://github.com/apache/apisix/pull/5372) +- :sunrise: limit-* 系列插件,在 key 对应的值不存在时,回退到用客户端地址作为限流的 key [#5422](https://github.com/apache/apisix/pull/5422) +- :sunrise: limit-count 支持使用多个变量作为 key [#5378](https://github.com/apache/apisix/pull/5378) +- :sunrise: limit-conn 支持使用多个变量作为 key [#5354](https://github.com/apache/apisix/pull/5354) +- :sunrise: proxy-rewrite 支持改写 HTTP method [#5292](https://github.com/apache/apisix/pull/5292) +- :sunrise: limit-req 支持使用多个变量作为 key [#5302](https://github.com/apache/apisix/pull/5302) +- :sunrise: proxy-cache 支持基于内存的缓存机制 [#5028](https://github.com/apache/apisix/pull/5028) +- :sunrise: ext-plugin 避免发送重复的 conf 请求 [#5183](https://github.com/apache/apisix/pull/5183) +- :sunrise: 新增 
ldap-auth 插件 [#3894](https://github.com/apache/apisix/pull/3894) + +## 2.10.5 + +**这是一个 LTS 维护版本,您可以在 `release/2.10` 分支中看到 CHANGELOG。** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2105](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2105) + +## 2.10.4 + +**这是一个 LTS 维护版本,您可以在 `release/2.10` 分支中看到 CHANGELOG。** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2104](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2104) + +## 2.10.3 + +**这是一个 LTS 维护版本,您可以在 `release/2.10` 分支中看到 CHANGELOG。** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2103](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2103) + +## 2.10.2 + +**这是一个 LTS 维护版本,您可以在 `release/2.10` 分支中看到 CHANGELOG。** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2102](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2102) + +## 2.10.1 + +**这是一个 LTS 维护版本,您可以在 `release/2.10` 分支中看到 CHANGELOG。** + +[https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2101](https://github.com/apache/apisix/blob/release/2.10/CHANGELOG.md#2101) + +## 2.10.0 + +### Change + +- 将 enable_debug 配置从 config.yaml 移到 debug.yaml [#5046](https://github.com/apache/apisix/pull/5046) +- 更改自定义 lua_shared_dict 配置的名称 [#5030](https://github.com/apache/apisix/pull/5030) +- 不再提供 APISIX 安装 shell 脚本 [#4985](https://github.com/apache/apisix/pull/4985) + +### Core + +- :sunrise: debug-mode 支持动态请求过滤 [#5012](https://github.com/apache/apisix/pull/5012) +- :sunrise: 支持注入逻辑到 APISIX 方法中 [#5068](https://github.com/apache/apisix/pull/5068) +- :sunrise: 支持配置 fallback SNI [#5000](https://github.com/apache/apisix/pull/5000) +- :sunrise: stream_route 支持在 IP 匹配中使用 CIDR [#4980](https://github.com/apache/apisix/pull/4980) +- :sunrise: 支持 route 从 service 中继承 hosts [#4977](https://github.com/apache/apisix/pull/4977) +- :sunrise: 改善数据面监听地址的配置 [#4856](https://github.com/apache/apisix/pull/4856) + +### Plugin + +- :sunrise: 
hmac-auth 支持校验请求体 [#5038](https://github.com/apache/apisix/pull/5038) +- :sunrise: proxy-mirror 支持控制镜像流量的比例 [#4965](https://github.com/apache/apisix/pull/4965) +- :sunrise: referer-restriction 增加黑名单和自定义信息 [#4916](https://github.com/apache/apisix/pull/4916) +- :sunrise: kafka-logger 增加 cluster 支持 [#4876](https://github.com/apache/apisix/pull/4876) +- :sunrise: kafka-logger 增加 required_acks 选项 [#4878](https://github.com/apache/apisix/pull/4878) +- :sunrise: uri-blocker 支持大小写无关的匹配 [#4868](https://github.com/apache/apisix/pull/4868) + +### Bugfix + +- radixtree_host_uri 路由更正匹配结果的 host [#5124](https://github.com/apache/apisix/pull/5124) +- radixtree_host_uri 路由更正匹配结果的 path [#5104](https://github.com/apache/apisix/pull/5104) +- Nacos 服务发现,区分处于不同 group/namespace 的同名 service [#5083](https://github.com/apache/apisix/pull/5083) +- Nacos 服务发现,当一个服务的地址获取失败后,继续处理剩下的服务 [#5112](https://github.com/apache/apisix/pull/5112) +- 匹配 SNI 时需要大小写无关 [#5074](https://github.com/apache/apisix/pull/5074) +- upstream 的 keepalive_pool 配置,缺省时不应覆盖默认的 keepalive 配置 [#5054](https://github.com/apache/apisix/pull/5054) +- DNS 服务发现,优先查询 SRV 记录 [#4992](https://github.com/apache/apisix/pull/4992) +- Consul 服务发现,重试前需等待一段时间 [#4979](https://github.com/apache/apisix/pull/4979) +- 当 upstream domain 背后的 IP 改变时,避免复制多余数据 [#4952](https://github.com/apache/apisix/pull/4952) +- 当 plugin_config 变化时,恢复之前被覆盖的配置 [#4888](https://github.com/apache/apisix/pull/4888) + +## 2.9.0 + +### Change + +- 为避免误解,将插件中的 balancer 方法改成 before_proxy [#4697](https://github.com/apache/apisix/pull/4697) + +### Core + +- :sunrise: 增大总 timer 数的限制 [#4843](https://github.com/apache/apisix/pull/4843) +- :sunrise: 移除禁止额外字段的检验,方便给 APISIX 做 A/B 测试 [#4797](https://github.com/apache/apisix/pull/4797) +- :sunrise: 支持在 arg 变量中使用 '-' (#4519) [#4676](https://github.com/apache/apisix/pull/4676) +- :sunrise: Admin API 拒绝错误的 proto 文件内容 [#4750](https://github.com/apache/apisix/pull/4750) + +### Plugin + +- :sunrise: ext-plugin 插件允许 Runner 查询请求信息 
[#4835](https://github.com/apache/apisix/pull/4835) +- :sunrise: gzip 插件支持通过 * 匹配任意类型 [#4817](https://github.com/apache/apisix/pull/4817) +- :sunrise: 增加 real-ip 插件 [#4813](https://github.com/apache/apisix/pull/4813) +- :sunrise: limit-* 系列插件允许自定义请求拒绝信息 [#4808](https://github.com/apache/apisix/pull/4808) +- :sunrise: request-id 插件增加 snowflake 算法支持 [#4559](https://github.com/apache/apisix/pull/4559) +- :sunrise: 增加 authz-casbin 插件 [#4710](https://github.com/apache/apisix/pull/4710) +- :sunrise: error-log-logger 插件增加 skywalking 后端 [#4633](https://github.com/apache/apisix/pull/4633) +- :sunrise: ext-plugin 插件在发送配置时会额外发送一个 idempotent key [#4736](https://github.com/apache/apisix/pull/4736) + +### Bugfix + +- 避免特定条件下缓存过期的全局规则 [#4867](https://github.com/apache/apisix/pull/4867) +- grpc-transcode 插件支持嵌套信息 [#4859](https://github.com/apache/apisix/pull/4859) +- authz-keycloak 插件避免当 lazy_load_path 为 false 且没有配置 permissions 时出错 [#4845](https://github.com/apache/apisix/pull/4845) +- proxy-cache 插件保持 cache_method 配置和 nginx's proxy_cache_methods 一致 [#4814](https://github.com/apache/apisix/pull/4814) +- Admin API 确保 PATCH with sub path 时也能注入 updatetime [#4765](https://github.com/apache/apisix/pull/4765) +- Admin API 更新 consumer 时校验 username [#4756](https://github.com/apache/apisix/pull/4756) +- error-log-logger 插件避免发送过期的错误日志 [#4690](https://github.com/apache/apisix/pull/4690) +- grpc-transcode 插件支持 enum 类型 [#4706](https://github.com/apache/apisix/pull/4706) +- 当非 HEAD/GET 请求触发 500 错误时,会被错误转成 405 [#4696](https://github.com/apache/apisix/pull/4696) + +## 2.8.0 + +### Change + +- 如果启用 stream proxy,默认将不再一并启用 HTTP proxy 功能 [#4580](https://github.com/apache/apisix/pull/4580) + +### Core + +- :sunrise: 允许用户自定义 balancer [#4605](https://github.com/apache/apisix/pull/4605) +- :sunrise: upstream 中添加 retry_timeout,类似于 Nginx 的 proxy_next_upstream_timeout [#4574](https://github.com/apache/apisix/pull/4574) +- :sunrise: 允许在 balancer_by_lua 中运行插件 
[#4549](https://github.com/apache/apisix/pull/4549) +- :sunrise: 允许给 upstream 指定单独的连接池 [#4506](https://github.com/apache/apisix/pull/4506) +- :sunrise: etcd 连接开启健康检查 [#4191](https://github.com/apache/apisix/pull/4191) + +### Plugin + +- :sunrise: 增加 gzip 插件 [#4640](https://github.com/apache/apisix/pull/4640) +- :sunrise: 增加 ua-restriction 插件来拒绝爬虫请求 [#4587](https://github.com/apache/apisix/pull/4587) +- :sunrise: stream 模块增加 ip-restriction 插件 [#4602](https://github.com/apache/apisix/pull/4602) +- :sunrise: stream 模块增加 limit-conn 插件 [#4515](https://github.com/apache/apisix/pull/4515) +- :sunrise: 将 ext-plugin 的超时提升到 60s [#4557](https://github.com/apache/apisix/pull/4557) +- :sunrise: key-auth 支持从 query string 中获取 key [#4490](https://github.com/apache/apisix/pull/4490) +- :sunrise: kafka-logger 支持通过 admin API 设置日志格式 [#4483](https://github.com/apache/apisix/pull/4483) + +### Bugfix + +- 修复 stream proxy 的 SNI router 在 session 复用中不可用的问题 [#4607](https://github.com/apache/apisix/pull/4607) +- 修复 limit-conn 同时在全局和 route 中指定会出错的问题 [#4585](https://github.com/apache/apisix/pull/4585) +- 修复 Admin API 中检查 proto 引用关系的错误 [#4575](https://github.com/apache/apisix/pull/4575) +- 修复 skywalking 同时在全局和 route 中指定会出错的问题 [#4589](https://github.com/apache/apisix/pull/4589) +- 调用 `ctx.var.cookie_*` 时如果没有找到 cookie 不再报错 [#4564](https://github.com/apache/apisix/pull/4564) +- 修复 request-id 同时在全局和 route 中指定会出错的问题 [#4479](https://github.com/apache/apisix/pull/4479) + +## 2.7.0 + +### Change + +- 修改 metadata_schema 校验方式,让它跟其他 schema 一致 [#4381](https://github.com/apache/apisix/pull/4381) +- 移除 echo 插件的 auth_value 字段 [#4055](https://github.com/apache/apisix/pull/4055) +- 更正 Admin API count 字段的计算,并把它的类型变成 integer [#4385](https://github.com/apache/apisix/pull/4385) + +### Core + +- :sunrise: TCP 代理支持客户端证书校验 [#4445](https://github.com/apache/apisix/pull/4445) +- :sunrise: TCP 代理支持接收 TLS over TCP 连接 [#4409](https://github.com/apache/apisix/pull/4409) +- :sunrise: TCP/UDP 代理上游配置支持用域名 
[#4386](https://github.com/apache/apisix/pull/4386) +- :sunrise: CLI 中封装 nginx quit 操作 [#4360](https://github.com/apache/apisix/pull/4360) +- :sunrise: 允许在 route 配置上游超时时间 [#4340](https://github.com/apache/apisix/pull/4340) +- :sunrise: Nacos 服务发现支持 group 参数 [#4325](https://github.com/apache/apisix/pull/4325) +- :sunrise: Nacos 服务发现支持 namespace 参数 [#4313](https://github.com/apache/apisix/pull/4313) + +### Plugin + +- :sunrise: client-control 允许动态设置 client_max_body_size [#4423](https://github.com/apache/apisix/pull/4423) +- :sunrise: ext-plugin 使用 SIGTERM 结束 runner [#4367](https://github.com/apache/apisix/pull/4367) +- :sunrise: limit-req 增加 nodelay 参数 [#4395](https://github.com/apache/apisix/pull/4395) +- :sunrise: mqtt-proxy 允许配置域名 [#4391](https://github.com/apache/apisix/pull/4391) +- :sunrise: redirect 支持带上 query string [#4298](https://github.com/apache/apisix/pull/4298) + +### Bugfix + +- 修复客户端断开连接导致的内存泄漏 [#4405](https://github.com/apache/apisix/pull/4405) +- 修复处理 etcd 响应时有一个地方没有检查 res.body.error 的问题 [#4371](https://github.com/apache/apisix/pull/4371) +- 修复 ext-plugin 插件 token 过期后没有刷新 token 的问题 [#4345](https://github.com/apache/apisix/pull/4345) +- 修复 ext-plugin 插件没有传递环境变量的问题 [#4349](https://github.com/apache/apisix/pull/4349) +- 修复插件热加载时,插件可能不会重新加载的问题 [#4319](https://github.com/apache/apisix/pull/4319) + +## 2.6.0 + +### Change + +- 更改 prometheus 里面关于 latency 的指标的 label [#3993](https://github.com/apache/apisix/pull/3993) +- 修改 prometheus 默认端口,不再暴露到数据面的端口上 [#3994](https://github.com/apache/apisix/pull/3994) +- limit-count 里面如果使用 redis cluster,需要指定名称 [#3910](https://github.com/apache/apisix/pull/3910) +- 不再支持 OpenResty 1.15 [#3960](https://github.com/apache/apisix/pull/3960) + +### Core + +- :sunrise: 允许 pass_host 为 node 时,upstream 配置多个节点 [#4208](https://github.com/apache/apisix/pull/4208) +- :sunrise: 自定义 500 错误页 [#4164](https://github.com/apache/apisix/pull/4164) +- :sunrise: stream_route 中支持 upstream_id [#4121](https://github.com/apache/apisix/pull/4121) +- 
:sunrise: 支持客户端证书认证 [#4034](https://github.com/apache/apisix/pull/4034) +- :sunrise: 实验性支持 nacos 服务发现 [#3820](https://github.com/apache/apisix/pull/3820) +- :sunrise: 给 tcp.sock.connect 打补丁,采用配置的 DNS resolver [#4114](https://github.com/apache/apisix/pull/4114) + +### Plugin + +- :sunrise: redirect 插件,支持编码 uri [#4244](https://github.com/apache/apisix/pull/4244) +- :sunrise: key-auth 插件:支持自定义鉴权头 [#4013](https://github.com/apache/apisix/pull/4013) +- :sunrise: response-rewrite 插件:允许在 header 里面使用变量 [#4194](https://github.com/apache/apisix/pull/4194) +- :sunrise: 实现 ext-plugin 第一版,APISIX 现在支持使用其他语言编写自定义插件 [#4183](https://github.com/apache/apisix/pull/4183) + +### Bugfix + +- 支持 IPv6 DNS resolver [#4242](https://github.com/apache/apisix/pull/4242) +- 修复被动健康检查可能重复报告的问题 [#4116](https://github.com/apache/apisix/pull/4116) +- 修复 traffic-split 中偶发的规则紊乱 [#4092](https://github.com/apache/apisix/pull/4092) +- 修复带域名的 upstream 配置的访问问题 [#4061](https://github.com/apache/apisix/pull/4061) +- 修复 2.5 版本的 APISIX 无法识别之前版本的 route 配置的问题 [#4056](https://github.com/apache/apisix/pull/4056) +- standalone 模式下,启动程序时应该可以读取配置 [#4027](https://github.com/apache/apisix/pull/4027) +- limit-count 插件 redis 模式下原子化计数操作 [#3991](https://github.com/apache/apisix/pull/3991) + +## 2.5.0 + +### Change + +- 更改 zipkin 插件的 span 类型 [#3877](https://github.com/apache/apisix/pull/3877) + +### Core + +- :sunrise: 支持 etcd 客户端证书校验 [#3905](https://github.com/apache/apisix/pull/3905) +- :sunrise: 支持表达式使用“或”和“非”的逻辑 [#3809](https://github.com/apache/apisix/pull/3809) +- :sunrise: 默认启动时会同步 etcd 配置 [#3799](https://github.com/apache/apisix/pull/3799) +- :sunrise: 负载均衡支持节点优先级 [#3755](https://github.com/apache/apisix/pull/3755) +- :sunrise: 服务发现提供了一系列 control API [#3742](https://github.com/apache/apisix/pull/3742) + +### Plugin + +- :sunrise: 允许热更新 skywalking 插件配置,并允许配置上报间隔 [#3925](https://github.com/apache/apisix/pull/3925) +- :sunrise: consumer-restriction 支持 HTTP method 级别的白名单配置 
[#3691](https://github.com/apache/apisix/pull/3691) +- :sunrise: cors 插件支持通过正则表达式匹配 Origin [#3839](https://github.com/apache/apisix/pull/3839) +- :sunrise: response-rewrite 插件支持条件改写 [#3577](https://github.com/apache/apisix/pull/3577) + +### Bugfix + +- error-log-logger 插件需要在每个进程中上报日志 [#3912](https://github.com/apache/apisix/pull/3912) +- 当使用 snippet 引入 Nginx server 段配置时,确保内置 server 是默认 server [#3907](https://github.com/apache/apisix/pull/3907) +- 修复 traffic-split 插件通过 upstream_id 绑定上游的问题 [#3842](https://github.com/apache/apisix/pull/3842) +- 修复 ssl_trusted_certificate 配置项的校验 [#3832](https://github.com/apache/apisix/pull/3832) +- 启用 proxy-cache 时,避免覆盖到其他路由缓存相关的响应头 [#3789](https://github.com/apache/apisix/pull/3789) +- 解决 macOS 下无法 `make deps` 的问题 [#3718](https://github.com/apache/apisix/pull/3718) + +## 2.4.0 + +### Change + +- 插件暴露的公共 API 将默认不再执行全局插件 [#3396](https://github.com/apache/apisix/pull/3396) +- DNS 记录缓存时间默认按 TTL 设置 [#3530](https://github.com/apache/apisix/pull/3530) + +### Core + +- :sunrise: 支持 DNS SRV 记录 [#3686](https://github.com/apache/apisix/pull/3686) +- :sunrise: 新的 DNS 服务发现模块 [#3629](https://github.com/apache/apisix/pull/3629) +- :sunrise: 支持 Consul HTTP 接口服务发现模块 [#3615](https://github.com/apache/apisix/pull/3615) +- :sunrise: 支持插件复用 [#3567](https://github.com/apache/apisix/pull/3567) +- :sunrise: 支持 plaintext HTTP2 [#3547](https://github.com/apache/apisix/pull/3547) +- :sunrise: 支持 DNS AAAA 记录 [#3484](https://github.com/apache/apisix/pull/3484) + +### Plugin + +- :sunrise: traffic-split 插件支持 upstream_id [#3512](https://github.com/apache/apisix/pull/3512) +- :sunrise: zipkin 插件 b3 请求头 [#3551](https://github.com/apache/apisix/pull/3551) + +### Bugfix + +- 一致性 hash 负载均衡确保重试所有节点 [#3651](https://github.com/apache/apisix/pull/3651) +- 当 route 绑定 service 后仍能执行 script [#3678](https://github.com/apache/apisix/pull/3678) +- 应当依赖 openssl111 [#3603](https://github.com/apache/apisix/pull/3603) +- zipkin 避免缓存请求特定的数据 
[#3522](https://github.com/apache/apisix/pull/3522) + +更多的变动可以参考[里程碑](https://github.com/apache/apisix/milestone/13) + +## 2.3.0 + +### Change + +- 默认使用 LuaJIT 运行命令行 [#3335](https://github.com/apache/apisix/pull/3335) +- 命令行采用 luasocket 而不是 curl 访问 etcd [#2965](https://github.com/apache/apisix/pull/2965) + +### Core + +- :sunrise: 命令行中访问 etcd 可以禁用 HTTPS 检验 [#3415](https://github.com/apache/apisix/pull/3415) +- :sunrise: 添加 etcd 无法连接时的 Chaos 测试 [#3404](https://github.com/apache/apisix/pull/3404) +- :sunrise: ewma 负载均衡算法更新 [#3300](https://github.com/apache/apisix/pull/3300) +- :sunrise: 允许在 Upstream 中配置 HTTPS scheme 来跟 HTTPS 后端通信 [#3430](https://github.com/apache/apisix/pull/3430) +- :sunrise: 允许自定义 lua_package_path & lua_package_cpath [#3417](https://github.com/apache/apisix/pull/3417) +- :sunrise: HTTPS 代理时传递 SNI [#3420](https://github.com/apache/apisix/pull/3420) +- :sunrise: 支持 gRPCS [#3411](https://github.com/apache/apisix/pull/3411) +- :sunrise: 支持通过 control API 获得健康检查状态 [#3345](https://github.com/apache/apisix/pull/3345) +- :sunrise: 支持代理 HTTP 到 dubbo 后端 [#3224](https://github.com/apache/apisix/pull/3224) +- :sunrise: 支持最少连接负载均衡算法 [#3304](https://github.com/apache/apisix/pull/3304) + +### Plugin + +- :sunrise: kafka-logger 支持复用 kafka 生产者对象 [#3429](https://github.com/apache/apisix/pull/3429) +- :sunrise: authz-keycloak 支持动态 scope & resource 映射 [#3308](https://github.com/apache/apisix/pull/3308) +- :sunrise: proxy-rewrite 支持在域名中带端口 [#3428](https://github.com/apache/apisix/pull/3428) +- :sunrise: fault-injection 支持通过变量条件动态做错误注入 [#3363](https://github.com/apache/apisix/pull/3363) + +### Bugfix + +- 修复 standalone 下 consumer 的 id 跟 username 可以不一致的问题 [#3394](https://github.com/apache/apisix/pull/3394) +- gRPC 中可以用 upstream_id & consumer [#3387](https://github.com/apache/apisix/pull/3387) +- 修复没有匹配规则时命中 global rule 报错的问题 [#3332](https://github.com/apache/apisix/pull/3332) +- 避免缓存过期的服务发现得到的节点 [#3295](https://github.com/apache/apisix/pull/3295) +- 应该在 access 阶段创建 health 
checker [#3240](https://github.com/apache/apisix/pull/3240) +- 修复 chash 负载均衡算法时重试的问题 [#2676](https://github.com/apache/apisix/pull/2676) + +更多的变动可以参考[里程碑](https://github.com/apache/apisix/milestone/12) + +## 2.2.0 + +### Change + +- 默认不启用 node-status 插件 [#2968](https://github.com/apache/apisix/pull/2968) +- upstream 配置中不再允许使用 k8s_deployment_info [#3098](https://github.com/apache/apisix/pull/3098) +- 默认不再匹配路由中以 ':' 开头的参数变量 [#3154](https://github.com/apache/apisix/pull/3154) + +### Core + +- :sunrise: 允许一个 consumer 关联多个认证插件 [#2898](https://github.com/apache/apisix/pull/2898) +- :sunrise: 增加 etcd 重试间隔,并允许配置 [#2977](https://github.com/apache/apisix/pull/2977) +- :sunrise: 允许启用或禁用 route [#2943](https://github.com/apache/apisix/pull/2943) +- :sunrise: 允许通过 graphql 属性进行路由 [#2964](https://github.com/apache/apisix/pull/2964) +- :sunrise: 共享 etcd 鉴权 token [#2932](https://github.com/apache/apisix/pull/2932) +- :sunrise: 新增 control API [#3048](https://github.com/apache/apisix/pull/3048) + +### Plugin + +- :sunrise: limit-count 中使用 'remote_addr' 作为默认 key [#2927](https://github.com/apache/apisix/pull/2927) +- :sunrise: 支持在 fault-injection 的 abort.body 中使用变量 [#2986](https://github.com/apache/apisix/pull/2986) +- :sunrise: 新增插件 `server-info` [#2926](https://github.com/apache/apisix/pull/2926) +- :sunrise: 增加 batch process 指标 [#3070](https://github.com/apache/apisix/pull/3070) +- :sunrise: 新增 traffic-split 插件 [#2935](https://github.com/apache/apisix/pull/2935) +- :sunrise: proxy-rewrite 支持在 header 中使用变量 [#3144](https://github.com/apache/apisix/pull/3144) +- :sunrise: openid-connect 插件增加更多配置项 [#2903](https://github.com/apache/apisix/pull/2903) +- :sunrise: proxy-rewrite 支持在 upstream_uri 中使用变量 [#3139](https://github.com/apache/apisix/pull/3139) + +### Bugfix + +- basic-auth 应该在 rewrite phase 执行 [#2905](https://github.com/apache/apisix/pull/2905) +- http/udp-logger 中插件配置运行时变更没有生效 [#2901](https://github.com/apache/apisix/pull/2901) +- 修复 limit-conn 对象没有被正确释放的问题 
[#2465](https://github.com/apache/apisix/pull/2465) +- 修复自动生成的 id 可能重复的问题 [#3003](https://github.com/apache/apisix/pull/3003) +- 修复 OpenResty 1.19 下 ctx 互相影响的问题。**对于使用 OpenResty 1.19 的用户,请尽快升级到该版本。** [#3105](https://github.com/apache/apisix/pull/3105) +- 修复 route.vars 字段的校验 [#3124](https://github.com/apache/apisix/pull/3124) + +更多的变动可以参考[里程碑](https://github.com/apache/apisix/milestone/10) + +## 2.1.0 + +### Core + +- :sunrise: **支持使用环境变量来配置参数。** [#2743](https://github.com/apache/apisix/pull/2743) +- :sunrise: **支持使用 TLS 来连接 etcd.** [#2548](https://github.com/apache/apisix/pull/2548) +- 自动生成对象的创建和更新时间。[#2740](https://github.com/apache/apisix/pull/2740) +- 在上游中开启 websocket 时,增加日志来提示此功能即将废弃。[#2691](https://github.com/apache/apisix/pull/2691) +- 增加日志来提示 consumer id 即将废弃。[#2829](https://github.com/apache/apisix/pull/2829) +- 增加 `X-APISIX-Upstream-Status` 头来区分 5xx 错误来自上游还是 APISIX 自身。[#2817](https://github.com/apache/apisix/pull/2817) +- 支持 Nginx 配置片段。[#2803](https://github.com/apache/apisix/pull/2803) + +### Plugin + +- :sunrise: **升级协议到 Apache Skywalking 8.0**[#2389](https://github.com/apache/apisix/pull/2389). 
这个版本只支持 skywalking 8.0 协议。此插件默认关闭,需要修改 config.yaml 来开启。这是不向下兼容的修改。 +- :sunrise: 新增阿里云 sls 日志服务插件。[#2169](https://github.com/apache/apisix/issues/2169) +- proxy-cache: cache_zone 字段改为可选。[#2776](https://github.com/apache/apisix/pull/2776) +- 在数据平面校验插件的配置。[#2856](https://github.com/apache/apisix/pull/2856) + +### Bugfix + +- :bug: fix(etcd): 处理 etcd compaction.[#2687](https://github.com/apache/apisix/pull/2687) +- 将 `conf/cert` 中的测试证书移动到 `t/certs` 目录中,并且默认关闭 SSL。这是不向下兼容的修改。 [#2112](https://github.com/apache/apisix/pull/2112) +- 检查 decrypt key 来阻止 lua thread 中断。 [#2815](https://github.com/apache/apisix/pull/2815) + +### 不向下兼容特性预告 + +- 在 2.3 发布版本中,consumer 将只支持用户名,废弃 id,consumer 需要在 etcd 中手工清理掉 id 字段,不然使用时 schema 校验会报错 +- 在 2.3 发布版本中,将不再支持在 upstream 上开启 websocket +- 在 3.0 版本中,数据平面和控制平面将分开为两个独立的端口,即现在的 9080 端口将只处理数据平面的请求,不再处理 admin API 的请求 + +更多的变动可以参考[里程碑](https://github.com/apache/apisix/milestone/8) + +## 2.0.0 + +这是一个 release candidate。 + +### Core + +- :sunrise: **从 etcd v2 协议迁移到 v3,这是不向下兼容的修改。Apache APISIX 只支持 etcd 3.4 以及后续的版本。** [#2036](https://github.com/apache/apisix/pull/2036) +- 支持为上游对象增加标签。[#2279](https://github.com/apache/apisix/pull/2279) +- 为上游、路由等资源增加更多字段,比如 create_time 和 update_time。[#2444](https://github.com/apache/apisix/pull/2444) +- 使用拦截器来保护插件的路由。[#2416](https://github.com/apache/apisix/pull/2416) +- 支持 http 和 https 监听多个端口。[#2409](https://github.com/apache/apisix/pull/2409) +- 实现 `core.sleep` 函数。[#2397](https://github.com/apache/apisix/pull/2397) + +### Plugin + +- :sunrise: **增加 AK/SK(HMAC) 认证插件。**[#2192](https://github.com/apache/apisix/pull/2192) +- :sunrise: 增加 referer-restriction 插件。[#2352](https://github.com/apache/apisix/pull/2352) +- `limit-count` 插件支持 `redis` cluster。[#2406](https://github.com/apache/apisix/pull/2406) +- proxy-cache 插件支持存储临时文件。[#2317](https://github.com/apache/apisix/pull/2317) +- http-logger 插件支持通过 admin API 来指定文件格式。[#2309](https://github.com/apache/apisix/pull/2309) + +### Bugfix + +- :bug: **`高优先级`** 当数据平面接收到删除某一个资源 
(路由、上游等) 的指令时,没有正确的清理缓存,导致存在的资源也会找不到。这个问题在长时间、频繁删除操作的情况下才会出现。[#2168](https://github.com/apache/apisix/pull/2168) +- 修复路由优先级不生效的问题。[#2447](https://github.com/apache/apisix/pull/2447) +- 在 `init_worker` 阶段设置随机数,而不是 `init` 阶段。[#2357](https://github.com/apache/apisix/pull/2357) +- 删除 jwt 插件中不支持的算法。[#2356](https://github.com/apache/apisix/pull/2356) +- 当重定向插件的 `http_to_https` 开启时,返回正确的响应码。[#2311](https://github.com/apache/apisix/pull/2311) + +更多的变动可以参考[里程碑](https://github.com/apache/apisix/milestone/7) + +### CVE + +- 修复 Admin API 默认访问令牌漏洞 + +## 1.5.0 + +### Core + +- Admin API:支持使用 SSL 证书进行身份验证。[1747](https://github.com/apache/apisix/pull/1747) +- Admin API:同时支持标准的 PATCH 和子路径 PATCH。[1930](https://github.com/apache/apisix/pull/1930) +- HealthCheck:支持自定义检查端口。[1914](https://github.com/apache/apisix/pull/1914) +- Upstream:支持禁用 `Nginx` 默认重试机制。[1919](https://github.com/apache/apisix/pull/1919) +- URI:支持以配置方式删除 `URI` 末尾的 `/` 符号。[1766](https://github.com/apache/apisix/pull/1766) + +### New Plugin + +- :sunrise: **新增 请求验证器 插件** [1709](https://github.com/apache/apisix/pull/1709) + +### Improvements + +- 变更:nginx `worker_shutdown_timeout` 配置默认值由 `3s` 变更为推荐值 `240s`。[1883](https://github.com/apache/apisix/pull/1883) +- 变更:`healthcheck` 超时时间类型 由 `integer` 变更为 `number`。[1892](https://github.com/apache/apisix/pull/1892) +- 变更:`request-validation` 插件输入参数支持 `JsonSchema` 验证。[1920](https://github.com/apache/apisix/pull/1920) +- 变更:为 Makefile `install` 命令添加注释。[1912](https://github.com/apache/apisix/pull/1912) +- 变更:更新 config.yaml `etcd.timeout` 默认配置的注释。[1929](https://github.com/apache/apisix/pull/1929) +- 变更:为 `prometheus` 添加更多度量指标,以更好地了解 `APISIX` 节点的情况。[1888](https://github.com/apache/apisix/pull/1888) +- 变更:为 `cors` 插件添加更多配置选项。[1963](https://github.com/apache/apisix/pull/1963) + +### Bugfix + +- 修复:`healthcheck` 获取 `host` 配置失败。 [1871](https://github.com/apache/apisix/pull/1871) +- 修复:插件运行时数据保存到 `etcd`。 [1910](https://github.com/apache/apisix/pull/1910) +- 修复:多次运行 `apisix start` 将启动多个 
`Nginx` 进程。[1913](https://github.com/apache/apisix/pull/1913) +- 修复:从临时文件读取请求正文(如果已缓存)。[1863](https://github.com/apache/apisix/pull/1863) +- 修复:批处理器名称和错误返回类型。[1927](https://github.com/apache/apisix/pull/1927) +- 修复:`limit-count` 插件 `redis.ttl` 读取异常。[1928](https://github.com/apache/apisix/pull/1928) +- 修复:被动健康检查不能提供健康报告。[1918](https://github.com/apache/apisix/pull/1918) +- 修复:避免插件中直接修改或使用原始配置数据。[1958](https://github.com/apache/apisix/pull/1958) +- 修复:`invalid-upstream` 测试用例稳定性问题。[1925](https://github.com/apache/apisix/pull/1925) + +### Doc + +- 文档:添加 `APISIX Lua` 代码风格指南。[1874](https://github.com/apache/apisix/pull/1874) +- 文档:修正 `README` 中语法错误。[1894](https://github.com/apache/apisix/pull/1894) +- 文档:修正 `benchmark` 文档中图片链接错误。[1896](https://github.com/apache/apisix/pull/1896) +- 文档:修正 `FAQ`、`admin-api`、`architecture-design`、`discovery`、`prometheus`、`proxy-rewrite`、`redirect`、`http-logger` 文档中错别字。[1916](https://github.com/apache/apisix/pull/1916) +- 文档:更新 `request-validation` 插件示例。[1926](https://github.com/apache/apisix/pull/1926) +- 文档:修正 `architecture-design` 文档中错别字。[1938](https://github.com/apache/apisix/pull/1938) +- 文档:添加 `how-to-build` 文档中在 `Linux` 和 `macOS` 系统中单元测试 `Nginx` 的默认引入路径。[1936](https://github.com/apache/apisix/pull/1936) +- 文档:添加 `request-validation` 插件中文文档。[1932](https://github.com/apache/apisix/pull/1932) +- 文档:修正 `README` 中 `gRPC transcoding` 文档路径。[1945](https://github.com/apache/apisix/pull/1945) +- 文档:修正 `README` 中 `uri-blocker` 文档路径。[1950](https://github.com/apache/apisix/pull/1950) +- 文档:修正 `README` 中 `grpc-transcode` 文档路径。[1946](https://github.com/apache/apisix/pull/1946) +- 文档:删除 `k8s` 文档中不必要的配置。[1891](https://github.com/apache/apisix/pull/1891) + +## 1.4.1 + +### Bugfix + +- 修复在配置了多个 SSL 证书的情况下,只有一个证书生效的问题。 [1818](https://github.com/apache/incubator-apisix/pull/1818) + +## 1.4.0 + +### Core + +- Admin API: 路由支持唯一 name 字段 [1655](https://github.com/apache/incubator-apisix/pull/1655) +- 优化 log 缓冲区大小和刷新时间 
[1570](https://github.com/apache/incubator-apisix/pull/1570) + +### New plugins + +- :sunrise: **Apache Skywalking plugin** [1241](https://github.com/apache/incubator-apisix/pull/1241) +- :sunrise: **Keycloak Identity Server Plugin** [1701](https://github.com/apache/incubator-apisix/pull/1701) +- :sunrise: **Echo Plugin** [1632](https://github.com/apache/incubator-apisix/pull/1632) +- :sunrise: **Consume Restriction Plugin** [1437](https://github.com/apache/incubator-apisix/pull/1437) + +### Improvements + +- Batch Request : 对每个请求拷贝头 [1697](https://github.com/apache/incubator-apisix/pull/1697) +- SSL 私钥加密 [1678](https://github.com/apache/incubator-apisix/pull/1678) +- 众多插件文档改善 + +## 1.3.0 + +1.3 版本主要带来安全更新。 + +## Security + +- 拒绝无效的 header [#1462](https://github.com/apache/incubator-apisix/pull/1462) 并对 uri 进行安全编码 [#1461](https://github.com/apache/incubator-apisix/pull/1461) +- 默认只允许本地环回地址 127.0.0.1 访问 admin API 和 dashboard. [#1458](https://github.com/apache/incubator-apisix/pull/1458) + +### Plugin + +- :sunrise: **新增 batch request 插件**. [#1388](https://github.com/apache/incubator-apisix/pull/1388) +- 实现完成 `sys logger` 插件。[#1414](https://github.com/apache/incubator-apisix/pull/1414) + +## 1.2.0 + +1.2 版本在内核以及插件上带来了非常多的更新。 + +### Core + +- :sunrise: **支持 etcd 集群**. [#1283](https://github.com/apache/incubator-apisix/pull/1283) +- 默认使用本地 DNS resolver,这对于 k8s 环境更加友好。[#1387](https://github.com/apache/incubator-apisix/pull/1387) +- 支持在 `header_filter`、`body_filter` 和 `log` 阶段运行全局插件。[#1364](https://github.com/apache/incubator-apisix/pull/1364) +- 将目录 `lua/apisix` 修改为 `apisix`(**不向下兼容**). [#1351](https://github.com/apache/incubator-apisix/pull/1351) +- 增加 dashboard 子模块。[#1360](https://github.com/apache/incubator-apisix/pull/1360) +- 允许自定义共享字典。[#1367](https://github.com/apache/incubator-apisix/pull/1367) + +### Plugin + +- :sunrise: **新增 Apache Kafka 插件**. [#1312](https://github.com/apache/incubator-apisix/pull/1312) +- :sunrise: **新增 CORS 插件**. 
[#1327](https://github.com/apache/incubator-apisix/pull/1327) +- :sunrise: **新增 TCP logger 插件**. [#1221](https://github.com/apache/incubator-apisix/pull/1221) +- :sunrise: **新增 UDP logger 插件**. [1070](https://github.com/apache/incubator-apisix/pull/1070) +- :sunrise: **新增 proxy mirror 插件**. [#1288](https://github.com/apache/incubator-apisix/pull/1288) +- :sunrise: **新增 proxy cache 插件**. [#1153](https://github.com/apache/incubator-apisix/pull/1153) +- 在 proxy-rewrite 插件中废弃 websocket 开关 (**不向下兼容**). [1332](https://github.com/apache/incubator-apisix/pull/1332) +- OAuth 插件中增加基于公钥的自省支持。[#1266](https://github.com/apache/incubator-apisix/pull/1266) +- response-rewrite 插件通过 base64 来支持传输二进制数据。[#1381](https://github.com/apache/incubator-apisix/pull/1381) +- gRPC 转码插件支持 `deadline`. [#1149](https://github.com/apache/incubator-apisix/pull/1149) +- limit count 插件支持 redis 权限认证。[#1150](https://github.com/apache/incubator-apisix/pull/1150) +- Zipkin 插件支持名字和本地服务器 ip 的记录。[#1386](https://github.com/apache/incubator-apisix/pull/1386) +- Wolf-Rbac 插件增加 `change_pwd` 和 `user_info` 参数。[#1204](https://github.com/apache/incubator-apisix/pull/1204) + +### Admin API + +- :sunrise: 对调用 Admin API 增加 key-auth 权限认证 (**not backward compatible**). [#1169](https://github.com/apache/incubator-apisix/pull/1169) +- 隐藏 SSL 私钥的返回值。[#1240](https://github.com/apache/incubator-apisix/pull/1240) + +### Bugfix + +- 在复用 table 之前遗漏了对数据的清理 (**会引发内存泄漏**). [#1134](https://github.com/apache/incubator-apisix/pull/1134) +- 如果 yaml 中路由非法就打印警告信息。[#1141](https://github.com/apache/incubator-apisix/pull/1141) +- 使用空字符串替代空的 balancer IP. 
[#1166](https://github.com/apache/incubator-apisix/pull/1166) +- 修改 node-status 和 heartbeat 插件没有 schema 的问题。[#1249](https://github.com/apache/incubator-apisix/pull/1249) +- basic-auth 增加 required 字段。[#1251](https://github.com/apache/incubator-apisix/pull/1251) +- 检查上游合法节点的个数。[#1292](https://github.com/apache/incubator-apisix/pull/1292) + +## 1.1.0 + +这个版本主要是加强代码的稳定性,以及增加更多的文档。 + +### Core + +- 每次跑测试用例都指定 perl 包含路径。 [#1097](https://github.com/apache/incubator-apisix/pull/1097) +- 增加对代理协议的支持。 [#1113](https://github.com/apache/incubator-apisix/pull/1113) +- 增加用于校验 nginx.conf 的命令。 [#1112](https://github.com/apache/incubator-apisix/pull/1112) +- 支持「nginx 最多可以打开文件数」可配置,并增大其默认配置。[#1105](https://github.com/apache/incubator-apisix/pull/1105) [#1098](https://github.com/apache/incubator-apisix/pull/1098) +- 优化日志模块。 [#1093](https://github.com/apache/incubator-apisix/pull/1093) +- 支持 SO_REUSEPORT。 [#1085](https://github.com/apache/incubator-apisix/pull/1085) + +### Doc + +- 增加 Grafana 元数据下载链接。[#1119](https://github.com/apache/incubator-apisix/pull/1119) +- 更新 README.md。 [#1118](https://github.com/apache/incubator-apisix/pull/1118) +- 增加 wolf-rbac 插件说明文档 [#1116](https://github.com/apache/incubator-apisix/pull/1116) +- 更新 rpm 下载链接。 [#1108](https://github.com/apache/incubator-apisix/pull/1108) +- 增加更多英文文章链接。 [#1092](https://github.com/apache/incubator-apisix/pull/1092) +- 增加文档贡献指引。 [#1086](https://github.com/apache/incubator-apisix/pull/1086) +- 检查更新「快速上手」文档。 [#1084](https://github.com/apache/incubator-apisix/pull/1084) +- 检查更新「插件开发指南」。 [#1078](https://github.com/apache/incubator-apisix/pull/1078) +- 更新 admin-api-cn.md。 [#1067](https://github.com/apache/incubator-apisix/pull/1067) +- 更新 architecture-design-cn.md。 [#1065](https://github.com/apache/incubator-apisix/pull/1065) + +### CI + +- 移除不再必须的补丁。 [#1090](https://github.com/apache/incubator-apisix/pull/1090) +- 修复使用 luarocks 安装时路径错误问题。[#1068](https://github.com/apache/incubator-apisix/pull/1068) +- 为 luarocks 安装专门配置一个 travis 
进行回归测试。 [#1063](https://github.com/apache/incubator-apisix/pull/1063) + +### Plugins + +- 在「节点状态」插件使用 nginx 内部请求替换原来的外部请求。 [#1109](https://github.com/apache/incubator-apisix/pull/1109) +- 增加 wolf-rbac 插件。 [#1095](https://github.com/apache/incubator-apisix/pull/1095) +- 增加 udp-logger 插件。 [#1070](https://github.com/apache/incubator-apisix/pull/1070) + +## 1.0.0 + +这个版本主要是加强代码的稳定性,以及增加更多的文档。 + +### Core + +- :sunrise: 支持路由的优先级。可以在 URI 相同的条件下,根据 header、args、优先级等条件,来匹配到不同的上游服务。 [#998](https://github.com/apache/incubator-apisix/pull/998) +- 在没有匹配到任何路由的时候,返回错误信息。以便和其他的 404 请求区分开。[#1013](https://github.com/apache/incubator-apisix/pull/1013) +- dashboard 的地址 `/apisix/admin` 支持 CORS。[#982](https://github.com/apache/incubator-apisix/pull/982) +- jsonschema 校验器返回更清晰的错误提示。[#1011](https://github.com/apache/incubator-apisix/pull/1011) +- 升级 `ngx_var` 模块到 0.5 版本。[#1005](https://github.com/apache/incubator-apisix/pull/1005) +- 升级 `lua-resty-etcd` 模块到 0.8 版本。[#980](https://github.com/apache/incubator-apisix/pull/980) +- 在开发模式下,自动把 worker 数调整为 1。[#926](https://github.com/apache/incubator-apisix/pull/926) +- 从代码仓库中移除 nginx.conf 文件,它每次都会自动生成,不可手工修改。[#974](https://github.com/apache/incubator-apisix/pull/974) + +### Doc + +- 增加如何自定义开发插件的文档。[#909](https://github.com/apache/incubator-apisix/pull/909) +- 修复 serverless 插件文档中错误的示例。[#1006](https://github.com/apache/incubator-apisix/pull/1006) +- 增加 Oauth 插件的使用文档。[#987](https://github.com/apache/incubator-apisix/pull/987) +- 增加 dashboard 编译的文档。[#985](https://github.com/apache/incubator-apisix/pull/985) +- 增加如何进行 a/b 测试的文档。[#957](https://github.com/apache/incubator-apisix/pull/957) +- 增加如何开启 MQTT 插件的文档。[#916](https://github.com/apache/incubator-apisix/pull/916) + +### Test case + +- 增加 key-auth 插件正常情况下的测试案例。[#964](https://github.com/apache/incubator-apisix/pull/964/) +- 增加 grpc transcode pb 选项的测试。[#920](https://github.com/apache/incubator-apisix/pull/920) + +## 0.9.0 + +这个版本带来很多新特性,比如支持使用 Tengine 运行 APISIX,增加了对开发人员更友好的高级调试模式,还有新的 URI 重定向插件等。 + 
+### Core + +- :sunrise: 支持使用 Tengine 运行 APISIX。 [#683](https://github.com/apache/incubator-apisix/pull/683) +- :sunrise: 启用 HTTP2 并支持设置 ssl_protocols。 [#663](https://github.com/apache/incubator-apisix/pull/663) +- :sunrise: 增加高级调试模式,可在不重启服务的情况下动态打印指定模块方法的请求参数或返回值。[#641](https://github.com/apache/incubator-apisix/pull/641) +- 安装程序增加了仪表盘开关,支持用户自主选择是否安装仪表板程序。 [#686](https://github.com/apache/incubator-apisix/pull/686) +- 取消对 R3 路由的支持,并移除 R3 路由模块。 [#725](https://github.com/apache/incubator-apisix/pull/725) + +### Plugins + +- :sunrise: **[Redirect URI](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/redirect.md)**:URI 重定向插件。 [#732](https://github.com/apache/incubator-apisix/pull/732) +- [Proxy Rewrite](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/proxy-rewrite.md):支持 `header` 删除功能。 [#658](https://github.com/apache/incubator-apisix/pull/658) +- [Limit Count](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/limit-count.md):通过 `Redis Server` 聚合 `APISIX` 节点之间将共享流量限速结果,实现集群流量限速。[#624](https://github.com/apache/incubator-apisix/pull/624) + +### lua-resty-* + +- lua-resty-radixtree + - 支持将`host + uri`作为索引。 +- lua-resty-jsonschema + - 该扩展作用是 JSON 数据验证器,用于替换现有的 `lua-rapidjson` 扩展。 + +### Bugfix + +- 在多个使用者的情况下,`key-auth` 插件无法正确运行。 [#826](https://github.com/apache/incubator-apisix/pull/826) +- 无法在 `API Server` 中获取 `serverless`插件配置。 [#787](https://github.com/apache/incubator-apisix/pull/787) +- 解决使用 `proxy-rewrite` 重写 URI 时 GET 参数丢失问题。 [#642](https://github.com/apache/incubator-apisix/pull/642) +- `Zipkin` 插件未将跟踪数据设置为请求头。[#715](https://github.com/apache/incubator-apisix/pull/715) +- 使用本地文件作为配置中心时,跳过 etcd 初始化。 [#737](https://github.com/apache/incubator-apisix/pull/737) +- 在 APISIX CLI 中跳过 luajit 环境的`check cjson`。[#652](https://github.com/apache/incubator-apisix/pull/652) +- 配置 `Upstream` 时,选择 `balancer` 类型为 `chash` 时,支持更多 Nginx 内置变量作为计算 key。 
[#775](https://github.com/apache/incubator-apisix/pull/775) + +### Dependencies + +- 使用 `lua-resty-jsonschema` 全局替换 `lua-rapidjson` 扩展,`lua-resty-jsonschema` 解析速度更快,更容易编译。 + +## 0.8.0 + +> Released on 2019/09/30 + +这个版本带来很多新的特性,比如四层协议的代理,支持 MQTT 协议代理,以及对 ARM 平台的支持,和代理改写插件等。 + +### Core + +- :sunrise: **[增加单机模式](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/deployment-modes.md#Standalone)**: 使用 yaml 配置文件来更新 APISIX 的配置,这对于 kubernetes 更加友好。 [#464](https://github.com/apache/incubator-apisix/pull/464) +- :sunrise: **[支持 stream 代理](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest/stream-proxy.md)**. [#513](https://github.com/apache/incubator-apisix/pull/513) +- :sunrise: 支持[在 consumer 上绑定插件](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest/terminology/consumer.md). [#544](https://github.com/apache/incubator-apisix/pull/544) +- 上游增加对域名的支持,而不仅是 IP。[#522](https://github.com/apache/incubator-apisix/pull/522) +- 当上游节点的权重为 0 时自动忽略。[#536](https://github.com/apache/incubator-apisix/pull/536) + +### Plugins + +- :sunrise: **[MQTT 代理](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/mqtt-proxy.md)**: 支持用 `client_id` 对 MQTT 进行负载均衡,同时支持 MQTT 3.1 和 5.0 两个协议标准。 [#513](https://github.com/apache/incubator-apisix/pull/513) +- [proxy-rewrite](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/proxy-rewrite.md): 对代理到上游的请求进行改写,包括 host, uri 和 schema。 [#594](https://github.com/apache/incubator-apisix/pull/594) + +### ARM + +- :sunrise: **APISIX 可以在基于 ARM64 架构的 Ubuntu 18.04 系统中正常运行**, 搭配上 MQTT 插件,你可以把它当做 IoT 网关来使用。 + +### lua-resty-* + +- lua-resty-ipmatcher + - 支持 IPv6。 + - 支持 IP 黑白名单和路由。 +- lua-resty-radixtree + - 允许指定多个 host, remote_addr 和 uri。 + - 允许设置用户自定义函数来做额外的过滤。 + - 使用 `lua-resty-ipmatcher` 替代 `lua-resty-iputils`, `lua-resty-ipmatcher` 支持 IPv6 并且速度更快。 + +### Bugfix + +- 健康检查:修复在多 worker 下运行时健康检查 checker 的名字错误。 
[#568](https://github.com/apache/incubator-apisix/issues/568) + +### Dependencies + +- 把 `lua-tinyyaml` 从源码中移除,通过 Luarocks 来安装。 + +## 0.7.0 + +> Released on 2019/09/06 + +这个版本带来很多新的特性,比如 IP 黑白名单、gPRC 协议转换、支持 IPv6、对接 IdP(身份认证提供商)服务、serverless、默认路由修改为 radix tree(**不向下兼容**)等。 + +### Core + +- :sunrise: **[gRPC 协议转换](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/grpc-transcode.md)**: 支持 gRPC 协议的转换,这样客户端可以通过 HTTP/JSON 来访问你的 gRPC API. [#395](https://github.com/apache/incubator-apisix/issues/395) +- :sunrise: **[radix tree 路由](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//router-radixtree.md)**: 默认的路由器更改为 radix tree,支持把 uri、host、cookie、请求头、请求参数、Nginx 内置变量等作为路由的条件,并支持等于、大于、小于等常见操作符,更加强大和灵活。**需要注意的是,这个改动不向下兼容,所有使用历史版本的用户,需要手动修改路由才能正常使用**。[#414](https://github.com/apache/incubator-apisix/issues/414) +- 动态上游支持更多的参数,可以指定上游的 uri 和 host,以及是否开启 websocket. [#451](https://github.com/apache/incubator-apisix/pull/451) +- 支持从 `ctx.var` 中直接获取 cookie 中的值。[#449](https://github.com/apache/incubator-apisix/pull/449) +- 路由支持 IPv6. [#331](https://github.com/apache/incubator-apisix/issues/331) + +### Plugins + +- :sunrise: **[serverless](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/serverless.md)**: 支持 serverless,用户可以把任意 Lua 函数动态的在网关节点上运行。用户也可以把这个功能当做是轻量级的插件来使用。[#86](https://github.com/apache/incubator-apisix/pull/86) +- :sunrise: **IdP 支持**: 支持外部的身份认证服务,比如 Auth0,okta 等,用户可以借此来对接 Oauth2.0 等认证方式。 [#447](https://github.com/apache/incubator-apisix/pull/447) +- [限流限速](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/limit-conn.md)支持更多的限制 key,比如 X-Forwarded-For 和 X-Real-IP,并且允许用户把 Nginx 变量、请求头和请求参数作为 key. 
[#228](https://github.com/apache/incubator-apisix/issues/228) +- [IP 黑白名单](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/ip-restriction.md) 支持 IP 黑白名单,提供更高的安全性。[#398](https://github.com/apache/incubator-apisix/pull/398) + +### CLI + +- 增加 `version` 指令,获取 APISIX 的版本号。[#420](https://github.com/apache/incubator-apisix/issues/420) + +### Admin + +- 支持 `PATCH` API,可以针对某个配置单独修改,而不再用提交整段配置。[#365](https://github.com/apache/incubator-apisix/pull/365) + +### Dashboard + +- :sunrise: **增加在线版本的 dashboard**,用户不用安装即可[体验 APISIX](http://apisix.iresty.com/). [#374](https://github.com/apache/incubator-apisix/issues/374) + +[Back to TOC](#table-of-contents) + +## 0.6.0 + +> Released on 2019/08/05 + +这个版本带来很多新的特性,比如健康检查、服务熔断、debug 模式,分布式追踪、JWT +认证等,以及**内置的 dashboard**. + +### Core + +- :sunrise: **[健康检查和服务熔断](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest/tutorials/health-check.md)**: 对上游节点开启健康检查,智能判断服务状态进行熔断和连接。[#249](https://github.com/apache/incubator-apisix/pull/249) +- 阻止 ReDoS(Regular expression Denial of Service). [#252](https://github.com/apache/incubator-apisix/pull/250) +- 支持 debug 模式。[#319](https://github.com/apache/incubator-apisix/pull/319) +- 允许自定义路由。[#364](https://github.com/apache/incubator-apisix/pull/364) +- 路由支持 host 和 uri 的组合。[#325](https://github.com/apache/incubator-apisix/pull/325) +- 允许在 balance 阶段注入插件。[#299](https://github.com/apache/incubator-apisix/pull/299) +- 为 upstream 和 service 在 schema 中增加描述信息。[#289](https://github.com/apache/incubator-apisix/pull/289) + +### Plugins + +- :sunrise: **[分布式追踪 OpenTracing](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/zipkin.md)**: 支持 Zipkin 和 Apache SkyWalking. [#304](https://github.com/apache/incubator-apisix/pull/304) +- [JWT 认证](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/jwt-auth.md). 
[#303](https://github.com/apache/incubator-apisix/pull/303) + +### CLI + +- `allow` 指令中支持多个 ip 地址。[#340](https://github.com/apache/incubator-apisix/pull/340) +- 支持在 nginx.conf 中配置 real_ip 指令,以及增加函数来获取 ip. [#236](https://github.com/apache/incubator-apisix/pull/236) + +### Dashboard + +- :sunrise: **增加内置的 dashboard**. [#327](https://github.com/apache/incubator-apisix/pull/327) + +### Test + +- 在 Travis CI 中支持 OSX. [#217](https://github.com/apache/incubator-apisix/pull/217) +- 把所有依赖安装到 `deps` 目录。[#248](https://github.com/apache/incubator-apisix/pull/248) + +[Back to TOC](#table-of-contents) diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/CODE_STYLE.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/CODE_STYLE.md new file mode 100644 index 0000000..56cbe0e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/CODE_STYLE.md @@ -0,0 +1,426 @@ +--- +title: APISIX Lua 编码风格指南 +--- + + + +## 缩进 + +使用 4 个空格作为缩进的标记: + +```lua +--No +if a then +ngx.say("hello") +end +``` + +```lua +--Yes +if a then + ngx.say("hello") +end +``` + +你可以在使用的编辑器中把 tab 改为 4 个空格来简化操作。 + +## 空格 + +在操作符的两边,都需要用一个空格来做分隔: + +```lua +--No +local i=1 +local s = "apisix" +``` + +```lua +--Yes +local i = 1 +local s = "apisix" +``` + +## 空行 + +不少开发者会在行尾增加一个分号: + +```lua +--No +if a then +    ngx.say("hello"); +end; +``` + +增加分号会让 Lua 代码显得非常丑陋,也是没有必要的。 + +另外,不要为了显得“简洁”节省代码行数,而把多行代码变为一行。这样会在定位错误的时候不知道到底哪一段代码出了问题: + +```lua +--No +if a then ngx.say("hello") end +``` + +```lua +--Yes +if a then + ngx.say("hello") +end +``` + +函数之间需要用两个空行来做分隔: + +```lua +--No +local function foo() +end +local function bar() +end +``` + +```lua +--Yes +local function foo() +end + + +local function bar() +end +``` + +如果有多个 if elseif 的分支,它们之间需要一个空行来做分隔: + +```lua +--No +if a == 1 then + foo() +elseif a== 2 then + bar() +elseif a == 3 then + run() +else + error() +end +``` + +```lua +--Yes +if a == 1 then + foo() + +elseif a== 2 then + bar() + +elseif a == 3 then + run() + +else + error() 
+end +``` + +## 每行最大长度 + +每行不能超过 80 个字符,超过的话,需要换行并对齐: + +```lua +--No +return limit_conn_new("plugin-limit-conn", conf.conn, conf.burst, conf.default_conn_delay) +``` + +```lua +--Yes +return limit_conn_new("plugin-limit-conn", conf.conn, conf.burst, + conf.default_conn_delay) +``` + +在换行对齐的时候,要体现出上下两行的对应关系。 + +就上面示例而言,第二行函数的参数,要在第一行左括号的右边。 + +如果是字符串拼接的对齐,需要把 `..` 放到下一行中: + +```lua +--No +return limit_conn_new("plugin-limit-conn" .. "plugin-limit-conn" .. + "plugin-limit-conn") +``` + +```lua +--Yes +return limit_conn_new("plugin-limit-conn" .. "plugin-limit-conn" + .. "plugin-limit-conn") +``` + +```lua +--Yes +return "param1", "plugin-limit-conn" + .. "plugin-limit-conn") +``` + +## 变量 + +应该永远使用局部变量,不要使用全局变量: + +```lua +--No +i = 1 +s = "apisix" +``` + +```lua +--Yes +local i = 1 +local s = "apisix" +``` + +变量命名使用 `snake_case`(蛇形命名法)风格: + +```lua +--No +local IndexArr = 1 +local str_Name = "apisix" +``` + +```lua +--Yes +local index_arr = 1 +local str_name = "apisix" +``` + +对于常量要使用全部大写: + +```lua +--No +local max_int = 65535 +local server_name = "apisix" +``` + +```lua +--Yes +local MAX_INT = 65535 +local SERVER_NAME = "apisix" +``` + +## 表格/数组 + +使用 `table.new` 来预先分配数组: + +```lua +--No +local t = {} +for i = 1, 100 do + t[i] = i +end +``` + +```lua +--Yes +local new_tab = require "table.new" +local t = new_tab(100, 0) +for i = 1, 100 do + t[i] = i +end +``` + +不要在数组中使用 `nil`: + +```lua +--No +local t = {1, 2, nil, 3} +``` + +如果一定要使用空值,请用 `ngx.null` 来表示: + +```lua +--Yes +local t = {1, 2, ngx.null, 3} +``` + +## 字符串 + +不要在热代码路径上拼接字符串: + +```lua +--No +local s = "" +for i = 1, 100000 do + s = s .. 
"a" +end +``` + +```lua +--Yes +local new_tab = require "table.new" +local t = new_tab(100000, 0) +for i = 1, 100000 do + t[i] = "a" +end +local s = table.concat(t, "") +``` + +## 函数 + +函数的命名也同样遵循 `snake_case`(蛇形命名法): + +```lua +--No +local function testNginx() +end +``` + +```lua +--Yes +local function test_nginx() +end +``` + +函数应该尽可能早的返回: + +```lua +--No +local function check(age, name) + local ret = true + if age < 20 then + ret = false + end + + if name == "a" then + ret = false + end + -- do something else + return ret +end +``` + +```lua +--Yes +local function check(age, name) + if age < 20 then + return false + end + + if name == "a" then + return false + end + -- do something else + return true +end +``` + +## 模块 + +所有 `require` 的库都要 `local` 化: + +```lua +--No +local function foo() + local ok, err = ngx.timer.at(delay, handler) +end +``` + +```lua +--Yes +local timer_at = ngx.timer.at + +local function foo() + local ok, err = timer_at(delay, handler) +end +``` + +为了风格的统一,`require` 和 `ngx` 也需要 `local` 化: + +```lua +--No +local core = require("apisix.core") +local timer_at = ngx.timer.at + +local function foo() + local ok, err = timer_at(delay, handler) +end +``` + +```lua +--Yes +local ngx = ngx +local require = require +local core = require("apisix.core") +local timer_at = ngx.timer.at + +local function foo() + local ok, err = timer_at(delay, handler) +end +``` + +## 错误处理 + +对于有错误信息返回的函数,必须对错误信息进行判断和处理: + +```lua +--No +local sock = ngx.socket.tcp() +local ok = sock:connect("www.google.com", 80) +ngx.say("successfully connected to google!") +``` + +```lua +--Yes +local sock = ngx.socket.tcp() +local ok, err = sock:connect("www.google.com", 80) +if not ok then + ngx.say("failed to connect to google: ", err) + return +end +ngx.say("successfully connected to google!") +``` + +自己编写的函数,错误信息要作为第二个参数,用字符串的格式返回: + +```lua +--No +local function foo() + local ok, err = func() + if not ok then + return false + end + return true +end +``` + +```lua +--No +local 
function foo() + local ok, err = func() + if not ok then + return false, {msg = err} + end + return true +end +``` + +```lua +--Yes +local function foo() + local ok, err = func() + if not ok then + return false, "failed to call func(): " .. err + end + return true +end +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/FAQ.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/FAQ.md new file mode 100644 index 0000000..d605d09 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/FAQ.md @@ -0,0 +1,777 @@ +--- +title: 常见问题 +keywords: + - Apache APISIX + - API 网关 + - 常见问题 + - FAQ +description: 本文列举了使用 Apache APISIX 时常见问题解决方法。 +--- + + + +## 为什么需要一个新的 API 网关?不是已经有其他的开源网关了吗? + +随着企业向云本地微服务的发展,企业对高性能、灵活、安全、可扩展的 API 网关的需求越来越大。 + +APISIX 在这些指标表现上优于其它 API 网关,同时具有平台无关性和完全动态的特性,如支持多种协议、细粒度路由和多语言支持。 + +## APISIX 和其他的 API 网关有什么不同之处? + +Apache APISIX 在以下方面有所不同: + +— 它使用 etcd 来保存和同步配置,而不是使用如 PostgreSQL 或 MySQL 这类的关系数据库。etcd 中的实时事件通知系统比这些替代方案更容易扩展。这允许 APISIX 实时同步配置,使代码简洁,并避免单点故障。 + +- 完全动态 +- 支持[热加载插件](./terminology/plugin.md#热加载)。 + +## APISIX 所展现出的性能如何? + +与其它 API 网关相比,Apache APISIX 提供了更好的性能,其单核 QPS 高达 18,000,平均延迟仅为 0.2 ms。 + +如果您想获取性能基准测试的具体结果,请查看 [benchmark](benchmark.md)。 + +## Apache APISIX 支持哪些平台? + +Apache APISIX 是一个开源的云原生 API 网关,它支持在裸金属服务器上运行,也支持在 Kubernetes 上使用,甚至也可以运行在 Apple Silicon ARM 芯片上。 + +## 如何理解“Apache APISIX 是全动态的”? + +Apache APISIX 是全动态的 API 网关,意味着当你在更改一个配置后,只需要重新加载配置文件就可以使其生效。 + +APISIX 可以动态处理以下行为: + +- 重新加载插件 +- 代理重写 +- 对请求进⾏镜像复制 +- 对请求进⾏修改 +- 健康状态的检查 +- 动态控制指向不同上游服务的流量⽐ + +## APISIX 是否有控制台界面? + +APISIX 具有功能强大的 Dashboard,并且 [APISIX Dashboard](https://github.com/apache/apisix-dashboard) 是一个独立的项目。你可以通过 [APISIX Dashboard](https://github.com/apache/apisix-dashboard) 用户操作界面来部署 APISIX Dashboard。 + +## 我可以为 Apache APISIX 开发适合自身业务的插件吗? + +当然可以,APISIX 提供了灵活的自定义插件,方便开发者和企业编写自己的逻辑。 + +如果你想开发符合自身业务逻辑的插件,请参考:[如何开发插件](plugin-develop.md)。 + +## 为什么 Apache APISIX 选择 etcd 作为配置中心? + +对于配置中心,配置存储只是最基本功能,APISIX 还需要下面几个特性: + +1. 
集群中的分布式部署 +2. 通过比较来监视业务 +3. 多版本并发控制 +4. 变化通知 +5. 高性能和最小的读/写延迟 + +etcd 提供了这些特性,并且使它比 PostgreSQL 和 MySQL 等其他数据库更理想。 + +如果你想了解更多关于 etcd 与其他替代方案的比较,请参考[对比图表](https://etcd.io/docs/latest/learning/why/#comparison-chart)。 + +## 使用 LuaRocks 安装 Apache APISIX 依赖项时,为什么会导致超时、安装缓慢或安装失败? + +可能是因为使用的 LuaRocks 服务器延迟过高。 + +为了解决这个问题,你可以使用 https_proxy 或者使用 `--server` 参数指定一个更快的 LuaRocks 服务器。 + +你可以运行如下命令来查看可用的服务器(需要 LuaRocks 3.0+): + +```shell +luarocks config rocks_servers +``` + +中国大陆用户可以使用 `luarocks.cn` 作为 LuaRocks 的服务器。 + +以下命令可以帮助你更快速的安装依赖: + +```bash +make deps ENV_LUAROCKS_SERVER=https://luarocks.cn +``` + +如果通过上述操作仍然无法解决问题,可以尝试使用 `--verbose` 或 `-v` 参数获取详细的日志来诊断问题。 + +## 如何构建 APISIX-Runtime 环境? + +有些功能需要引入额外的 NGINX 模块,这就要求 APISIX 需要运行在 APISIX-Runtime 上。如果你需要这些功能,你可以参考 [api7/apisix-build-tools](https://github.com/api7/apisix-build-tools) 中的代码,构建自己的 APISIX-Runtime 环境。 + +## 我该如何使用 Apache APISIX 进行灰度发布? + +举个例子,比如:`foo.com/product/index.html?id=204&page=2`,并考虑您需要根据查询字符串中的 `id` 在此条件下进行灰度发布: + +1. Group A:`id <= 1000` +2. Group B:`id > 1000` + +在 Apache APISIX 中有两种不同的方法来实现这一点: + +1. 
创建一个[Route](terminology/route.md)并配置 `vars` 字段: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "vars": [ + ["arg_id", "<=", "1000"] + ], + "plugins": { + "redirect": { + "uri": "/test?group_id=1" + } + } +}' + +curl -i http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "vars": [ + ["arg_id", ">", "1000"] + ], + "plugins": { + "redirect": { + "uri": "/test?group_id=2" + } + } +}' +``` + +更多 `lua-resty-radixtree` 匹配操作,请参考:[lua-resty-radixtree](https://github.com/api7/lua-resty-radixtree#operator-list)。 + +2、通过 [traffic-split](plugins/traffic-split.md) 插件来实现。 + +## 我如何通过 Apache APISIX 实现从 HTTP 自动跳转到 HTTPS? + +比如,将 `http://foo.com` 重定向到 `https://foo.com`。 + +Apache APISIX 提供了几种不同的方法来实现: + +1. 在 [redirect](plugins/redirect.md) 插件中将 `http_to_https` 设置为 `true`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true + } + } +}' +``` + +2. 结合高级路由规则 `vars` 和 `redirect` 插件一起使用: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "host": "foo.com", + "vars": [ + [ + "scheme", + "==", + "http" + ] + ], + "plugins": { + "redirect": { + "uri": "https://$host$request_uri", + "ret_code": 301 + } + } +}' +``` + +3. 
使用 `serverless` 插件: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": ["return function() if ngx.var.scheme == \"http\" and ngx.var.host == \"foo.com\" then ngx.header[\"Location\"] = \"https://foo.com\" .. ngx.var.request_uri; ngx.exit(ngx.HTTP_MOVED_PERMANENTLY); end; end"] + } + } +}' +``` + +然后测试下是否生效: + +```shell +curl -i -H 'Host: foo.com' http://127.0.0.1:9080/hello +``` + +响应信息应该是: + +``` +HTTP/1.1 301 Moved Permanently +Date: Mon, 18 May 2020 02:56:04 GMT +Content-Type: text/html +Content-Length: 166 +Connection: keep-alive +Location: https://foo.com/hello +Server: APISIX web server + + +301 Moved Permanently + +

301 Moved Permanently

+
openresty
+ + +``` + +## 我应该如何更改 Apache APISIX 的日志等级? + +Apache APISIX 默认的日志等级为 `warn`,你需要将日志等级调整为 `info` 来查看 `core.log.info` 的打印结果。 + +你需要将 `./conf/config.yaml` 中的 `nginx_config` 配置参数 `error_log_level: "warn"` 修改为 `error_log_level: "info"`,然后重新加载 Apache APISIX 使其生效。 + +```yaml +nginx_config: + error_log_level: "info" +``` + +## 我应该如何重新加载 Apache APISIX 的自定义插件? + +所有的 Apache APISIX 的插件都支持热加载的方式。 + +如果你想了解更多关于热加载的内容,请参考[热加载](./terminology/plugin.md#热加载)。 + +## 在处理 HTTP 或 HTTPS 请求时,我该如何配置 Apache APISIX 来监听多个端口? + +默认情况下,APISIX 在处理 HTTP 请求时只监听 9080 端口。 + +要配置 Apache APISIX 监听多个端口,你可以: + +1. 修改 `conf/config.yaml` 中 HTTP 端口监听的参数 `node_listen`,示例: + + ``` + apisix: + node_listen: + - 9080 + - 9081 + - 9082 + ``` + + 处理 HTTPS 请求也类似,修改 `conf/config.yaml` 中 HTTPS 端口监听的参数 `ssl.listen`,示例: + + ``` + apisix: + ssl: + enable: true + listen: + - port: 9443 + - port: 9444 + - port: 9445 + ``` + +2. 重启或者重新加载 APISIX。 + +## 启用 SSL 证书后,为什么无法通过 HTTPS + IP 访问对应的路由? + +如果直接使用 HTTPS + IP 地址访问服务器,服务器将会使用 IP 地址与绑定的 SNI 进行比对,由于 SSL 证书是和域名进行绑定的,无法在 SNI 中找到对应的资源,因此证书就会校验失败,进而导致用户无法通过 HTTPS + IP 访问网关。 + +此时你可以通过在配置文件中设置 `fallback_sni` 参数,并配置域名,实现该功能。当用户使用 HTTPS + IP 访问网关时,SNI 为空时,则 fallback 到默认 SNI,从而实现 HTTPS + IP 访问网关。 + +```yaml title="./conf/config.yaml" +apisix + ssl: + fallback_sni: "${your sni}" +``` + +## APISIX 如何利用 etcd 如何实现毫秒级别的配置同步? + +Apache APISIX 使用 etcd 作为它的配置中心。etcd 提供以下订阅功能(比如:[watch](https://github.com/api7/lua-resty-etcd/blob/master/api_v3.md#watch)、[watchdir](https://github.com/api7/lua-resty-etcd/blob/master/api_v3.md#watchdir))。它可以监视对特定关键字或目录的更改。 + +APISIX 主要使用 [etcd.watchdir](https://github.com/api7/lua-resty-etcd/blob/master/api_v3.md#watchdir) 监视目录内容变更: + +- 如果监听目录没有数据更新:则该调用会被阻塞,直到超时或其他错误返回。 + +- 如果监听目录有数据更新:etcd 将立刻返回订阅(毫秒级)到的新数据,APISIX 将它更新到内存缓存。 + +## 我应该如何自定义 APISIX 实例 id? 
+ +默认情况下,APISIX 从 `conf/apisix.uid` 中读取实例 id。如果找不到,且没有配置 id,APISIX 会生成一个 `uuid` 作为实例 id。 + +要指定一个有意义的 id 来绑定 Apache APISIX 到你的内部系统,请在你的 `./conf/config.yaml` 中设置 id: + +```yaml +apisix: + id: "your-id" +``` + +## 为什么 `error.log` 中会出现 "failed to fetch data from etcd, failed to read etcd dir, etcd key: xxxxxx" 的错误? + +请按照以下步骤进行故障排除: + +1. 确保 Apache APISIX 和集群中的 etcd 部署之间没有任何网络问题。 +2. 如果网络正常,请检查是否为 etcd 启用了[gRPC gateway](https://etcd.io/docs/v3.4.0/dev-guide/api_grpc_gateway/)。默认状态取决于你是使用命令行还是配置文件来启动 etcd 服务器。 + +- 如果使用命令行选项,默认启用 gRPC 网关。可以手动启用,如下所示: + +```shell +etcd --enable-grpc-gateway --data-dir=/path/to/data +``` + +**注意**:当运行 `etcd --help` 时,这个参数不会显示。 + +- 如果使用配置文件,默认关闭 gRPC 网关。你可以手动启用,如下所示: + + 在 `etcd.json` 配置: + +```json +{ + "enable-grpc-gateway": true, + "data-dir": "/path/to/data" +} +``` + + 在 `etcd.conf.yml` 配置 + +```yml +enable-grpc-gateway: true +``` + +**注意**:事实上这种差别已经在 etcd 的 master 分支中消除,但并没有向后兼容到已经发布的版本中,所以在部署 etcd 集群时,依然需要小心。 + +## 我应该如何创建高可用的 Apache APISIX 集群? + +Apache APISIX 可以通过在其前面添加一个负载均衡器来实现高可用性,因为 APISIX 的数据面是无状态的,并且可以在需要时进行扩展。 + +Apache APISIX 的控制平面是依赖于 `etcd cluster` 的高可用实现的,它只依赖于 etcd 集群。 + +## 为什么使用源码安装 Apache APISIX 时,执行 `make deps` 命令会失败? + +当使用源代码安装 Apache APISIX 时,执行 `make deps` 命令可能会出现如下错误: + +```shell +$ make deps +...... +Error: Failed installing dependency: https://luarocks.org/luasec-0.9-1.src.rock - Could not find header file for OPENSSL + No file openssl/ssl.h in /usr/local/include +You may have to install OPENSSL in your system and/or pass OPENSSL_DIR or OPENSSL_INCDIR to the luarocks command. +Example: luarocks install luasec OPENSSL_DIR=/usr/local +make: *** [deps] Error 1 +``` + +这是由于缺少 OpenResty openssl 开发工具包。要安装它,请参考[installation dependencies](install-dependencies.md)。 + +## 我如何通过 APISIX 代理访问 APISIX Dashboard? + +你可以按照以下步骤进行配置: + +1. 
为 Apache APISIX 代理和 Admin API 配置不同的端口,或者禁用 Admin API。 + +```yaml +deployment: + admin: + admin_listen: # use a separate port + ip: 127.0.0.1 + port: 9180 +``` + +2、添加 APISIX Dashboard 的代理路由: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uris":[ "/*" ], + "name":"apisix_proxy_dashboard", + "upstream":{ + "nodes":[ + { + "host":"127.0.0.1", + "port":9000, + "weight":1 + } + ], + "type":"roundrobin" + } +}' +``` + +**注意**: Apache APISIX Dashboard 正在监听 `127.0.0.1:9000`。 + +## 如何使用正则表达式 (regex) 匹配 Route 中的 `uri`? + +你可以在 Route 中使用 `vars` 字段来匹配正则表达式: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "vars": [ + ["uri", "~~", "^/[a-z]+$"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +测试请求: + +```shell +# uri 匹配成功 +$ curl http://127.0.0.1:9080/hello -i +HTTP/1.1 200 OK +... + +# uri 匹配失败 +$ curl http://127.0.0.1:9080/12ab -i +HTTP/1.1 404 Not Found +... +``` + +如果你想了解 `vars` 字段的更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 + +## Upstream 节点是否支持配置 [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name) 地址? + +这是支持的,下面是一个 `FQDN` 为 `httpbin.default.svc.cluster.local`(一个 Kubernetes Service)的示例: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/ip", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.default.svc.cluster.local": 1 + } + } +}' +``` + +使用如下命令测试路由: + +```shell +curl http://127.0.0.1:9080/ip -i +``` + +## Admin API 的 `X-API-KEY` 指的是什么?是否可以修改? 
+ +Admin API 的 `X-API-KEY` 指的是 `./conf/config.yaml` 文件中的 `deployment.admin.admin_key.key`,默认值是 `edd1c9f034335f136f87ad84b625c8f1`。它是 Admin API 的访问 token。 + +默认情况下,它被设置为 `edd1c9f034335f136f87ad84b625c8f1`,也可以通过修改 `./conf/conf/config` 中的参数来修改,如下示例: + +```yaml +deployment: + admin: + admin_key + - name: "admin" + key: newkey + role: admin +``` + +然后访问 Admin API: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: newkey' -X PUT -d ' +{ + "uris":[ "/*" ], + "name":"admin-token-test", + "upstream":{ + "nodes":[ + { + "host":"127.0.0.1", + "port":1980, + "weight":1 + } + ], + "type":"roundrobin" + } +}' + +HTTP/1.1 200 OK +...... +``` + +**注意**:通过使用默认令牌,可能会面临安全风险。在将其部署到生产环境时,需要对其进行更新。 + +## 如何允许所有 IP 访问 Apache APISIX 的 Admin API? + +Apache APISIX 默认只允许 `127.0.0.0/24` 的 IP 段范围访问 `Admin API`, + +如果你想允许所有的 IP 访问,只需在 `./conf/config.yaml` 配置文件中添加如下的配置,然后重启或重新加载 APISIX 就可以让所有 IP 访问 `Admin API`。 + +```yaml +deployment: + admin: + allow_admin: + - 0.0.0.0/0 +``` + +**注意**:你可以在非生产环境中使用此方法,以允许所有客户端从任何地方访问 Apache APISIX 实例,但是在生产环境中该设置并不安全。在生产环境中,请仅授权特定的 IP 地址或地址范围访问 Apache APISIX 实例。 + +## 如何基于 acme.sh 自动更新 APISIX SSL 证书? + +你可以运行以下命令来实现这一点: + +```bash +curl --output /root/.acme.sh/renew-hook-update-apisix.sh --silent https://gist.githubusercontent.com/anjia0532/9ebf8011322f43e3f5037bc2af3aeaa6/raw/65b359a4eed0ae990f9188c2afa22bacd8471652/renew-hook-update-apisix.sh +``` + +```bash +chmod +x /root/.acme.sh/renew-hook-update-apisix.sh +``` + +```bash +acme.sh --issue --staging -d demo.domain --renew-hook "/root/.acme.sh/renew-hook-update-apisix.sh -h http://apisix-admin:port -p /root/.acme.sh/demo.domain/demo.domain.cer -k /root/.acme.sh/demo.domain/demo.domain.key -a xxxxxxxxxxxxx" +``` + +```bash +acme.sh --renew --domain demo.domain +``` + +详细步骤,请参考 [APISIX 基于 acme.sh 自动更新 HTTPS 证书](https://juejin.cn/post/6965778290619449351)。 + +## 在 Apache APISIX 中,我如何在转发到上游之前从路径中删除一个前缀? 
在转发至上游之前移除请求路径中的前缀,比如说从 `/foo/get` 改成 `/get`,可以通过 [`proxy-rewrite`](plugins/proxy-rewrite.md) 插件来实现:
+ +可以创建一个名为 `health-info` 的路由,并开启 [fault-injection](https://apisix.apache.org/zh/docs/apisix/plugins/fault-injection/) 插件(其中 YOUR-TOKEN 是用户自己的 token;127.0.0.1 是控制平面的 IP 地址,可以自行修改): + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/health-info \ +-H 'X-API-KEY: YOUR-TOKEN' -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "fine" + } + } + }, + "uri": "/status" +}' +``` + +验证方式: + +访问 Apache APISIX 数据平面的 `/status` 来探测 APISIX,如果 response code 是 200 就代表 APISIX 存活。 + +:::note + +这个方式只是探测 APISIX 数据平面是否存活,并不代表 APISIX 的路由和其他功能是正常的,这些需要更多路由级别的探测。 + +::: + +## APISIX 与 [etcd](https://etcd.io/) 相关的延迟较高的问题有哪些,如何修复? + +etcd 作为 APISIX 的数据存储组件,它的稳定性关乎 APISIX 的稳定性。在实际场景中,如果 APISIX 使用证书通过 HTTPS 的方式连接 etcd,可能会出现以下 2 种数据查询或写入延迟较高的问题: + +1. 通过接口操作 APISIX Admin API 进行数据的查询或写入,延迟较高。 +2. 在监控系统中,Prometheus 抓取 APISIX 数据面 Metrics 接口超时。 + +这些延迟问题,严重影响了 APISIX 的服务稳定性,而之所以会出现这类问题,主要是因为 etcd 对外提供了 2 种操作方式:HTTP(HTTPS)、gRPC。而 APISIX 默认是基于 HTTP(HTTPS)协议来操作 etcd 的。 + +在这个场景中,etcd 存在一个关于 HTTP/2 的 BUG:如果通过 HTTPS 操作 etcd(HTTP 不受影响),HTTP/2 的连接数上限为 Golang 默认的 `250` 个。 + +所以,当 APISIX 数据面节点数较多时,一旦所有 APISIX 节点与 etcd 连接数超过这个上限,则 APISIX 的接口响应会非常的慢。 + +Golang 中,默认的 HTTP/2 上限为 `250`,代码如下: + +```go +package http2 + +import ... + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + maxQueuedControlFrames = 10000 +) + +``` + +目前,etcd 官方主要维护了 `3.4` 和 `3.5` 这两个主要版本。在 `3.4` 系列中,近期发布的 `3.4.20` 版本已修复了这个问题。至于 `3.5` 版本,其实,官方很早之前就在筹备发布 `3.5.5` 版本了,但截止目前(2022.09.13)仍尚未发布。所以,如果你使用的是 etcd 的版本小于 `3.5.5`,可以参考以下几种方式解决这个问题: + +1. 将 APISIX 与 etcd 的通讯方式由 HTTPS 改为 HTTP。 +2. 将 etcd 版本回退到 `3.4.20`。 +3. 
将 etcd 源码克隆下来,直接编译 `release-3.5` 分支(此分支已修复,只是尚未发布新版本而已)。 + +重新编译 etcd 的方式如下: + +```shell +git checkout release-3.5 +make GOOS=linux GOARCH=amd64 +``` + +编译的二进制在 `bin` 目录下,将其替换掉你服务器环境的 etcd 二进制后,然后重启 etcd 即可。 + +更多信息,请参考: + +- [when etcd node have many http long polling connections, it may cause etcd to respond slowly to http requests.](https://github.com/etcd-io/etcd/issues/14185) +- [bug: when apisix starts for a while, its communication with etcd starts to time out](https://github.com/apache/apisix/issues/7078) +- [the prometheus metrics API is tool slow](https://github.com/apache/apisix/issues/7353) +- [Support configuring `MaxConcurrentStreams` for http2](https://github.com/etcd-io/etcd/pull/14169) + +另外一种解决办法是改用实验性的基于 gRPC 的配置同步。需要在配置文件 `conf/config.yaml` 中设置 `use_grpc: true`: + +```yaml + etcd: + use_grpc: true + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" +``` + +## 为什么 file-logger 记录日志会出现乱码? + +如果你使用的是 `file-logger` 插件,但是在日志文件中出现了乱码,那么可能是因为上游服务的响应体被进行了压缩。你可以将请求头带上不接收压缩响应参数(`gzip;q=0,deflate,sdch`)以解决这个问题,你可以使用 [proxy-rewirte](https://apisix.apache.org/docs/apisix/plugins/proxy-rewrite/) 插件将请求头中的 `accept-encoding` 设置为不接收压缩响应: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: YOUR-TOKEN' -X PUT -d ' +{ + "methods":[ + "GET" + ], + "uri":"/test/index.html", + "plugins":{ + "proxy-rewrite":{ + "headers":{ + "set":{ + "accept-encoding":"gzip;q=0,deflate,sdch" + } + } + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 + } + } +}' +``` + +## APISIX 如何配置带认证的 ETCD? + +假设您有一个启用身份验证的 ETCD 集群。要访问该集群,需要在 `conf/config.yaml` 中为 Apache APISIX 配置正确的用户名和密码: + +```yaml +deployment: + etcd: + host: + - "http://127.0.0.1:2379" + user: etcd_user # username for etcd + password: etcd_password # password for etcd +``` + +关于 ETCD 的其他配置,比如过期时间、重试次数等等,你可以参考 `conf/config.yaml.example` 文件中的 `etcd` 部分。 + +## SSLs 对象与 `upstream` 对象中的 `tls.client_cert` 以及 `config.yaml` 中的 `ssl_trusted_certificate` 区别是什么? 
+ +Admin API 中 `/apisix/admin/ssls` 用于管理 SSL 对象,如果 APISIX 需要接收来自外网的 HTTPS 请求,那就需要用到存放在这里的证书完成握手。SSL 对象中支持配置多个证书,不同域名的证书 APISIX 将使用 Server Name Indication (SNI) 进行区分。 + +Upstream 对象中的 `tls.client_cert`、`tls.client_key` 与 `tls.client_cert_id` 用于存放客户端的证书,适用于需要与上游进行 [mTLS 通信](https://apisix.apache.org/zh/docs/apisix/tutorials/client-to-apisix-mtls/)的情况。 + +`config.yaml` 中的 `ssl_trusted_certificate` 用于配置一个受信任的根证书。它仅用于在 APISIX 内部访问某些具有自签名证书的服务时,避免提示拒绝对方的 SSL 证书。注意:它不用于信任 APISIX 上游的证书,因为 APISIX 不会验证上游证书的合法性。因此,即使上游使用了无效的 TLS 证书,APISIX 仍然可以与其通信,而无需配置根证书。 + +## 如果在使用 APISIX 过程中遇到问题,我可以在哪里寻求更多帮助? + +- [Apache APISIX Slack Channel](/docs/general/join/#加入-slack-频道):加入后请选择 channel-apisix 频道,即可通过此频道进行 APISIX 相关问题的提问。 +- [邮件列表](/docs/general/join/#订阅邮件列表):任何问题或对项目提议都可以通过社区邮件进行讨论。 +- [GitHub Issues](https://github.com/apache/apisix/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc) 与 [GitHub Discussions](https://github.com/apache/apisix/discussions):也可直接在 GitHub 中进行相关 issue 创建进行问题的表述。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/README.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/README.md new file mode 100644 index 0000000..f254998 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/README.md @@ -0,0 +1,259 @@ +--- +title: Apache APISIX +--- + + + +APISIX logo + +[![Build Status](https://github.com/apache/apisix/workflows/build/badge.svg?branch=master)](https://github.com/apache/apisix/actions) +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/apache/apisix/blob/master/LICENSE) + +**Apache APISIX** 是一个动态、实时、高性能的 API 网关, +提供负载均衡、动态上游、灰度发布、服务熔断、身份认证、可观测性等丰富的流量管理功能。 + +你可以使用 Apache APISIX 来处理传统的南北向流量,以及服务间的东西向流量, +也可以当做 [k8s ingress controller](https://github.com/apache/apisix-ingress-controller) 来使用。 + +Apache APISIX 的技术架构如下图所示: + +![Apache APISIX 的技术架构](../../assets/images/apisix.png) + +## 社区 + +- 邮件列表 - 发送任意内容到 dev-subscribe@apisix.apache.org 后,根据回复以订阅邮件列表。 +- QQ 群 - 781365357 +- 
Slack - [查看加入方式](https://apisix.apache.org/zh/docs/general/join/#join-the-slack-channel) +- ![Twitter Follow](https://img.shields.io/twitter/follow/ApacheAPISIX?style=social) - 使用标签 `#ApacheAPISIX` 关注我们并与我们互动。 +- [哔哩哔哩](https://space.bilibili.com/551921247) +- **新手任务列表** + - [Apache APISIX®](https://github.com/apache/apisix/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) + - [Apache APISIX® Ingress Controller](https://github.com/apache/apisix-ingress-controller/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) + - [Apache APISIX® dashboard](https://github.com/apache/apisix-dashboard/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) + - [Apache APISIX® Helm Chart](https://github.com/apache/apisix-helm-chart/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) + - [Docker distribution for Apache APISIX®](https://github.com/apache/apisix-docker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) + - [Apache APISIX® Website](https://github.com/apache/apisix-website/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) + - [Apache APISIX® Java Plugin Runner](https://github.com/apache/apisix-java-plugin-runner/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) + - [Apache APISIX® Go Plugin Runner](https://github.com/apache/apisix-go-plugin-runner/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) + - [Apache APISIX® Python Plugin Runner](https://github.com/apache/apisix-python-plugin-runner/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) +- **微信公众号** +
![wechat official account](../../assets/images/OA.jpg) +- **微信视频号** +
![wechat video account](../../assets/images/MA.jpeg) + +## 特性 + +你可以把 Apache APISIX 当做流量入口,来处理所有的业务数据,包括动态路由、动态上游、动态证书、 +A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、抵御恶意攻击、监控报警、服务可观测性、服务治理等。 + +- **全平台** + + - 云原生:平台无关,没有供应商锁定,无论裸机还是 Kubernetes,APISIX 都可以运行。 + - 支持 ARM64:不用担心底层技术的锁定。 + +- **多协议** + + - [TCP/UDP 代理](stream-proxy.md):动态 TCP/UDP 代理。 + - [Dubbo 代理](plugins/dubbo-proxy.md):动态代理 HTTP 请求到 Dubbo 后端。 + - [动态 MQTT 代理](plugins/mqtt-proxy.md):支持用 `client_id` 对 MQTT 进行负载均衡,同时支持 MQTT [3.1.\*](http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html) 和 [5.0](https://docs.oasis-open.org/mqtt/mqtt/v5.0/mqtt-v5.0.html) 两个协议标准。 + - [gRPC 代理](grpc-proxy.md):通过 APISIX 代理 gRPC 连接,并使用 APISIX 的大部分特性管理你的 gRPC 服务。 + - [gRPC Web 代理](plugins/grpc-web.md):通过 APISIX 代理 gRPC Web 请求到上游 gRPC 服务。 + - [gRPC 协议转换](plugins/grpc-transcode.md):支持协议的转换,这样客户端可以通过 HTTP/JSON 来访问你的 gRPC API。 + - Websocket 代理 + - Proxy Protocol + - HTTP(S) 反向代理 + - [SSL](certificate.md):动态加载 SSL 证书。 + +- **全动态能力** + + - [热更新和热插件](terminology/plugin.md):无需重启服务,就可以持续更新配置和插件。 + - [代理请求重写](plugins/proxy-rewrite.md):支持重写请求上游的`host`、`uri`、`schema`、`method`、`headers`信息。 + - [输出内容重写](plugins/response-rewrite.md):支持自定义修改返回内容的 `status code`、`body`、`headers`。 + - [Serverless](plugins/serverless.md):在 APISIX 的每一个阶段,你都可以添加并调用自己编写的函数。 + - 动态负载均衡:动态支持有权重的 round-robin 负载平衡。 + - 支持一致性 hash 的负载均衡:动态支持一致性 hash 的负载均衡。 + - [健康检查](./tutorials/health-check.md):启用上游节点的健康检查,将在负载均衡期间自动过滤不健康的节点,以确保系统稳定性。 + - 熔断器:智能跟踪不健康上游服务。 + - [代理镜像](plugins/proxy-mirror.md):提供镜像客户端请求的能力。 + - [流量拆分](plugins/traffic-split.md):允许用户逐步控制各个上游之间的流量百分比。 + +- **精细化路由** + + - [支持全路径匹配和前缀匹配](../../en/latest/router-radixtree.md#how-to-use-libradixtree-in-apisix) + - [支持使用 Nginx 所有内置变量做为路由的条件](../../en/latest/router-radixtree.md#how-to-filter-route-by-nginx-builtin-variable),所以你可以使用 `cookie`, `args` 等做为路由的条件,来实现灰度发布、A/B 测试等功能 + - 支持[各类操作符做为路由的判断条件](https://github.com/api7/lua-resty-radixtree#operator-list),比如 `{"arg_age", ">", 24}` + - 
支持[自定义路由匹配函数](https://github.com/api7/lua-resty-radixtree/blob/master/t/filter-fun.t#L10) + - IPv6:支持使用 IPv6 格式匹配路由 + - 支持路由的[自动过期 (TTL)](admin-api.md#route) + - [支持路由的优先级](../../en/latest/router-radixtree.md#3-match-priority) + - [支持批量 Http 请求](plugins/batch-requests.md) + - [支持通过 GraphQL 属性过滤路由](../../en/latest/router-radixtree.md#how-to-filter-route-by-graphql-attributes) + +- **安全防护** + + - 丰富的认证、鉴权支持: + * [key-auth](plugins/key-auth.md) + * [JWT](plugins/jwt-auth.md) + * [basic-auth](plugins/basic-auth.md) + * [wolf-rbac](plugins/wolf-rbac.md) + * [casbin](plugins/authz-casbin.md) + * [keycloak](plugins/authz-keycloak.md) + * [casdoor](../../en/latest/plugins/authz-casdoor.md) + - [IP 黑白名单](plugins/ip-restriction.md) + - [Referer 黑白名单](plugins/referer-restriction.md) + - [IdP 支持](plugins/openid-connect.md):支持外部的身份认证平台,比如 Auth0,Okta,Authing 等。 + - [限制速率](plugins/limit-req.md) + - [限制请求数](plugins/limit-count.md) + - [限制并发](plugins/limit-conn.md) + - 防御 ReDoS(正则表达式拒绝服务):内置策略,无需配置即可抵御 ReDoS。 + - [CORS](plugins/cors.md):为你的 API 启用 CORS。 + - [URI 拦截器](plugins/uri-blocker.md):根据 URI 拦截用户请求。 + - [请求验证器](plugins/request-validation.md)。 + - [CSRF](plugins/csrf.md):基于 [`Double Submit Cookie`](https://en.wikipedia.org/wiki/Cross-site_request_forgery#Double_Submit_Cookie) 的方式保护你的 API 远离 CSRF 攻击。 + +- **运维友好** + + - OpenTracing 可观测性:支持 [Apache Skywalking](plugins/skywalking.md) 和 [Zipkin](plugins/zipkin.md)。 + - 对接外部服务发现:除了内置的 etcd 外,还支持 [Consul](../../en/latest/discovery/consul_kv.md)、[Nacos](discovery/nacos.md)、[Eureka](discovery/eureka.md) 和 [Zookeeper(CP)](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md)。 + - 监控和指标:[Prometheus](plugins/prometheus.md) + - 集群:APISIX 节点是无状态的,创建配置中心集群请参考 [etcd Clustering Guide](https://etcd.io/docs/v3.5/op-guide/clustering/)。 + - 高可用:支持配置同一个集群内的多个 etcd 地址。 + - [控制台](https://github.com/apache/apisix-dashboard): 操作 APISIX 集群。 + - 版本控制:支持操作的多次回滚。 + - CLI:使用命令行来启动、关闭和重启 APISIX。 + - 
[单机模式](../../en/latest/deployment-modes.md#standalone):支持从本地配置文件中加载路由规则,在 kubernetes(k8s) 等环境下更友好。 + - [全局规则](terminology/global-rule.md):允许对所有请求执行插件,比如黑白名单、限流限速等。 + - 高性能:在单核上 QPS 可以达到 18k,同时延迟只有 0.2 毫秒。 + - [故障注入](plugins/fault-injection.md) + - [REST Admin API](admin-api.md):使用 REST Admin API 来控制 Apache APISIX,默认只允许 127.0.0.1 访问,你可以修改 `conf/config.yaml` 中的 `allow_admin` 字段,指定允许调用 Admin API 的 IP 列表。同时需要注意的是,Admin API 使用 key auth 来校验调用者身份,**在部署前需要修改 `conf/config.yaml` 中的 `admin_key` 字段,来保证安全。** + - 外部日志记录器:将访问日志导出到外部日志管理工具。([HTTP Logger](plugins/http-logger.md)、[TCP Logger](plugins/tcp-logger.md)、[Kafka Logger](plugins/kafka-logger.md)、[UDP Logger](plugins/udp-logger.md)、[RocketMQ Logger](plugins/rocketmq-logger.md)、[SkyWalking Logger](plugins/skywalking-logger.md)、[Alibaba Cloud Logging(SLS)](plugins/sls-logger.md)、[Google Cloud Logging](plugins/google-cloud-logging.md)、[Splunk HEC Logging](plugins/splunk-hec-logging.md)、[File Logger](plugins/file-logger.md)、[Elasticsearch Logger](plugins/elasticsearch-logger.md)、[TencentCloud CLS](plugins/tencent-cloud-cls.md)) + - [Helm charts](https://github.com/apache/apisix-helm-chart) + +- **高度可扩展** + - [自定义插件](plugin-develop.md):允许挂载常见阶段,例如`init`,`rewrite`,`access`,`balancer`,`header filter`,`body filter` 和 `log` 阶段。 + - [插件可以用 Java/Go/Python 编写](../../zh/latest/external-plugin.md) + - 自定义负载均衡算法:可以在 `balancer` 阶段使用自定义负载均衡算法。 + - 自定义路由:支持用户自己实现路由算法。 + +- **多语言支持** +- Apache APISIX 是一个通过 `RPC` 和 `Wasm` 支持不同语言来进行插件开发的网关。 + ![Multi Language Support into Apache APISIX](../../../docs/assets/images/external-plugin.png) + - RPC 是当前采用的开发方式。开发者可以使用他们需要的语言来进行 RPC 服务的开发,该 RPC 通过本地通讯来跟 APISIX 进行数据交换。到目前为止,APISIX 已支持[Java](https://github.com/apache/apisix-java-plugin-runner), [Golang](https://github.com/apache/apisix-go-plugin-runner), [Python](https://github.com/apache/apisix-python-plugin-runner) 和 Node.js。 + - Wasm 或 WebAssembly 是实验性的开发方式。APISIX 能加载运行使用[Proxy Wasm SDK](https://github.com/proxy-wasm/spec#sdks)编译的 Wasm 字节码。开发者仅需要使用该 
SDK 编写代码,然后编译成 Wasm 字节码,即可运行在 APISIX 中的 Wasm 虚拟机中。 + +- **Serverless** + - [Lua functions](plugins/serverless.md):能在 APISIX 每个阶段调用 lua 函数。 + - [Azure functions](./plugins/azure-functions.md):能无缝整合进 Azure Serverless Function 中。作为动态上游,能将特定的 URI 请求全部代理到微软 Azure 云中。 + - [Apache OpenWhisk](./plugins/openwhisk.md):与 Apache OpenWhisk 集成。作为动态上游,能将特定的 URI 请求代理到你自己的 OpenWhisk 集群。 + +## 立刻开始 + +1. 安装 + + 请参考[APISIX 安装指南](https://apisix.apache.org/zh/docs/apisix/installation-guide/)。 + +2. 入门指南 + + 入门指南是学习 APISIX 基础知识的好方法。按照 [入门指南](https://apisix.apache.org/zh/docs/apisix/getting-started/)的步骤即可。 + + 更进一步,你可以跟着文档来尝试更多的[插件](plugins)。 + +3. Admin API + + Apache APISIX 提供了 [REST Admin API](admin-api.md),方便动态控制 Apache APISIX 集群。 + +4. 插件二次开发 + + 可以参考[插件开发指南](plugin-develop.md),以及示例插件 `example-plugin` 的代码实现。 + 阅读[插件概念](terminology/plugin.md) 会帮助你学到更多关于插件的知识。 + +更多文档请参考 [Apache APISIX 文档站](https://apisix.apache.org/zh/docs/apisix/getting-started/)。 + +## 性能测试 + +使用 AWS 的 8 核心服务器来压测 APISIX,QPS 可以达到 140000,同时延时只有 0.2 毫秒。 + +[性能测试脚本](https://github.com/apache/apisix/blob/master/benchmark/run.sh) 已经开源,欢迎补充。 + +## 贡献者变化 + +> [访问此处](https://www.apiseven.com/contributor-graph) 使用贡献者数据服务。 + +[![贡献者变化](https://contributor-graph-api.apiseven.com/contributors-svg?repo=apache/apisix)](https://www.apiseven.com/en/contributor-graph?repo=apache/apisix) + +## 视频和文章 + +- 2020.10.16 [Apache APISIX: How to implement plugin orchestration in API Gateway](https://www.youtube.com/watch?v=iEegNXOtEhQ) +- 2020.10.16 [Improve Apache APISIX observability with Apache Skywalking](https://www.youtube.com/watch?v=DleVJwPs4i4) +- 2020.1.17 [API 网关 Apache APISIX 和 Kong 的选型对比](https://mp.weixin.qq.com/s/c51apneVj0O9yxiZAHF34Q) +- 2019.12.14 [从 0 到 1:Apache APISIX 的 Apache 之路](https://zhuanlan.zhihu.com/p/99620158) +- 2019.12.14 [基于 Apache APISIX 的下一代微服务架构](https://www.upyun.com/opentalk/445.html) +- 2019.10.30 [Apache APISIX 微服务架构极致性能架构解析](https://www.upyun.com/opentalk/440.html) +- 2019.9.27 [想把 APISIX 运行在 ARM64 
平台上?只要三步](https://zhuanlan.zhihu.com/p/84467919) +- 2019.8.31 [APISIX 技术选型、测试和持续集成](https://www.upyun.com/opentalk/433.html) +- 2019.8.31 [APISIX 高性能实战 2](https://www.upyun.com/opentalk/437.html) +- 2019.7.6 [APISIX 高性能实战](https://www.upyun.com/opentalk/429.html) + +## 用户实际使用案例 + +- [新浪微博:基于 Apache APISIX,新浪微博 API 网关的定制化开发之路](https://apisix.apache.org/zh/blog/2021/07/06/the-road-to-customization-of-sina-weibo-api-gateway-based-on-apache-apisix/) +- [欧盟数字工厂平台:API Security Gateway – Using APISIX in the eFactory Platform](https://www.efactory-project.eu/post/api-security-gateway-using-apisix-in-the-efactory-platform) +- [贝壳找房:如何基于 Apache APISIX 搭建网关](https://mp.weixin.qq.com/s/yZl9MWPyF1-gOyCp8plflA) +- [360:Apache APISIX 在基础运维平台项目中的实践](https://mp.weixin.qq.com/s/mF8w8hW4alIMww0MSu9Sjg) +- [HelloTalk:基于 OpenResty 和 Apache APISIX 的全球化探索之路](https://www.upyun.com/opentalk/447.html) +- [腾讯云:为什么选择 Apache APISIX 来实现 k8s ingress controller?](https://www.upyun.com/opentalk/448.html) +- [思必驰:为什么我们重新写了一个 k8s ingress controller?](https://mp.weixin.qq.com/s/bmm2ibk2V7-XYneLo9XAPQ) + +更多用户案例,请查看 [Case Studies](https://apisix.apache.org/zh/blog/tags/case-studies/)。 + +## APISIX 的用户有哪些? + +有很多公司和组织把 APISIX 用于学习、研究、生产环境和商业产品中,包括: + + + +欢迎用户把自己加入到 [Powered By](../../../powered-by.md) 页面。 + +## 全景图 + +

+   +

+APISIX 被纳入云原生软件基金会 API 网关全景图 +

+ +## Logo + +- [Apache APISIX logo(PNG)](../../../logos/apache-apisix.png) +- [Apache APISIX logo 源文件](https://apache.org/logos/#apisix) + +## 贡献 + +我们欢迎来自开源社区、个人和合作伙伴的各种贡献。 + +- [贡献指南](../../../CONTRIBUTING.md) + +## 致谢 + +灵感来自 Kong 和 Orange。 + +## 协议 + +[Apache 2.0 License](../../../LICENSE) diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/admin-api.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/admin-api.md new file mode 100644 index 0000000..3e90379 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/admin-api.md @@ -0,0 +1,1717 @@ +--- +title: Admin API +keywords: + - APISIX + - API 网关 + - Admin API + - 路由 + - 插件 + - 上游 +description: 本文介绍了 Apache APISIX Admin API 支持的功能,你可以通过 Admin API 来获取、创建、更新以及删除资源。 +--- + + + +## 描述 {#description} + +Admin API 是一组用于配置 Apache APISIX 路由、上游、服务、SSL 证书等功能的 RESTful API。 + +你可以通过 Admin API 来获取、创建、更新以及删除资源。同时得益于 APISIX 的热加载能力,资源配置完成后 APISIX 将会自动更新配置,无需重启服务。如果你想要了解其工作原理,请参考 [Architecture Design](./architecture-design/apisix.md)。 + +## 相关配置 {#basic-configuration} + +当 APISIX 启动时,Admin API 默认情况下将会监听 `9180` 端口,并且会占用前缀为 `/apisix/admin` 的 API。 + +因此,为了避免你设计的 API 与 `/apisix/admin` 冲突,你可以通过修改配置文件 [`/conf/config.yaml`](https://github.com/apache/apisix/blob/master/conf/config.yaml) 中的配置修改默认监听端口。 + +APISIX 支持设置 Admin API 的 IP 访问白名单,防止 APISIX 被非法访问和攻击。你可以在 `./conf/config.yaml` 文件中的 `deployment.admin.allow_admin` 选项中,配置允许访问的 IP 地址。 + +在下文出现的 `X-API-KEY` 指的是 `./conf/config.yaml` 文件中的 `deployment.admin.admin_key.key`,它是 Admin API 的访问 token。 + +:::tip 提示 + +建议你修改 Admin API 默认的监听端口、IP 访问白名单以及 Admin API 的 token,以保证你的 API 安全。 + +::: + +```yaml title="./conf/config.yaml" +deployment: + admin: + admin_key: + - name: admin + key: edd1c9f034335f136f87ad84b625c8f1 # 使用默认的 Admin API Key 存在安全风险,部署到生产环境时请及时更新 + role: admin + allow_admin: # http://nginx.org/en/docs/http/ngx_http_access_module.html#allow + - 127.0.0.0/24 + admin_listen: + ip: 0.0.0.0 # Admin API 监听的 IP,如果不设置,默认为“0.0.0.0”。 + port: 9180 # Admin 
API 监听的 端口,必须使用与 node_listen 不同的端口。 +``` + +### 使用环境变量 {#using-environment-variables} + +要通过环境变量进行配置,可以使用 `${{VAR}}` 语法。例如: + +```yaml title="./conf/config.yaml" +deployment: + admin: + admin_key: + - name: admin + key: ${{ADMIN_KEY}} + role: admin + allow_admin: + - 127.0.0.0/24 + admin_listen: + ip: 0.0.0.0 + port: 9180 +``` + +然后在 `make init` 之前运行 `export ADMIN_KEY=$your_admin_key`. + +如果找不到配置的环境变量,将抛出错误。 + +此外,如果要在未设置环境变量时使用默认值,请改用 `${{VAR:=default_value}}`。例如: + +```yaml title="./conf/config.yaml" +deployment: + admin: + admin_key: + - name: admin + key: ${{ADMIN_KEY:=edd1c9f034335f136f87ad84b625c8f1}} + role: admin + allow_admin: + - 127.0.0.0/24 + admin_listen: + ip: 0.0.0.0 + port: 9180 +``` + +首先查找环境变量 `ADMIN_KEY`,如果该环境变量不存在,它将使用 `edd1c9f034335f136f87ad84b625c8f1` 作为默认值。 + +您还可以在 yaml 键中指定环境变量。这在 `standalone` 模式 中特别有用,您可以在其中指定上游节点,如下所示: + +```yaml title="./conf/apisix.yaml" +routes: + - + uri: "/test" + upstream: + nodes: + "${{HOST_IP}}:${{PORT}}": 1 + type: roundrobin +#END +``` + +### 强制删除 {#force-delete} + +默认情况下,Admin API 会检查资源间的引用关系,将会拒绝删除正在使用中的资源。 + +可以通过在删除请求中添加请求参数 `force=true` 来进行强制删除,例如: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```bash +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H "X-API-KEY: $admin_key" -X PUT -d '{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" +}' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '{ + "uri": "/*", + "upstream_id": 1 +}' +{"value":{"priority":0,"upstream_id":1,"uri":"/*","create_time":1689038794,"id":"1","status":1,"update_time":1689038916},"key":"/apisix/routes/1"} + +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H "X-API-KEY: $admin_key" -X DELETE +{"error_msg":"can not delete this upstream, route [1] is still using it now"} +$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=anyvalue" -H 
"X-API-KEY: $admin_key" -X DELETE +{"error_msg":"can not delete this upstream, route [1] is still using it now"} +$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=true" -H "X-API-KEY: $admin_key" -X DELETE +{"deleted":"1","key":"/apisix/upstreams/1"} +``` + +## v3 版本新功能 {#v3-new-function} + +在 APISIX v3 版本中,Admin API 支持了一些不向下兼容的新特性,比如支持新的响应体格式、支持分页查询、支持过滤资源等。 + +### 支持新的响应体格式 {#support-new-response-body-format} + +APISIX 在 v3 版本对响应体做了以下调整: + +- 移除旧版本响应体中的 `action` 字段; +- 调整获取资源列表时的响应体结构,新的响应体结构示例如下: + +返回单个资源: + +```json + { + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 + } +``` + +返回多个资源: + +```json + { + "list": [ + { + "modifiedIndex": 2685183, + "value": { + "id": "1", + ... + }, + "key": "/apisix/routes/1", + "createdIndex": 2684956 + }, + { + "modifiedIndex": 2685163, + "value": { + "id": "2", + ... + }, + "key": "/apisix/routes/2", + "createdIndex": 2685163 + } + ], + "total": 2 + } +``` + +### 支持分页查询 {#support-paging-query} + +获取资源列表时支持分页查询,目前支持分页查询的资源如下: + +- [Consumer](#consumer) +- [Consumer Group](#consumer-group) +- [Global Rules](#global-rules) +- [Plugin Config](#plugin-config) +- [Protos](https://apisix.apache.org/zh/docs/apisix/plugins/grpc-transcode/#%E5%90%AF%E7%94%A8%E6%8F%92%E4%BB%B6) +- [Route](#route) +- [Service](#service) +- [SSL](#ssl) +- [Stream Route](#stream-route) +- [Upstream](#upstream) + +参数如下: + +| 名称 | 默认值 | 范围 | 描述 | +| --------- | ------ | -------- | -------------------------------------------------- | +| page | 1 | [1, ...] | 页数,默认展示第一页。 | +| page_size | | [10, 500]| 每页资源数量。如果不配置该参数,则展示所有查询到的资源。| + +示例如下: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes?page=1&page_size=10" \ +-H "X-API-KEY: $admin_key" -X GET +``` + +```json +{ + "total": 1, + "list": [ + { + ... 
+ } + ] +} +``` + +### 支持过滤资源 {#support-filtering-query} + +在 APISIX v3 版本中,在获取资源列表时,你可以使用 `name`、`label` 和 `uri` 参数过滤资源。支持参数如下: + +| 名称 | 描述 | +| ----- | ------------------------------------------------------------------------------------------------------------------------ | +| name | 根据资源的 `name` 属性进行查询,如果资源本身没有 `name` 属性则不会出现在查询结果中。 | +| label | 根据资源的 `label` 属性进行查询,如果资源本身没有 `label` 属性则不会出现在查询结果中。 | +| uri | 该参数仅在 Route 资源上支持。如果 Route 的 `uri` 等于查询的 `uri` 或 `uris` 包含查询的 `uri`,则该 Route 资源出现在查询结果中。 | + +:::tip 提示 + +当使用多个过滤参数时,APISIX 将对不同过滤参数的查询结果取交集。 + +::: + +以下示例将返回一个路由列表,该路由列表中的所有路由需要满足以下条件:路由的 `name` 包含字符串 `test`;`uri` 包含字符串 `foo`;对路由的 `label` 没有限制,因为 `label` 为空字符串。 + +```shell +curl 'http://127.0.0.1:9180/apisix/admin/routes?name=test&uri=foo&label=' \ +-H "X-API-KEY: $admin_key" -X GET +``` + +返回结果: + +```json +{ + "total": 1, + "list": [ + { + ... + } + ] +} +``` + +### 支持引用过滤资源 {#support-reference-filtering-query} + +:::note + +这个特性于 APISIX 3.13.0 引入。 + +APISIX 支持通过 `service_id` 和 `upstream_id` 查询路由和 Stream 路由。现在不支持其他资源或字段。 + +::: + +在获取资源列表时,你可以使用 `filter` 参数过滤资源。 + +它以以下方式编码: + +```text +filter=escape_uri(key1=value1&key2=value2) +``` + +以下是一个使用 `service_id` 进行路由列表过滤的例子。当同时设置了多个过滤条件,结果将为它们的交集。 + +```shell +curl 'http://127.0.0.1:9180/apisix/admin/routes?filter=service_id%3D1' \ +-H "X-API-KEY: $admin_key" -X GET +``` + +```json +{ + "total": 1, + "list": [ + { + ... 
+ } + ] +} +``` + +## Route + +Route 也称之为路由,可以通过定义一些规则来匹配客户端的请求,然后根据匹配结果加载并执行相应的插件,并把请求转发给到指定 Upstream(上游)。 + +### 请求地址 {#route-uri} + +路由资源请求地址:/apisix/admin/routes/{id}?ttl=0 + +### 请求方法 {#route-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ | -------------------------------- | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/routes | 无 | 获取资源列表。 | +| GET | /apisix/admin/routes/{id} | 无 | 获取资源。 | +| PUT | /apisix/admin/routes/{id} | {...} | 根据 id 创建资源。 | +| POST | /apisix/admin/routes | {...} | 创建资源,id 将会自动生成。 | +| DELETE | /apisix/admin/routes/{id} | 无 | 删除指定资源。 | +| PATCH | /apisix/admin/routes/{id} | {...} | 标准 PATCH,修改指定 Route 的部分属性,其他不涉及的属性会原样保留;如果你需要删除某个属性,可以将该属性的值设置为 `null`;当需要修改属性的值为数组时,该属性将全量更新。 | +| PATCH | /apisix/admin/routes/{id}/{path} | {...} | SubPath PATCH,通过 `{path}` 指定 Route 要更新的属性,全量更新该属性的数据,其他不涉及的属性会原样保留。两种 PATCH 的区别,请参考使用示例。 | + +### URI 请求参数 {#route-uri-request-parameters} + +| 名称 | 必选项 | 类型 | 描述 | 示例 | +| ---- | ------ | ---- | -------------------------------------------- | ----- | +| ttl | 否 | 辅助 | 路由的有效期。超过定义的时间,APISIX 将会自动删除路由,单位为秒。 | ttl=1 | + +### body 请求参数 {#route-request-body-parameters} + +| 名称 | 必选项 | 类型 | 描述 | 示例值 | +| ---------------- | -------------------------------- | -------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| ---------------------------------------------------- | +| uri | 是,与 `uris` 二选一。 | 匹配规则 | 除了如 `/foo/bar`、`/foo/gloo` 这种全量匹配外,使用不同 [Router](terminology/router.md) 还允许更高级匹配,更多信息请参考 [Router](terminology/router.md)。 | "/hello" | +| uris | 是,不能与 `uri` 二选一。 | 匹配规则 | 非空数组形式,可以匹配多个 `uri`。 | ["/hello", "/world"] | +| plugins | 否 | Plugin | 
Plugin 配置,请参考 [Plugin](terminology/plugin.md)。 | | +| script | 否 | Script | Script 配置,请参考 [Script](terminology/script.md)。 | | +| upstream | 否 | Upstream | Upstream 配置,请参考 [Upstream](terminology/upstream.md)。 | | +| upstream_id | 否 | Upstream | 需要使用的 Upstream id,请参考 [Upstream](terminology/upstream.md)。 | | +| service_id | 否 | Service | 需要绑定的 Service id,请参考 [Service](terminology/service.md)。 | | +| plugin_config_id | 否,不能与 Script 共同使用。 | Plugin | 需要绑定的 Plugin Config id,请参考 [Plugin Config](terminology/plugin-config.md)。 | | +| name | 否 | 辅助 | 路由名称。 | route-test | +| desc | 否 | 辅助 | 路由描述信息。 | 用来测试的路由。 | +| host | 否,与 `hosts` 二选一。 | 匹配规则 | 当前请求域名,比如 `foo.com`;也支持泛域名,比如 `*.foo.com`。 | "foo.com" | +| hosts | 否,与 `host` 二选一。 | 匹配规则 | 非空列表形态的 `host`,表示允许有多个不同 `host`,匹配其中任意一个即可。 | ["foo.com", "\*.bar.com"] | +| remote_addr | 否,与 `remote_addrs` 二选一。| 匹配规则 | 客户端请求的 IP 地址。支持 IPv4 地址,如:`192.168.1.101` 以及 CIDR 格式的支持 `192.168.1.0/24`;支持 IPv6 地址匹配,如 `::1`,`fe80::1`,`fe80::1/64` 等。 | "192.168.1.0/24" | +| remote_addrs | 否,与 `remote_addr` 二选一。| 匹配规则 | 非空列表形态的 `remote_addr`,表示允许有多个不同 IP 地址,符合其中任意一个即可。 | ["127.0.0.1", "192.0.0.0/8", "::1"] | +| methods | 否 | 匹配规则 | 如果为空或没有该选项,则表示没有任何 `method` 限制。你也可以配置一个或多个的组合:`GET`,`POST`,`PUT`,`DELETE`,`PATCH`,`HEAD`,`OPTIONS`,`CONNECT`,`TRACE`,`PURGE`。 | ["GET", "POST"] | +| priority | 否 | 匹配规则 | 如果不同路由包含相同的 `uri`,则根据属性 `priority` 确定哪个 `route` 被优先匹配,值越大优先级越高,默认值为 `0`。 | priority = 10 | +| vars | 否 | 匹配规则 | 由一个或多个`[var, operator, val]`元素组成的列表,类似 `[[var, operator, val], [var, operator, val], ...]]`。例如:`["arg_name", "==", "json"]` 则表示当前请求参数 `name` 是 `json`。此处 `var` 与 NGINX 内部自身变量命名是保持一致的,所以也可以使用 `request_uri`、`host` 等。更多细节请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | [["arg_name", "==", "json"], ["arg_age", ">", 18]] | +| filter_func | 否 | 匹配规则 | 用户自定义的过滤函数。可以使用它来实现特殊场景的匹配要求实现。该函数默认接受一个名为 `vars` 的输入参数,可以用它来获取 NGINX 变量。 | function(vars) return vars["arg_name"] == "json" end | +| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | 
{"version":"v2","build":"16","env":"production"} | +| timeout | 否 | 辅助 | 为 Route 设置 Upstream 连接、发送消息和接收消息的超时时间(单位为秒)。该配置将会覆盖在 Upstream 中配置的 [timeout](#upstream) 选项。 | {"connect": 3, "send": 3, "read": 3} | +| enable_websocket | 否 | 辅助 | 当设置为 `true` 时,启用 `websocket`(boolean), 默认值为 `false`。 | | +| status | 否 | 辅助 | 当设置为 `1` 时,启用该路由,默认值为 `1`。 | `1` 表示启用,`0` 表示禁用。 | + +:::note 注意 + +- 对于同一类参数比如 `uri`与 `uris`,`upstream` 与 `upstream_id`,`host` 与 `hosts`,`remote_addr` 与 `remote_addrs` 等,是不能同时存在,二者只能选择其一。如果同时启用,则会出现异常。 +- 在 `vars` 中,当获取 Cookie 的值时,Cookie name 是**区分大小写字母**的。例如:`var = cookie_x_foo` 与 `var = cookie_X_Foo` 表示不同的 `cookie`。 + +::: + +Route 对象 JSON 配置示例: + +```shell +{ + "id": "1", # id,非必填 + "uris": ["/a","/b"], # 一组 URL 路径 + "methods": ["GET","POST"], # 可以填多个方法 + "hosts": ["a.com","b.com"], # 一组 host 域名 + "plugins": {}, # 指定 route 绑定的插件 + "priority": 0, # apisix 支持多种匹配方式,可能会在一次匹配中同时匹配到多条路由,此时优先级高的优先匹配中 + "name": "路由 xxx", + "desc": "hello world", + "remote_addrs": ["127.0.0.1"], # 一组客户端请求 IP 地址 + "vars": [["http_user", "==", "ios"]], # 由一个或多个 [var, operator, val] 元素组成的列表 + "upstream_id": "1", # upstream 对象在 etcd 中的 id,建议使用此值 + "upstream": {}, # upstream 信息对象,建议尽量不要使用 + "timeout": { # 为 route 设置 upstream 的连接、发送消息、接收消息的超时时间。 + "connect": 3, + "send": 3, + "read": 3 + }, + "filter_func": "" # 用户自定义的过滤函数,非必填 +} +``` + +### 使用示例 {#route-example} + +- 创建一个路由: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "uri": "/index.html", + "hosts": ["foo.com", "*.bar.com"], + "remote_addrs": ["127.0.0.0/8"], + "methods": ["PUT", "GET"], + "enable_websocket": true, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }' + ``` + + ``` + HTTP/1.1 201 Created + Date: Sat, 31 Aug 2019 01:17:15 GMT + ... 
+ ``` + +- 创建一个有效期为 60 秒的路由,过期后自动删除: + + ```shell + curl 'http://127.0.0.1:9180/apisix/admin/routes/2?ttl=60' \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "uri": "/aa/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }' + ``` + + ``` + HTTP/1.1 201 Created + Date: Sat, 31 Aug 2019 01:17:15 GMT + ... + ``` + +- 在路由中新增一个上游节点: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + } + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,上游节点将更新为: + + ``` + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + ``` + +- 更新路由中上游节点的权重: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1981": 10 + } + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,上游节点将更新为: + + ``` + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 10 + } + ``` + +- 从路由中删除一个上游节点: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1980": null + } + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,Upstream `nodes` 将更新为: + + ```shell + { + "127.0.0.1:1981": 10 + } + ``` + +- 更新路由中的 `methods` 数组 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d '{ + "methods": ["GET", "POST"] + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,`methods` 将不保留原来的数据,将更新为: + + ``` + ["GET", "POST"] + ``` + +- 使用 `sub path` 更新路由中的上游节点: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1/upstream/nodes \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "127.0.0.1:1982": 1 + }' + ``` + + ``` + HTTP/1.1 200 OK + ... 
+ ``` + + 执行成功后,`nodes` 将不保留原来的数据,整个更新为: + + ``` + { + "127.0.0.1:1982": 1 + } + ``` + +- 使用 `sub path` 更新路由中的 `methods`: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1/methods \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d '["POST", "DELETE", "PATCH"]' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,`methods` 将不保留原来的数据,更新为: + + ``` + ["POST", "DELETE", "PATCH"] + ``` + +- 禁用路由 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "status": 0 + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,`status` 将更新为: + + ``` + { + "status": 0 + } + ``` + +- 启用路由 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "status": 1 + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,`status` 将更新为: + + ``` + { + "status": 1 + } + ``` + +### 应答参数 {#route-response-parameters} + +目前是直接返回与 etcd 交互后的结果。 + +## Service + +Service 是某类 API 的抽象(也可以理解为一组 Route 的抽象)。它通常与上游服务抽象是一一对应的,`Route` 与 `Service` 之间,通常是 N:1 的关系。 + +### 请求地址 {#service-uri} + +服务资源请求地址:/apisix/admin/services/{id} + +### 请求方法 {#service-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ | ---------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/services | 无 | 获取资源列表。 | +| GET | /apisix/admin/services/{id} | 无 | 获取资源。 | +| PUT | /apisix/admin/services/{id} | {...} | 创建指定 id 资源。 | +| POST | /apisix/admin/services | {...} | 创建资源,id 由后台服务自动生成。 | +| DELETE | /apisix/admin/services/{id} | 无 | 删除资源。 | +| PATCH | /apisix/admin/services/{id} | {...} | 标准 PATCH,修改已有 Service 的部分属性,其他不涉及的属性会原样保留;如果你要删除某个属性,将该属性的值设置为 null 即可删除;**注意**:当需要修改属性的值为数组时,该属性将全量更新。| +| PATCH | /apisix/admin/services/{id}/{path} | {...} | SubPath PATCH,通过 {path} 指定 Service 需要更新的属性,全量更新该属性的数据,其他不涉及的属性会原样保留。 | + +### 
body 请求参数 {#service-request-body-parameters} + +| 名称 | 必选项 | 类型 | 描述 | 示例 | +| ---------------- | ----------------------- | -------- | ---------------------------------------------------------------- | ------------------------------------------------ | +| plugins | 否 | Plugin | Plugin 配置,请参考 [Plugin](terminology/plugin.md)。 | | +| upstream | 与 `upstream_id` 二选一。 | Upstream | Upstream 配置,请参考 [Upstream](terminology/upstream.md)。 | | +| upstream_id | 与 `upstream` 二选一。 | Upstream | 需要使用的 upstream id,请参考 [Upstream](terminology/upstream.md)。| | +| name | 否 | 辅助 | 服务名称。 | | +| desc | 否 | 辅助 | 服务描述。 | | +| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | +| enable_websocket | 否 | 辅助 | `websocket`(boolean) 配置,默认值为 `false`。 | | +| hosts | 否 | 匹配规则 | 非空列表形态的 `host`,表示允许有多个不同 `host`,匹配其中任意一个即可。| ["foo.com", "\*.bar.com"] | + +Service 对象 JSON 配置示例: + +```shell +{ + "id": "1", # id + "plugins": {}, # 指定 service 绑定的插件 + "upstream_id": "1", # upstream 对象在 etcd 中的 id,建议使用此值 + "upstream": {}, # upstream 信息对象,不建议使用 + "name": "test svc", # service 名称 + "desc": "hello world", # service 描述 + "enable_websocket": true, # 启动 websocket 功能 + "hosts": ["foo.com"] +} +``` + +### 使用示例 {#service-example} + +- 创建一个 Service: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201 \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "enable_websocket": true, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }' + ``` + + ``` + HTTP/1.1 201 Created + ... + ``` + +- 在 Service 中添加一个上游节点: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + } + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... 
+ ``` + + 执行成功后,上游节点将更新为: + + ```json + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + ``` + +- 更新一个上游节点的权重: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1981": 10 + } + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,上游节点将更新为: + + ``` + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 10 + } + ``` + +- 删除 Service 中的一个上游节点: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "upstream": { + "nodes": { + "127.0.0.1:1980": null + } + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,上游节点将更新为: + + ``` + { + "127.0.0.1:1981": 10 + } + ``` + +- 替换 Service 的上游节点: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/201/upstream/nodes \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "127.0.0.1:1982": 1 + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,上游节点不再保留原来的数据,将更新为: + + ``` + { + "127.0.0.1:1982": 1 + } + ``` + +### 应答参数 {#service-response-parameters} + +目前是直接返回与 etcd 交互后的结果。 + +## Consumer + +Consumer 是某类服务的消费者,需要与用户认证体系配合才能使用。Consumer 使用 `username` 作为唯一标识,仅支持使用 HTTP `PUT` 方法创建 Consumer。 + +### 请求地址 {#consumer-uri} + +Consumer 资源请求地址:/apisix/admin/consumers/{username} + +### 请求方法 {#consumer-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ | ---------------------------------- | --------- | ------------- | +| GET | /apisix/admin/consumers | 无 | 获取资源列表。| +| GET | /apisix/admin/consumers/{username} | 无 | 获取资源。 | +| PUT | /apisix/admin/consumers | {...} | 创建资源。 | +| DELETE | /apisix/admin/consumers/{username} | 无 | 删除资源。 | + +### body 请求参数 {#consumer-body-request-methods} + +| 名称 | 必选项 | 类型 | 描述 | 示例值 | +| ----------- | ----- | ------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ | +| 
username | 是 | 辅助 | Consumer 名称。 | | +| group_id | 否 | 辅助 | Consumer Group 名称。 | | +| plugins | 否 | Plugin | 该 Consumer 对应的插件配置,它的优先级是最高的:Consumer > Route > Plugin Config > Service。对于具体插件配置,请参考 [Plugins](#plugin)。 | | +| desc | 否 | 辅助 | consumer 描述。 | | +| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | + +Consumer 对象 JSON 配置示例: + +```shell +{ + "plugins": {}, # 指定 consumer 绑定的插件 + "username": "name", # 必填 + "desc": "hello world" # consumer 描述 +} +``` + +当认证插件与 Consumer 一起使用时,需要提供用户名、密码等信息;当认证插件与 Route 或 Service 绑定时,则不需要任何参数,因为此时 APISIX 是根据用户请求数据来判断用户对应的是哪个 Consumer。 + +:::note 注意 + +从 APISIX v2.2 版本开始,同一个 Consumer 可以绑定多个认证插件。 + +::: + +### 使用示例 {#consumer-example} + +- 创建 Consumer,并指定认证插件 `key-auth`,并开启指定插件 `limit-count`: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }' + ``` + + ``` + HTTP/1.1 200 OK + Date: Thu, 26 Dec 2019 08:17:49 GMT + ... 
+ + {"key":"\/apisix\/consumers\/jack","value":{"username":"jack","update_time":1666260780,"plugins":{"limit-count":{"key_type":"var","count":2,"rejected_code":503,"show_limit_quota_header":true,"time_window":60,"key":"remote_addr","allow_degradation":false,"policy":"local"},"key-auth":{"key":"auth-one"}},"create_time":1666260780}} + ``` + +### 应答参数 {#consumer-response-parameters} + +目前是直接返回与 etcd 交互后的结果。 + +## Credential + +Credential 用以存放 Consumer 的认证凭证。当需要为 Consumer 配置多个凭证时,可以使用 Credential。 + +### 请求地址 {#credential-uri} + +Credential 资源请求地址:/apisix/admin/consumers/{username}/credentials/{credential_id} + +### 请求方法 {#consumer-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ |----------------------------------------------------------------| --------- | ------------- | +| GET | /apisix/admin/consumers/{username}/credentials | 无 | 获取资源列表。| +| GET | /apisix/admin/consumers/{username}/credentials/{credential_id} | 无 | 获取资源。 | +| PUT | /apisix/admin/consumers/{username}/credentials/{credential_id} | {...} | 创建资源。 | +| DELETE | /apisix/admin/consumers/{username}/credentials/{credential_id} | 无 | 删除资源。 | + +### body 请求参数 {#credential-body-request-methods} + +| 名称 | 必选项 | 类型 | 描述 | 示例值 | +| ----------- |-----| ------- |-----------------------| ------------------------------------------------ | +| plugins | 是 | Plugin | 该 Credential 对应的插件配置。 | | +| name | 否 | 辅助 | 消费者 Credential 名 | credential_primary | +| desc | 否 | 辅助 | Credential 描述。 | | +| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | + +Credential 对象 JSON 配置示例: + +```shell +{ + "plugins": { + "key-auth": { + "key": "auth-one" + } + }, + "desc": "hello world" +} +``` + +### 使用示例 {#credential-example} + +前提:已创建 Consumer `jack`。 + +创建 Credential,并启用认证插件 `key-auth`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/auth-one \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "plugins": { + "key-auth": { + "key": "auth-one" + } + } +}' +``` + 
+``` +HTTP/1.1 200 OK +Date: Thu, 26 Dec 2019 08:17:49 GMT +... + +{"key":"\/apisix\/consumers\/jack\/credentials\/auth-one","value":{"update_time":1666260780,"plugins":{"key-auth":{"key":"auth-one"}},"create_time":1666260780}} +``` + +## Upstream + +Upstream 是虚拟主机抽象,对给定的多个服务节点按照配置规则进行负载均衡。Upstream 的地址信息可以直接配置到 `Route`(或 `Service`) 上,当 Upstream 有重复时,需要用“引用”方式避免重复。 + +### 请求地址 {#upstream-uri} + +Upstream 资源请求地址:/apisix/admin/upstreams/{id} + +### 请求方法 {#upstream-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ | ----------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/upstreams/{id} | 无 | 获取资源。 | +| PUT | /apisix/admin/upstreams/{id} | {...} | 创建指定 id 的资源。 | +| POST | /apisix/admin/upstreams | {...} | 创建资源,id 由后台服务自动生成。 | +| DELETE | /apisix/admin/upstreams/{id} | 无 | 删除资源。 | +| PATCH | /apisix/admin/upstreams/{id} | {...} | 标准 PATCH,修改已有 Upstream 的部分属性,其他不涉及的属性会原样保留;如果需要删除某个属性,可将该属性的值设置为 `null`;**注意**:当需要修改属性的值为数组时,该属性将全量更新。| +| PATCH | /apisix/admin/upstreams/{id}/{path} | {...} | SubPath PATCH,通过 `{path}` 指定 Upstream 需要更新的属性,全量更新该属性的数据,其他不涉及的属性会原样保留。 | + +### body 请求参数 {#upstream-body-request-methods} + +APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上游做主被动健康检查、重试等逻辑。详细信息如下: + +| 名称 | 必选项 | 类型 | 描述 | 示例 | +| -------------- |-----------------------------------------------| -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ | +| type | 否 | 枚举 | 负载均衡算法,默认值是`roundrobin`。 | | | +| nodes | 是,与 `service_name` 二选一。 | Node | 
哈希表或数组。当它是哈希表时,内部元素的 key 是上游机器地址列表,格式为`地址 +(可选的)端口`,其中地址部分可以是 IP 也可以是域名,比如 `192.168.1.100:80`、`foo.com:80`等。对于哈希表的情况,如果 key 是 IPv6 地址加端口,则必须用中括号将 IPv6 地址括起来。`value` 则是节点的权重。当它是数组时,数组中每个元素都是一个哈希表,其中包含 `host`、`weight` 以及可选的 `port`、`priority`。`nodes` 可以为空,这通常用作占位符。客户端命中这样的上游会返回 `502`。 | `192.168.1.100:80`, `[::1]:80` | +| service_name | 是,与 `nodes` 二选一。 | string | 服务发现时使用的服务名,请参考 [集成服务发现注册中心](./discovery.md)。 | `a-bootiful-client` | +| discovery_type | 是,与 `service_name` 配合使用。 | string | 服务发现类型,请参考 [集成服务发现注册中心](./discovery.md)。 | `eureka` | +| key | 条件必需 | 匹配类型 | 该选项只有类型是 `chash` 才有效。根据 `key` 来查找对应的节点 `id`,相同的 `key` 在同一个对象中,则返回相同 id。目前支持的 NGINX 内置变量有 `uri, server_name, server_addr, request_uri, remote_port, remote_addr, query_string, host, hostname, arg_***`,其中 `arg_***` 是来自 URL 的请求参数,详细信息请参考 [NGINX 变量列表](http://nginx.org/en/docs/varindex.html)。 | | +| checks | 否 | health_checker | 配置健康检查的参数,详细信息请参考 [health-check](./tutorials/health-check.md)。 | | +| retries | 否 | 整型 | 使用 NGINX 重试机制将请求传递给下一个上游,默认启用重试机制且次数为后端可用的节点数量。如果指定了具体重试次数,它将覆盖默认值。当设置为 `0` 时,表示不启用重试机制。 | | +| retry_timeout | 否 | number | 限制是否继续重试的时间,若之前的请求和重试请求花费太多时间就不再继续重试。当设置为 `0` 时,表示不启用重试超时机制。 | | +| timeout | 否 | 超时时间对象 | 设置连接、发送消息、接收消息的超时时间,以秒为单位。| `{"connect": 0.5,"send": 0.5,"read": 0.5}` | +| hash_on | 否 | 辅助 | `hash_on` 支持的类型有 `vars`(NGINX 内置变量),`header`(自定义 header),`cookie`,`consumer`,默认值为 `vars`。 | +| name | 否 | 辅助 | 标识上游服务名称、使用场景等。 | | +| desc | 否 | 辅助 | 上游服务描述、使用场景等。 | | +| pass_host | 否 | 枚举 | 请求发给上游时的 `host` 设置选型。 [`pass`,`node`,`rewrite`] 之一,默认是 `pass`。`pass`: 将客户端的 host 透传给上游; `node`: 使用 `upstream` node 中配置的 `host`; `rewrite`: 使用配置项 `upstream_host` 的值。 | | +| upstream_host | 否 | 辅助 | 指定上游请求的 host,只在 `pass_host` 配置为 `rewrite` 时有效。 | | +| scheme | 否 | 辅助 | 跟上游通信时使用的 scheme。对于 7 层代理,可选值为 [`http`, `https`, `grpc`, `grpcs`]。对于 4 层代理,可选值为 [`tcp`, `udp`, `tls`]。默认值为 `http`,详细信息请参考下文。 | +| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | +| tls.client_cert | 否,不能和 
`tls.client_cert_id` 一起使用 | https 证书 | 设置跟上游通信时的客户端证书,详细信息请参考下文。 | | +| tls.client_key | 否,不能和 `tls.client_cert_id` 一起使用 | https 证书私钥 | 设置跟上游通信时的客户端私钥,详细信息请参考下文。 | | +| tls.client_cert_id | 否,不能和 `tls.client_cert`、`tls.client_key` 一起使用 | SSL | 设置引用的 SSL id,详见 [SSL](#ssl)。 | | +|keepalive_pool.size | 否 | 辅助 | 动态设置 `keepalive` 指令,详细信息请参考下文。 | +|keepalive_pool.idle_timeout | 否 | 辅助 | 动态设置 `keepalive_timeout` 指令,详细信息请参考下文。 | +|keepalive_pool.requests | 否 | 辅助 | 动态设置 `keepalive_requests` 指令,详细信息请参考下文。 | + +`type` 详细信息如下: + +- `roundrobin`: 带权重的 Round Robin。 +- `chash`: 一致性哈希。 +- `ewma`: 选择延迟最小的节点,请参考 [EWMA_chart](https://en.wikipedia.org/wiki/EWMA_chart)。 +- `least_conn`: 选择 `(active_conn + 1) / weight` 最小的节点。此处的 `active connection` 概念跟 NGINX 的相同,它是当前正在被请求使用的连接。 +- 用户自定义的 balancer,需要可以通过 `require("apisix.balancer.your_balancer")` 来加载。 + +`hash_on` 详细信息如下: + +- 设为 `vars` 时,`key` 为必传参数,目前支持的 NGINX 内置变量有 `uri, server_name, server_addr, request_uri, remote_port, remote_addr, query_string, host, hostname, arg_***`,其中 `arg_***` 是来自 URL 的请求参数。详细信息请参考 [NGINX 变量列表](http://nginx.org/en/docs/varindex.html)。 +- 设为 `header` 时,`key` 为必传参数,其值为自定义的 Header name,即 "http\_`key`"。 +- 设为 `cookie` 时,`key` 为必传参数,其值为自定义的 cookie name,即 "cookie\_`key`"。请注意 cookie name 是**区分大小写字母**的。例如:`cookie_x_foo` 与 `cookie_X_Foo` 表示不同的 `cookie`。 +- 设为 `consumer` 时,`key` 不需要设置。此时哈希算法采用的 `key` 为认证通过的 `consumer_name`。 + +以下特性需要 APISIX 运行于 [APISIX-Runtime](./FAQ.md#如何构建-APISIX-Runtime-环境?): + +- `scheme` 可以设置成 `tls`,表示 `TLS over TCP`。 +- `tls.client_cert/key` 可以用来跟上游进行 mTLS 通信。他们的格式和 SSL 对象的 `cert` 和 `key` 一样。 +- `tls.client_cert_id` 可以用来指定引用的 SSL 对象。只有当 SSL 对象的 `type` 字段为 client 时才能被引用,否则请求会被 APISIX 拒绝。另外,SSL 对象中只有 `cert` 和 `key` 会被使用。 +- `keepalive_pool` 允许 Upstream 有自己单独的连接池。它下属的字段,比如 `requests`,可以用于配置上游连接保持的参数。 + +Upstream 对象 JSON 配置示例: + +```shell +{ + "id": "1", # id + "retries": 1, # 请求重试次数 + "timeout": { # 设置连接、发送消息、接收消息的超时时间,每项都为 15 秒 + "connect":15, + "send":15, + "read":15 + }, + "nodes": {"host:80": 
100}, # 上游机器地址列表,格式为`地址 + 端口` + # 等价于 "nodes": [ {"host":"host", "port":80, "weight": 100} ], + "type":"roundrobin", + "checks": {}, # 配置健康检查的参数 + "hash_on": "", + "key": "", + "name": "upstream-xxx", # upstream 名称 + "desc": "hello world", # upstream 描述 + "scheme": "http" # 跟上游通信时使用的 scheme,默认是 `http` +} +``` + +### 使用示例 {#upstream-example} + +#### 创建 Upstream 并对 `nodes` 的数据进行修改 {#create-upstream} + +1. 创建 Upstream: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100 \ + -H "X-API-KEY: $admin_key" -i -X PUT -d ' + { + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980": 1 + } + }' + ``` + + ``` + HTTP/1.1 201 Created + ... + ``` + +2. 在 Upstream 中添加一个节点: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "nodes": { + "127.0.0.1:1981": 1 + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,`nodes` 将更新为: + + ``` + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + ``` + +3. 更新 Upstream 中单个节点的权重: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "nodes": { + "127.0.0.1:1981": 10 + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,`nodes` 将更新为: + + ``` + { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 10 + } + ``` + +4. 删除 Upstream 中的一个节点: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100 \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "nodes": { + "127.0.0.1:1980": null + } + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,`nodes` 将更新为: + + ``` + { + "127.0.0.1:1981": 10 + } + ``` + +5. 更新 Upstream 的 `nodes`: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/100/nodes \ + -H "X-API-KEY: $admin_key" -X PATCH -i -d ' + { + "127.0.0.1:1982": 1 + }' + ``` + + ``` + HTTP/1.1 200 OK + ... + ``` + + 执行成功后,`nodes` 将不再保留原来的数据: + + ``` + { + "127.0.0.1:1982": 1 + } + ``` + +#### 将客户端请求代理到上游 `https` 服务 {#proxy-https} + +1. 
创建 Route 并配置 Upstream 的 Scheme 为 `https`: + + ```shell + curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "uri": "/get", + "upstream": { + "type": "roundrobin", + "scheme": "https", + "nodes": { + "httpbin.org:443": 1 + } + } + }' + ``` + + 执行成功后,请求与上游通信时的 Scheme 将为 `https`。 + +2. 发送请求进行测试: + + ```shell + curl http://127.0.0.1:9080/get + ``` + + ```shell + { + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/7.29.0", + "X-Amzn-Trace-Id": "Root=1-6058324a-0e898a7f04a5e95b526bb183", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "127.0.0.1", + "url": "https://127.0.0.1/get" + } + ``` + + 请求成功,表示代理上游 `https` 生效了。 + + :::tip 提示 + + 每个节点均可以配置优先级,只有在高优先级的节点不可用或者尝试过,才会访问一个低优先级的节点。 + + ::: + + 由于上游节点的默认优先级是 `0`,你可以将一些节点的优先级设置为负数,让其作为备份节点。例如: + + ```JSON + { + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": [ + {"host": "127.0.0.1", "port": 1980, "weight": 2000}, + {"host": "127.0.0.1", "port": 1981, "weight": 1, "priority": -1} + ], + "checks": { + "active": { + "http_path": "/status", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 1 + } + } + } + } + } + ``` + + 节点 `127.0.0.2` 只有在 `127.0.0.1` 不可用或者尝试过之后才会被访问,因此它是 `127.0.0.1` 的备份。 + +### 应答参数 {#upstream-response-parameters} + +目前是直接返回与 etcd 交互后的结果。 + +## SSL + +你可以使用该资源创建 SSL 证书。 + +### 请求地址 {#ssl-uri} + +SSL 资源请求地址:/apisix/admin/ssls/{id} + +### 请求方法 {#ssl-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ | ----------------------- | --------- | ------------------------------- | +| GET | /apisix/admin/ssls | 无 | 获取资源列表。 | +| GET | /apisix/admin/ssls/{id} | 无 | 获取资源。 | +| PUT | /apisix/admin/ssls/{id} | {...} | 创建指定 id 的资源。 | +| POST | /apisix/admin/ssls | {...} | 创建资源,id 由后台服务自动生成。 | +| DELETE | /apisix/admin/ssls/{id} | 无 | 删除资源。 | + +### body 请求参数 {#ssl-body-request-methods} + +| 名称 | 必选项 | 类型 | 描述 | 示例 | +| ----------- | ------ | 
-------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------ | +| cert | 是 | 证书 | HTTP 证书。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | | +| key | 是 | 私钥 | HTTPS 证书私钥。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | | +| certs | 否 | 证书字符串数组 | 当你想给同一个域名配置多个证书时,除了第一个证书需要通过 `cert` 传递外,剩下的证书可以通过该参数传递上来。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | | +| keys | 否 | 私钥字符串数组 | `certs` 对应的证书私钥,需要与 `certs` 一一对应。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | | +| client.ca | 否 | 证书 | 设置将用于客户端证书校验的 `CA` 证书。该特性需要 OpenResty 为 1.19 及以上版本。 | | +| client.depth | 否 | 辅助 | 设置客户端证书校验的深度,默认为 1。该特性需要 OpenResty 为 1.19 及以上版本。 | | +| client.skip_mtls_uri_regex | 否 | PCRE 正则表达式数组 | 用来匹配请求的 URI,如果匹配,则该请求将绕过客户端证书的检查,也就是跳过 MTLS。 | ["/hello[0-9]+", "/foobar"] | +| snis | 是 | 匹配规则 | 非空数组形式,可以匹配多个 SNI。 | | +| desc | 否 | 辅助 | 证书描述。 | certs for production env | +| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | +| type | 否 | 辅助 | 标识证书的类型,默认值为 `server`。 | `client` 表示证书是客户端证书,APISIX 访问上游时使用;`server` 表示证书是服务端证书,APISIX 验证客户端请求时使用。 | +| status | 否 | 辅助 | 当设置为 `1` 时,启用此 SSL,默认值为 `1`。 | `1` 表示启用,`0` 表示禁用 | +| ssl_protocols | 否 | tls 协议字符串数组 | 用于控制服务器与客户端之间使用的 SSL/TLS 协议版本。更多的配置示例,请参考[SSL 协议](./ssl-protocol.md)。 | `["TLSv1.1", "TLSv1.2", "TLSv1.3"]` | + +SSL 对象 JSON 配置示例: + +```shell +{ + "id": "1", # id + "cert": "cert", # 证书 + "key": "key", # 私钥 + "snis": ["t.com"] # HTTPS 握手时客户端发送的 SNI +} +``` + +更多的配置示例,请参考[证书](./certificate.md)。 + +## Global Rules + +Global Rule 可以设置全局运行的插件,设置为全局规则的插件将在所有路由级别的插件之前优先运行。 + +### 请求地址 {#global-rule-uri} + +Global Rule 资源请求地址:/apisix/admin/global_rules/{id} + +### 请求方法 {#global-rule-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ | -------------------------------------- | --------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/global_rules | 无 | 获取资源列表。 | +| GET | /apisix/admin/global_rules/{id} | 无 | 获取资源。 | +| PUT | /apisix/admin/global_rules/{id} | {...} | 将创建指定 id 的资源。 | +| DELETE | /apisix/admin/global_rules/{id} | 无 | 删除资源。 | +| PATCH | /apisix/admin/global_rules/{id} | {...} | 标准 PATCH,修改已有 Global Rule 的部分属性,其他不涉及的属性会原样保留;如果你要删除某个属性,将该属性的值设置为 null 即可删除;**注意**:当需要修改属性的值为数组时,该属性将全量更新。 | +| PATCH | /apisix/admin/global_rules/{id}/{path} | {...} | SubPath PATCH,通过 `{path}` 指定 Global Rule 要更新的属性,全量更新该属性的数据,其他不涉及的属性会原样保留。 | + +### body 请求参数 {#global-rule-body-request-parameters} + +| 名称 | 必选项 | 类型 | 描述 | 示例值 | +| ----------- | ------ | ------ | ------------------------------------------------- | ---------- | +| plugins | 是 | Plugin | 插件配置。详细信息请参考 [Plugin](terminology/plugin.md)。 | | + +## Consumer Group + +你可以使用该资源配置一组可以在 Consumer 间复用的插件。 + +### 请求地址 {#consumer-group-uri} + +Consumer Group 资源请求地址:/apisix/admin/consumer_groups/{id} + +### 请求方法 {#consumer-group-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ | ----------------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/consumer_groups | 无 | 获取资源列表。 | +| GET | /apisix/admin/consumer_groups/{id} | 无 | 获取资源。 | +| PUT | /apisix/admin/consumer_groups/{id} | {...} | 将创建指定 id 的资源。 | +| DELETE | /apisix/admin/consumer_groups/{id} | 无 | 删除资源。 | +| PATCH | /apisix/admin/consumer_groups/{id} | {...} | 标准 PATCH,修改已有 Consumer Group 的部分属性,其他不涉及的属性会原样保留;如果你要删除某个属性,将该属性的值设置为 null 即可删除;**注意**:当需要修改属性的值为数组时,该属性将全量更新。 | +| PATCH | /apisix/admin/consumer_groups/{id}/{path} | {...} | SubPath PATCH,通过 `{path}` 指定 Consumer Group 
要更新的属性,全量更新该属性的数据,其他不涉及的属性会原样保留。 | + +### body 请求参数 {#consumer-group-body-request-parameters} + +| 名称 | 必选项 | 类型 | 描述 | 示例值 | +|--------- |--------- |------|----------------------------------------------- |------| +|plugins | 是 |Plugin| 插件配置。详细信息请参考 [Plugin](terminology/plugin.md)。 | | +|name | 否 | 辅助 | 消费者组名。 | premium-tier | +|desc | 否 | 辅助 | 标识描述、使用场景等。 | Consumer 测试。| +|labels | 否 | 辅助 | 标识附加属性的键值对。 |{"version":"v2","build":"16","env":"production"}| + +## Plugin Config + +你可以使用 Plugin Config 资源创建一组可以在路由间复用的插件。 + +### 请求地址 {#plugin-config-uri} + +Plugin Config 资源请求地址:/apisix/admin/plugin_configs/{id} + +### 请求方法 {#plugin-config-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ | ---------------------------------------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| GET | /apisix/admin/plugin_configs | 无 | 获取资源列表。 | +| GET | /apisix/admin/plugin_configs/{id} | 无 | 获取资源。 | +| PUT | /apisix/admin/plugin_configs/{id} | {...} | 根据 id 创建资源。 | +| DELETE | /apisix/admin/plugin_configs/{id} | 无 | 删除资源。 | +| PATCH | /apisix/admin/plugin_configs/{id} | {...} | 标准 PATCH,修改已有 Plugin Config 的部分属性,其他不涉及的属性会原样保留;如果你要删除某个属性,将该属性的值设置为 null 即可删除;**注意**:当需要修改属性的值为数组时,该属性将全量更新。 | +| PATCH | /apisix/admin/plugin_configs/{id}/{path} | {...} | SubPath PATCH,通过 {path} 指定 Plugin Config 要更新的属性,全量更新该属性的数据,其他不涉及的属性会原样保留。 | + +### body 请求参数 {#plugin-config-body-request-parameters} + +| 名称 | 必选项 | 类型 | 描述 | 示例值 | +|--------- |---------|----|-----------|----| +|plugins | 是 |Plugin| 更多信息请参考 [Plugin](terminology/plugin.md)。|| +|desc | 否 | 辅助 | 标识描述、使用场景等。 |customer xxxx| +|labels | 否 | 辅助 | 标识附加属性的键值对。 |{"version":"v2","build":"16","env":"production"}| + +## Plugin Metadata + +你可以使用 Plugin Metadata 资源配置插件元数据。 + +### 请求地址 {#plugin-metadata-uri} + +Plugin Config 资源请求地址:/apisix/admin/plugin_metadata/{plugin_name} + +### 请求方法 
{#plugin-metadata-request-methods} + +| Method | 请求 URI | 请求 body | 描述 | +| ------ | ------------------------------------------- | --------- | ------------------------- | +| GET | /apisix/admin/plugin_metadata/{plugin_name} | 无 | 获取资源。 | +| PUT | /apisix/admin/plugin_metadata/{plugin_name} | {...} | 根据 `plugin name` 创建资源。 | +| DELETE | /apisix/admin/plugin_metadata/{plugin_name} | 无 | 删除资源。 | + +### body 请求参数 {#plugin-metadata-body-request-parameters} + +根据插件 (`{plugin_name}`) 的 `metadata_schema` 定义的数据结构的 JSON 对象。 + +### 使用示例 {#plugin-metadata-example} + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/example-plugin \ +-H "X-API-KEY: $admin_key" -i -X PUT -d ' +{ + "skey": "val", + "ikey": 1 +}' +``` + +``` +HTTP/1.1 201 Created +Date: Thu, 26 Dec 2019 04:19:34 GMT +Content-Type: text/plain +``` + +## Plugin + +你可以通过该资源获取插件列表。 + +### 请求地址 {#plugin-uri} + +Plugin 资源请求地址:/apisix/admin/plugins/{plugin_name} + +### 请求参数 + +| 名称 | 描述 | 默认 | +| --------- | -------------------------------------- | -------- | +| subsystem | 插件子系统。 | http | + +可以在子系统上过滤插件,以便在通过查询参数传递的子系统中搜索 ({plugin_name}) + +### 请求方法 {#plugin-request-methods} + +| 名称        | 请求 URI | 请求 body | 描述          | +| ----------- | ----------------------------------- | ---------- | ------------- | +| GET       | /apisix/admin/plugins/list | 无 | 获取资源列表。 | +| GET       | /apisix/admin/plugins/{plugin_name} | 无 | 获取资源。 | +| GET | /apisix/admin/plugins?all=true | 无 | 获取所有插件的所有属性。 | +| GET | /apisix/admin/plugins?all=true&subsystem=stream| 无 | 获取所有 Stream 插件的属性。| +| GET | /apisix/admin/plugins?all=true&subsystem=http| 无 | 获取所有 HTTP 插件的属性。| +| PUT | /apisix/admin/plugins/reload | 无 | 根据代码中所做的更改重新加载插件。 | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=stream | 无 | 获取指定 Stream 插件的属性。 | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=http | 无 | 获取指定 HTTP 插件的属性。 | + +:::caution + +获取所有插件属性的接口 `/apisix/admin/plugins?all=true` 将很快被弃用。 + +::: + +### 使用示例 {#plugin-example} + +获取插件  
(`{plugin_name}`)  数据结构的 JSON 对象。 + +- 获取插件列表 + + ```shell + curl "http://127.0.0.1:9180/apisix/admin/plugins/list" \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' + ``` + + ```shell + ["zipkin","request-id",...] + ``` + +- 获取指定插件的属性 + + ```shell + curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth?subsystem=http" \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' + ``` + + ```json + {"$comment":"this is a mark for our injected plugin schema","properties":{"header":{"default":"apikey","type":"string"},"hide_credentials":{"default":false,"type":"boolean"},"_meta":{"properties":{"filter":{"type":"array","description":"filter determines whether the plugin needs to be executed at runtime"},"disable":{"type":"boolean"},"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"priority":{"type":"integer","description":"priority of plugins by customized order"}},"type":"object"},"query":{"default":"apikey","type":"string"}},"type":"object"} + ``` + +:::tip + +你可以使用 `/apisix/admin/plugins?all=true` 接口获取所有插件的所有属性,每个插件包括 `name`,`priority`,`type`,`schema`,`consumer_schema` 和 `version`。 + +这个 API 将很快被弃用。 + +::: + +## Stream Route + +Stream Route 是用于 TCP/UDP 动态代理的路由。详细信息请参考 [TCP/UDP 动态代理](./stream-proxy.md)。 + +### 请求地址 {#stream-route-uri} + +Plugin 资源请求地址:/apisix/admin/stream_routes/{id} + +### 请求方法 {#stream-route-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| ------ | --------------------------------- | --------- | -------------------------------------------------- | +| GET | /apisix/admin/stream_routes | 无 | 获取资源列表。 | +| GET | /apisix/admin/stream_routes/{id} | 无 | 获取资源。 | +| PUT | /apisix/admin/stream_routes/{id} | {...} | 创建指定 id 的资源。 | +| POST | /apisix/admin/stream_routes | {...} | 创建资源,id 由后台服务自动生成。 | +| DELETE | /apisix/admin/stream_routes/{id} | 无 | 删除资源。 | + +### body 请求参数{#stream-route-body-request-parameters} + +| 名称 | 必选项 | 类型 | 描述 | 示例值 | +| ---------------- | ------| -------- | 
------------------------------------------------------------------------------| ------ |
+| name | 否 | 辅助 | Stream 路由名。 | postgres-proxy |
+| desc | 否 | 辅助 | Stream 路由描述。 | proxy endpoint for postgresql |
+| labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"17","service":"user","env":"production"} |
+| upstream | 否 | Upstream | Upstream 配置,详细信息请参考 [Upstream](terminology/upstream.md)。 | |
+| upstream_id | 否 | Upstream | 需要使用的 Upstream id,详细信息请参考 [Upstream](terminology/upstream.md)。 | |
+| service_id | 否 | String | 需要使用的 [Service](terminology/service.md) id。 | |
+| remote_addr | 否 | IPv4, IPv4 CIDR, IPv6 | 过滤选项:如果客户端 IP 匹配,则转发到上游。 | "127.0.0.1" 或 "127.0.0.1/32" 或 "::1" |
+| server_addr | 否 | IPv4, IPv4 CIDR, IPv6 | 过滤选项:如果 APISIX 服务器的 IP 与 `server_addr` 匹配,则转发到上游。 | "127.0.0.1" 或 "127.0.0.1/32" 或 "::1" |
+| server_port | 否 | 整数 | 过滤选项:如果 APISIX 服务器的端口与 `server_port` 匹配,则转发到上游。 | 9090 |
+| sni | 否 | Host | 服务器名称。 | "test.com" |
+| protocol.name | 否 | 字符串 | xRPC 框架代理的协议的名称。 | "redis" |
+| protocol.conf | 否 | 配置 | 协议特定的配置。 | |
+
+你可以查看 [Stream Proxy](./stream-proxy.md#更多-route-匹配选项) 了解更多过滤器的信息。
+
+## Secret
+
+Secret 指的是 `Secrets Management`(密钥管理),可以使用任何支持的密钥管理器,例如 `vault`。
+
+### 请求地址 {#secret-config-uri}
+
+Secret 资源请求地址:/apisix/admin/secrets/{secretmanager}/{id}
+
+### 请求方法 {#secret-config-request-methods}
+
+| 名称 | 请求 URI | 请求 body | 描述 |
+| :--: | :----------------------------: | :---: | :---------------------------------------: |
+| GET | /apisix/admin/secrets | NULL | 获取所有 secret 的列表。 |
+| GET | /apisix/admin/secrets/{manager}/{id} | NULL | 根据 id 获取指定的 secret。 |
+| PUT | /apisix/admin/secrets/{manager} | {...} | 创建新的 secret 配置。 |
+| DELETE | /apisix/admin/secrets/{manager}/{id} | NULL | 删除具有指定 id 的 secret。 |
+| PATCH | /apisix/admin/secrets/{manager}/{id} | {...} | 更新指定 secret 的选定属性。如果要删除一个属性,可以将该属性的值设置为 null。|
+| PATCH | /apisix/admin/secrets/{manager}/{id}/{path} | {...} | 更新路径中指定的属性。其他属性的值保持不变。 |
+
+### body 请求参数 {#secret-config-body-requset-parameters}
+
+#### 当 
Secret Manager 是 Vault 时
+
+| 名称 | 必选项 | 类型 | 描述 | 例子 |
+| ----------- | -------- | ----------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------ |
+| uri | 是 | URI | Vault 服务器的 URI | |
+| prefix | 是 | 字符串 | 密钥前缀 | |
+| token | 是 | 字符串 | Vault 令牌 | |
+| namespace | 否 | 字符串 | Vault 命名空间,该字段无默认值 | `admin` |
+
+配置示例:
+
+```shell
+{
+    "uri": "https://localhost/vault",
+    "prefix": "/apisix/kv",
+    "token": "343effad"
+}
+
+```
+
+使用示例:
+
+```shell
+curl -i http://127.0.0.1:9180/apisix/admin/secrets/vault/test2 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "uri": "http://xxx/get",
+    "prefix" : "apisix",
+    "token" : "apisix"
+}'
+```
+
+```shell
+HTTP/1.1 200 OK
+...
+
+{"key":"\/apisix\/secrets\/vault\/test2","value":{"id":"vault\/test2","token":"apisix","prefix":"apisix","update_time":1669625828,"create_time":1669625828,"uri":"http:\/\/xxx\/get"}}
+```
+
+#### 当 Secret Manager 是 AWS 时
+
+| 名称 | 必选项 | 默认值 | 描述 |
+| ----------------- | ------ | --------------------------------------------- | ----------------------- |
+| access_key_id | 是 | | AWS 访问密钥 ID |
+| secret_access_key | 是 | | AWS 访问密钥 |
+| session_token | 否 | | 临时访问凭证信息 |
+| region | 否 | us-east-1 | AWS 区域 |
+| endpoint_url | 否 | https://secretsmanager.{region}.amazonaws.com | AWS Secret Manager 地址 |
+
+配置示例:
+
+```json
+{
+  "endpoint_url": "http://127.0.0.1:4566",
+  "region": "us-east-1",
+  "access_key_id": "access",
+  "secret_access_key": "secret",
+  "session_token": "token"
+}
+
+```
+
+使用示例:
+
+```shell
+curl -i http://127.0.0.1:9180/apisix/admin/secrets/aws/test3 \
+-H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "endpoint_url": "http://127.0.0.1:4566",
+    "region": "us-east-1",
+    "access_key_id": "access",
+    "secret_access_key": "secret",
+    "session_token": "token"
+}'
+```
+
+```shell
+HTTP/1.1 200 OK
+...
+ +{"value":{"create_time":1726069970,"endpoint_url":"http://127.0.0.1:4566","region":"us-east-1","access_key_id":"access","secret_access_key":"secret","id":"aws/test3","update_time":1726069970,"session_token":"token"},"key":"/apisix/secrets/aws/test3"} +``` + +#### 当 Secret Manager 是 GCP 时 + +| 名称 | 必选项 | 默认值 | 描述 | +| ------------------------ | ------ | ---------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | +| auth_config | 是 | | `auth_config` 和 `auth_file` 必须配置一个。 | +| auth_config.client_email | 是 | | 谷歌服务帐号的 email 参数。 | +| auth_config.private_key | 是 | | 谷歌服务帐号的私钥参数。 | +| auth_config.project_id | 是 | | 谷歌服务帐号的项目 ID。 | +| auth_config.token_uri | 否 | https://oauth2.googleapis.com/token | 请求谷歌服务帐户的令牌的 URI。 | +| auth_config.entries_uri | 否 | https://secretmanager.googleapis.com/v1 | 谷歌密钥服务访问端点 API。 | +| auth_config.scope | 否 | https://www.googleapis.com/auth/cloud-platform | 谷歌服务账号的访问范围,可参考 [OAuth 2.0 Scopes for Google APIs](https://developers.google.com/identity/protocols/oauth2/scopes) | +| auth_file | 是 | | `auth_config` 和 `auth_file` 必须配置一个。 | +| ssl_verify | 否 | true | 当设置为 `true` 时,启用 `SSL` 验证。 | + +配置示例: + +```json +{ + "auth_config" : { + "client_email": "email@apisix.iam.gserviceaccount.com", + "private_key": "private_key", + "project_id": "apisix-project", + "token_uri": "https://oauth2.googleapis.com/token", + "entries_uri": "https://secretmanager.googleapis.com/v1", + "scope": ["https://www.googleapis.com/auth/cloud-platform"] + } +} + +``` + +使用示例: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/secrets/gcp/test4 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "auth_config" : { + "client_email": "email@apisix.iam.gserviceaccount.com", + "private_key": "private_key", + "project_id": "apisix-project", + "token_uri": "https://oauth2.googleapis.com/token", + "entries_uri": "https://secretmanager.googleapis.com/v1", + "scope": 
["https://www.googleapis.com/auth/cloud-platform"] + } +}' +``` + +```shell +HTTP/1.1 200 OK +... + +{"value":{"id":"gcp/test4","ssl_verify":true,"auth_config":{"token_uri":"https://oauth2.googleapis.com/token","scope":["https://www.googleapis.com/auth/cloud-platform"],"entries_uri":"https://secretmanager.googleapis.com/v1","client_email":"email@apisix.iam.gserviceaccount.com","private_key":"private_key","project_id":"apisix-project"},"create_time":1726070161,"update_time":1726070161},"key":"/apisix/secrets/gcp/test4"} +``` + +### 应答参数 {#secret-config-response-parameters} + +当前的响应是从 etcd 返回的。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/apisix-variable.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/apisix-variable.md new file mode 100644 index 0000000..d122c70 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/apisix-variable.md @@ -0,0 +1,53 @@ +--- +title: APISIX 变量 +keywords: + - Apache APISIX + - API 网关 + - APISIX variable +description: 本文介绍了 Apache APISIX 支持的变量。 +--- + + + +## 描述 + +APISIX 除了支持 [NGINX 变量](http://nginx.org/en/docs/varindex.html)外,自身也提供了一些变量。 + +## 变量列表 + +| 变量名称 | 来源 | 描述 | 示例 | +|---------------------|----------- |--------------------------------------------------------------------------------- | ---------------- | +| balancer_ip | core | 上游服务器的 IP 地址。 | 192.168.1.2 | +| balancer_port | core | 上游服务器的端口。 | 80 | +| consumer_name | core | 消费者的名称。 | | +| consumer_group_id | core | 消费者所在的组的 ID。 | | +| graphql_name | core | GraphQL 的 [operation name](https://graphql.org/learn/queries/#operation-name)。 | HeroComparison | +| graphql_operation | core | GraphQL 的操作类型。 | mutation | +| graphql_root_fields | core | GraphQL 最高级别的字段。 | ["hero"] | +| mqtt_client_id | mqtt-proxy | MQTT 协议中的客户端 ID。 | | +| route_id | core | APISIX 路由的 ID。 | | +| route_name | core | APISIX 路由的名称。 | | +| service_id | core | APISIX 服务的 ID。 | | +| service_name | core | APISIX 服务的名称。 | | +| redis_cmd_line | Redis | Redis 命令的内容。 | 
| +| resp_body | core | 在 logger 插件中,如果部分插件支持记录响应的 body 信息,比如配置 `include_resp_body: true`,那可以在 log format 中使用该变量。| | +| rpc_time | xRPC | 在 RPC 请求级别所花费的时间。 | | + +当然,除上述变量外,你也可以创建自定义[变量](./plugin-develop.md#register-custom-variable)。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/architecture-design/apisix.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/architecture-design/apisix.md new file mode 100644 index 0000000..0a146d1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/architecture-design/apisix.md @@ -0,0 +1,48 @@ +--- +title: 软件架构 +keywords: + - 网关 + - Apache APISIX + - APISIX 架构 +description: 云原生网关 Apache APISIX 的软件架构 +--- + + + +Apache APISIX 是一个动态、实时、高性能的云原生 API 网关。它构建于 NGINX + ngx_lua 的技术基础之上,充分利用了 LuaJIT 所提供的强大性能。 [为什么 Apache APISIX 选择 NGINX+Lua 技术栈?](https://apisix.apache.org/zh/blog/2021/08/25/why-apache-apisix-chose-nginx-and-lua/)。 + +![软件架构](../../../assets/images/flow-software-architecture.png) + +APISIX 主要分为两个部分: + +1. APISIX 核心:包括 Lua 插件、多语言插件运行时(Plugin Runner)、Wasm 插件运行时等; +2. 
功能丰富的各种内置插件:包括可观测性、安全、流量控制等。 + +APISIX 在其核心中,提供了路由匹配、负载均衡、服务发现、API 管理等重要功能,以及配置管理等基础性模块。除此之外,APISIX 插件运行时也包含其中,提供原生 Lua 插件的运行框架和多语言插件的运行框架,以及实验性的 Wasm 插件运行时等。APISIX 多语言插件运行时提供多种开发语言的支持,比如 Golang、Java、Python、JS 等。 + +APISIX 目前也内置了各类插件,覆盖了 API 网关的各种领域,如认证鉴权、安全、可观测性、流量管理、多协议接入等。当前 APISIX 内置的插件使用原生 Lua 实现,关于各个插件的介绍与使用方式,可以查看相关[插件文档](https://apisix.apache.org/docs/apisix/plugins/batch-requests)。 + +## 插件加载流程 + +![插件加载流程](../../../assets/images/flow-load-plugin.png) + +## 插件内部结构 + +![插件内部结构](../../../assets/images/flow-plugin-internal.png) diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/batch-processor.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/batch-processor.md new file mode 100644 index 0000000..b8f0290 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/batch-processor.md @@ -0,0 +1,147 @@ +--- +title: 批处理器 +--- + + + +批处理器可用于聚合条目(日志/任何数据)并进行批处理。 +当 `batch_max_size` 设置为 1 时,处理器将立即执行每个条目。将批处理的最大值设置为大于 1 将开始聚合条目,直到达到最大值或超时。 + +## 配置 + +创建批处理器的唯一必需参数是函数。当批处理达到最大值或缓冲区持续时间超过时,函数将被执行。 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------ | ------- | ------------------------------------------------------------ | +| name | string | 可选 | xxx logger | ["http logger", "Some strings",...] | 用于标识批处理器的唯一标识符,默认为调用批处理器的日志插件名字,如配置插件为 `http logger`,name 默认为 http logger。 | +| batch_max_size | integer | 可选 | 1000 | [1,...] | 设置每批发送日志的最大条数,当日志条数达到设置的最大值时,会自动推送全部日志到 HTTP/HTTPS 服务。 | +| inactive_timeout | integer | 可选 | 5 | [1,...] | 刷新缓冲区的最大时间(以秒为单位),当达到最大的刷新时间时,无论缓冲区中的日志数量是否达到设置的最大条数,也会自动将全部日志推送到 HTTP/HTTPS 服务。 | +| buffer_duration | integer | 可选 | 60 | [1,...] | 必须先处理批次中最旧条目的最长期限(以秒为单位)。 | +| max_retry_count | integer | 可选 | 0 | [0,...] | 从处理管道中移除之前的最大重试次数。 | +| retry_delay | integer | 可选 | 1 | [0,...] | 如果执行失败,则应延迟执行流程的秒数。 | +以下代码显示了如何在你的插件中使用批处理器: + +```lua +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +... 
+ +local plugin_name = "xxx-logger" +local batch_processor_manager = bp_manager_mod.new(plugin_name) +local schema = {...} +local _M = { + ... + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), +} + +... + + +function _M.log(conf, ctx) + local entry = {...} -- data to log + + if batch_processor_manager:add_entry(conf, entry) then + return + end + -- create a new processor if not found + + -- entries is an array table of entry, which can be processed in batch + local func = function(entries) + -- serialize to json array core.json.encode(entries) + -- process/send data + return true + -- return false, err_msg, first_fail if failed + -- first_fail(optional) indicates first_fail-1 entries have been successfully processed + -- and during processing of entries[first_fail], the error occurred. So the batch processor + -- only retries for the entries having index >= first_fail as per the retry policy. + end + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func) +end +``` + +批处理器的配置将通过该插件的配置设置。 +举个例子: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": "http://mockbin.org/bin/:ID", + "batch_max_size": 10, + "max_retry_count": 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +如果你的插件只使用一个全局的批处理器, +你可以直接使用它: + +```lua +local entry = {...} -- data to log +if log_buffer then + log_buffer:push(entry) + return +end + +local config_bat = { + name = config.name, + retry_delay = config.retry_delay, + ... +} + +local err +-- entries is an array table of entry, which can be processed in batch +local func = function(entries) + ... 
+ return true + -- return false, err_msg, first_fail if failed +end +log_buffer, err = batch_processor:new(func, config_bat) + +if not log_buffer then + core.log.warn("error when creating the batch processor: ", err) + return +end + +log_buffer:push(entry) +``` + +注意:请确保批处理的最大值(条目数)在函数执行的范围内。 +刷新批处理的计时器基于 `inactive_timeout` 配置运行。因此,为了获得最佳使用效果, +保持 `inactive_timeout` 小于 `buffer_duration`。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/benchmark.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/benchmark.md new file mode 100644 index 0000000..2ceda91 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/benchmark.md @@ -0,0 +1,150 @@ +--- +title: 压力测试 +--- + + + +### 测试环境 + +使用谷歌云的服务器进行测试,型号为 n1-highcpu-8 (8 vCPUs, 7.2 GB memory) + +我们最多只使用 4 核去运行 APISIX,剩下的 4 核用于系统和压力测试工具 [wrk](https://github.com/wg/wrk)。 + +### 测试反向代理 + +我们把 APISIX 当做反向代理来使用,不开启任何插件,响应体的大小为 1KB。 + +#### QPS + +下图中 x 轴为 CPU 的使用个数,y 轴为每秒处理的请求数: + +![benchmark-1](../../assets/images/benchmark-1.jpg) + +#### 延时 + +请注意 y 轴延时的单位是**微秒(μs)**,而不是毫秒: + +![latency-1](../../assets/images/latency-1.jpg) + +#### 火焰图 + +火焰图的采样结果: + +![flamegraph-1](../../assets/images/flamegraph-1.jpg) + +如果你需要在本地服务器上运行基准测试,你需要同时运行另一个 NGINX 实例来监听 80 端口: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1, + "127.0.0.2:80": 1 + } + } +}' +``` + +在完成配置并安装 [wrk](https://github.com/wg/wrk/) 之后,可以使用以下命令进行测试: + +```bash +wrk -d 60 --latency http://127.0.0.1:9080/hello +``` + +### 测试反向代理,开启 2 个插件 + +我们把 APISIX 当做反向代理来使用,开启限速和 prometheus 插件,响应体的大小为 1KB。 + +#### QPS + +下图中 x 轴为 CPU 的使用个数,y 轴为每秒处理的请求数: + +![benchmark-2](../../assets/images/benchmark-2.jpg) + +#### 
Latency + +请注意 y 轴延时的单位是**微秒(μs)**,而不是毫秒: + +![latency-2](../../assets/images/latency-2.jpg) + +#### 火焰图 + +火焰图的采样结果: +![火焰图采样结果](../../assets/images/flamegraph-2.jpg) + +如果你需要在本地服务器上运行基准测试,你需要同时运行另一个 NGINX 实例来监听 80 端口: + +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 999999999, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + }, + "prometheus":{} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1, + "127.0.0.2:80": 1 + } + } +}' +``` + +在完成配置并安装 [wrk](https://github.com/wg/wrk/) 之后,可以使用以下命令进行测试: + +```bash +wrk -d 60 --latency http://127.0.0.1:9080/hello +``` + +有关如何运行基准测试的更多参考,你可以查看此[PR](https://github.com/apache/apisix/pull/6136)和此[脚本](https://gist.github.com/membphis/137db97a4bf64d3653aa42f3e016bd01)。 + +:::tip + +如果您想使用大量连接运行基准测试,您可能需要更新 [**keepalive**](https://github.com/apache/apisix/blob/master/conf/config.yaml.example#L241) 配置,将配置添加到 [`config.yaml`](https://github.com/apache/apisix/blob/master/conf/config.yaml) 并重新加载 APISIX。否则超过配置数量的连接将成为短连接。你可以使用以下命令运行大量连接的基准测试: + +```bash +wrk -t200 -c5000 -d30s http://127.0.0.1:9080/hello +``` + +如果你需要了解更多信息,请参考:[ngx_http_upstream_module](http://nginx.org/en/docs/http/ngx_http_upstream_module.html)。 + +::: diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/build-apisix-dev-environment-on-mac.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/build-apisix-dev-environment-on-mac.md new file mode 100644 index 0000000..e38c0b3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/build-apisix-dev-environment-on-mac.md @@ -0,0 +1,94 @@ +--- +id: build-apisix-dev-environment-on-mac +title: 在 Mac 上构建开发环境 +description: 本文介绍了如何用 Docker 的方式在 Mac 上快速构建 API 网关 Apache APISIX 的开发环境。 +--- + + + +如果你希望快速的在你的 Mac 平台上构建和开发 APISIX,你可以参考本教程。 + +:::note + +本教程适合需要在 Mac 平台快速开始入门阶段开发的情况,如果你想要更进一步,有更好的开发体验,更好的选择是 
Linux-based 虚拟机,或是直接使用这类系统作为你的开发环境。 + +你可以在[这里](install-dependencies.md#安装)看到具体支持的系统。 + +::: + +## 快速构建 Apache APISIX 开发环境 + +### 实现思路 + +我们通过 Docker 来构建 Apache APISIX 的测试环境,在容器启动时将 Apache APISIX 的源代码挂载到容器内,就可以做到在容器内构建以及运行测试用例。 + +### 实现步骤 + +首先,我们需要拉取 APISIX 源码,并构建一个可以运行测试用例以及编译运行 Apache APISIX 的镜像: + +```shell +git clone https://github.com/apache/apisix.git +cd apisix +docker build -t apisix-dev-env -f example/build-dev-image.dockerfile . +``` + +然后,我们要启动 Etcd: + +```shell +docker run -d --name etcd-apisix --net=host pachyderm/etcd:v3.5.2 +``` + +挂载 APISIX 目录并启动开发环境容器: + +```shell +docker run -d --name apisix-dev-env --net=host -v $(pwd):/apisix:rw apisix-dev-env:latest +``` + +最后,构建 Apache APISIX 运行时并配置测试环境: + +```shell +docker exec -it apisix-dev-env make deps +docker exec -it apisix-dev-env ln -s /usr/bin/openresty /usr/bin/nginx +``` + +### 启动和停止 APISIX + +```shell +docker exec -it apisix-dev-env make run +docker exec -it apisix-dev-env make stop +``` + +:::note + +如果你在运行 `make run` 时收到类似 `nginx: [emerg] bind() to unix:/apisix/logs/worker_events.sock failed (95: Operation not supported)` 的错误消息,请使用此解决方案。 + +更改你的 Docker-Desktop 的 `File Sharing` 设置: + +![Docker-Desktop File Sharing 设置](../../assets/images/update-docker-desktop-file-sharing.png) + +修改为 `gRPC FUSE` 或 `osxfs` 都可以解决此问题。 + +::: + +### 运行指定测试用例 + +```shell +docker exec -it apisix-dev-env prove t/admin/routes.t +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/building-apisix.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/building-apisix.md new file mode 100644 index 0000000..2f168b1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/building-apisix.md @@ -0,0 +1,265 @@ +--- +id: building-apisix +title: 源码安装 APISIX +keywords: + - API 网关 + - Apache APISIX + - 贡献代码 + - 构建 APISIX + - 源码安装 APISIX +description: 本文介绍了如何在本地使用源码安装 API 网关 Apache APISIX 来构建开发环境。 +--- + + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +如果你希望为 
APISIX 做出贡献或配置开发环境,你可以参考本教程。
+
+如果你想通过其他方式安装 APISIX,你可以参考[安装指南](./installation-guide.md)。
+
+:::note
+
+如果你想为特定的环境或打包 APISIX,请参考 [apisix-build-tools](https://github.com/api7/apisix-build-tools)。
+
+:::
+
+## 源码安装 APISIX
+
+首先,我们需要指定需要安装的版本 `APISIX_BRANCH`:
+
+```shell
+APISIX_BRANCH='release/3.13'
+```
+
+然后,你可以运行以下命令,从 Github 克隆 APISIX 源码:
+
+```shell
+git clone --depth 1 --branch ${APISIX_BRANCH} https://github.com/apache/apisix.git apisix-${APISIX_BRANCH}
+```
+
+你可以从[下载页面](https://apisix.apache.org/downloads/)下载源码包。但是官网的源码包缺少测试用例,可能会对你后续操作产生困扰。
+
+另外,你也可以在该页面找到 APISIX Dashboard 和 APISIX Ingress Controller 的源码包。
+
+安装之前,请安装[OpenResty](https://openresty.org/en/installation.html)。
+
+然后切换到 APISIX 源码的目录,创建依赖项并安装 APISIX,命令如下所示:
+
+```shell
+cd apisix-${APISIX_BRANCH}
+make deps
+make install
+```
+
+该命令将安装 APISIX 运行时依赖的 Lua 库以及 `apisix-runtime` 和 `apisix` 命令。
+
+:::note
+
+如果你在运行 `make deps` 时收到类似 `Could not find header file for LDAP/PCRE/openssl` 的错误消息,请使用此解决方案。
+
+`luarocks` 支持自定义编译时依赖项(请参考:[配置文件格式](https://github.com/luarocks/luarocks/wiki/Config-file-format))。你可以使用第三方工具安装缺少的软件包并将其安装目录添加到 `luarocks` 变量表中。此方法适用于 macOS、Ubuntu、CentOS 和其他类似操作系统。
+
+此处仅给出 macOS 的具体解决步骤,其他操作系统的解决方案类似:
+
+1. 安装 `openldap`:
+
+   ```shell
+   brew install openldap
+   ```
+
+2. 使用以下命令找到本地安装目录:
+
+   ```shell
+   brew --prefix openldap
+   ```
+
+3. 将路径添加到项目配置文件中(选择两种方法中的一种即可):
+   1. 你可以使用 `luarocks config` 命令设置 `LDAP_DIR`:
+
+      ```shell
+      luarocks config variables.LDAP_DIR /opt/homebrew/cellar/openldap/2.6.1
+      ```
+
+   2. 
你还可以更改 `luarocks` 的默认配置文件。打开 `~/.luaorcks/config-5.1.lua` 文件并添加以下内容: + + ```shell + variables = { LDAP_DIR = "/opt/homebrew/cellar/openldap/2.6.1", LDAP_INCDIR = "/opt/homebrew/cellar/openldap/2.6.1/include", } + ``` + + `/opt/homebrew/cellar/openldap/` 是 `brew` 在 macOS(Apple Silicon) 上安装 `openldap` 的默认位置。`/usr/local/opt/openldap/` 是 brew 在 macOS(Intel) 上安装 openldap 的默认位置。 + +::: + +如果你不再需要 APISIX,可以执行以下命令卸载: + +```shell +make uninstall && make undeps +``` + +:::danger + +该操作将删除所有相关文件。 + +::: + +## 安装 etcd + +APISIX 默认使用 [etcd](https://github.com/etcd-io/etcd) 来保存和同步配置。在运行 APISIX 之前,你需要在你的机器上安装 etcd。 + + + + +```shell +ETCD_VERSION='3.4.18' +wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz +tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \ + cd etcd-v${ETCD_VERSION}-linux-amd64 && \ + sudo cp -a etcd etcdctl /usr/bin/ +nohup etcd >/tmp/etcd.log 2>&1 & +``` + + + + + +```shell +brew install etcd +brew services start etcd +``` + + + + +## 管理 APISIX 服务 + +运行以下命令初始化 NGINX 配置文件和 etcd。 + +```shell +apisix init +``` + +:::tip + +你可以运行 `apisix help` 命令,查看返回结果,获取其他操作命令及其描述。 + +::: + +运行以下命令测试配置文件,APISIX 将根据 `config.yaml` 生成 `nginx.conf`,并检查 `nginx.conf` 的语法是否正确。 + +```shell +apisix test +``` + +最后,你可以使用以下命令运行 APISIX。 + +```shell +apisix start +``` + +如果需要停止 APISIX,你可以使用 `apisix quit` 或者 `apisix stop` 命令。 + +`apisix quit` 将正常关闭 APISIX,该指令确保在停止之前完成所有收到的请求。 + +```shell +apisix quit +``` + +`apisix stop` 命令会强制关闭 APISIX 并丢弃所有请求。 + +```shell +apisix stop +``` + +## 为 APISIX 构建 APISIX-Runtime + +APISIX 的一些特性需要在 OpenResty 中引入额外的 NGINX 模块。 + +如果要使用这些功能,你需要构建一个自定义的 OpenResty 发行版(APISIX-Runtime)。请参考 [apisix-build-tools](https://github.com/api7/apisix-build-tools) 配置你的构建环境并进行构建。 + +## 运行测试用例 + +以下步骤展示了如何运行 APISIX 的测试用例: + +1. 安装 `perl` 的包管理器 [cpanminus](https://metacpan.org/pod/App::cpanminus#INSTALLATION)。 +2. 
通过 `cpanm` 来安装 [test-nginx](https://github.com/openresty/test-nginx) 的依赖: + + ```shell + sudo cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1) + ``` + +3. 将 `test-nginx` 源码克隆到本地: + + ```shell + git clone https://github.com/openresty/test-nginx.git + ``` + +4. 运行以下命令将当前目录添加到 Perl 的模块目录: + + ```shell + export PERL5LIB=.:$PERL5LIB + ``` + + 你可以通过运行以下命令指定 NGINX 二进制路径: + + ```shell + TEST_NGINX_BINARY=/usr/local/bin/openresty prove -Itest-nginx/lib -r t + ``` + +5. 运行测试: + + ```shell + make test + ``` + +:::note + +部分测试需要依赖外部服务和修改系统配置。如果想要完整地构建测试环境,请参考 [ci/linux_openresty_common_runner.sh](https://github.com/apache/apisix/blob/master/ci/linux_openresty_common_runner.sh)。 + +::: + +### 故障排查 + +以下是运行 APISIX 测试用例的常见故障排除步骤。 + +出现 `Error unknown directive "lua_package_path" in /API_ASPIX/apisix/t/servroot/conf/nginx.conf` 报错,是因为默认的 NGINX 安装路径未找到,解决方法如下: + +- Linux 默认安装路径: + + ```shell + export PATH=/usr/local/openresty/nginx/sbin:$PATH + ``` + +### 运行指定的测试用例 + +使用以下命令运行指定的测试用例: + +```shell +prove -Itest-nginx/lib -r t/plugin/openid-connect.t +``` + +如果你想要了解更多信息,请参考 [testing framework](../../en/latest/internal/testing-framework.md)。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/certificate.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/certificate.md new file mode 100644 index 0000000..f85e663 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/certificate.md @@ -0,0 +1,324 @@ +--- +title: 证书 +--- + + + +`APISIX` 支持通过 TLS 扩展 SNI 实现加载特定的 SSL 证书以实现对 https 的支持。 + +SNI(Server Name Indication)是用来改善 SSL 和 TLS 的一项特性,它允许客户端在服务器端向其发送证书之前向服务器端发送请求的域名,服务器端根据客户端请求的域名选择合适的 SSL 证书发送给客户端。 + +### 单一域名指定 + +通常情况下一个 SSL 证书只包含一个静态域名,配置一个 `ssl` 参数对象,它包括 `cert`、`key`和`sni`三个属性,详细如下: + +* `cert`:SSL 密钥对的公钥,pem 格式 +* `key`:SSL 密钥对的私钥,pem 格式 +* `snis`:SSL 证书所指定的一个或多个域名,注意在设置这个参数之前,你需要确保这个证书对应的私钥是有效的。 + +创建一个包含证书和密钥,单一域名 SNI 的 SSL 对象: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash 
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat t/certs/apisix.crt)"'", + "key": "'"$(cat t/certs/apisix.key)"'", + "snis": ["test.com"] +}' +``` + +创建路由: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/get", + "hosts": ["test.com"], + "methods": ["GET"], + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' +``` + +测试: + +```shell +curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/get -k -vvv + +* Added test.com:9443:127.0.0.1 to DNS cache +* About to connect() to test.com port 9443 (#0) +* Trying 127.0.0.1... +* Connected to test.com (127.0.0.1) port 9443 (#0) +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* start date: Jun 24 22:18:05 2019 GMT +* expire date: May 31 22:18:05 2119 GMT +* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* SSL certificate verify result: self-signed certificate (18), continuing anyway. 
+> GET /get HTTP/2 +> Host: test.com:9443 +> user-agent: curl/7.81.0 +> accept: */* +``` + +### 泛域名 + +一个 SSL 证书的域名也可能包含泛域名,如 `*.test.com`,它代表所有以 `test.com` 结尾的域名都可以使用该证书。比如 `*.test.com`,可以匹配 `www.test.com`、`mail.test.com`。 + +以下是在 APISIX 中配置泛域名 SNI 的 SSL 证书的示例。 + +创建一个包含证书和密钥,泛域名 SNI 的 SSL 对象: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat t/certs/apisix.crt)"'", + "key": "'"$(cat t/certs/apisix.key)"'", + "snis": ["*.test.com"] +}' +``` + +创建路由: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/hello", + "hosts": ["*.test.com"], + "methods": ["GET"], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +测试: + +```shell +curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/get -k -vvv + +* Added www.test.com:9443:127.0.0.1 to DNS cache +* Hostname www.test.com was found in DNS cache +* Trying 127.0.0.1:9443... +* Connected to www.test.com (127.0.0.1) port 9443 (#0) +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* start date: Jun 24 22:18:05 2019 GMT +* expire date: May 31 22:18:05 2119 GMT +* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* SSL certificate verify result: self signed certificate (18), continuing anyway. 
+> GET /get HTTP/2 +> Host: www.test.com:9443 +> user-agent: curl/7.74.0 +> accept: */* +``` + +### 多域名的情况 + +如果一个 SSL 证书包含多个独立域名,比如 `www.test.com` 和 `mail.test.com`,你可以把它们都放入 `snis` 数组中,就像这样: + +```json +{ + "snis": ["www.test.com", "mail.test.com"] +} +``` + +### 单域名多证书的情况 + +如果你期望为一个域名配置多张证书,例如以此来同时支持使用 ECC 和 RSA +的密钥交换算法,那么你可以将额外的证书和私钥(第一张证书和其私钥依然使用 `cert` 和 `key`)配置在 `certs` 和 `keys` 中。 + +* `certs`:PEM 格式的 SSL 证书列表 +* `keys`:PEM 格式的 SSL 证书私钥列表 + +`APISIX` 会将相同下标的证书和私钥配对使用,因此 `certs` 和 `keys` 列表的长度必须一致。 + +### 设置多个 CA 证书 + +APISIX 目前支持在多处设置 CA 证书,比如 [保护 Admin API](./mtls.md#保护-admin-api),[保护 ETCD](./mtls.md#保护-etcd),以及 [部署模式](../../en/latest/deployment-modes.md) 等。 + +在这些地方,使用 `ssl_trusted_certificate` 或 `trusted_ca_cert` 来配置 CA 证书,但是这些配置最终将转化为 OpenResty 的 [lua_ssl_trusted_certificate](https://github.com/openresty/lua-nginx-module#lua_ssl_trusted_certificate) 指令。 + +如果你需要在不同的地方指定不同的 CA 证书,你可以将这些 CA 证书制作成一个 CA bundle 文件,在需要用到 CA 证书的地方将配置指向这个文件。这样可以避免生成的 `lua_ssl_trusted_certificate` 存在多处并且互相覆盖的问题。 + +下面用一个完整的例子来展示如何在 APISIX 设置多个 CA 证书。 + +假设让 client 与 APISIX Admin API,APISIX 与 ETCD 之间都使用 mTLS 协议进行通信,目前有两张 CA 证书,分别是 `foo_ca.crt` 和 `bar_ca.crt`,用这两张 CA 证书各自签发 client 与 server 证书对,`foo_ca.crt` 及其签发的证书对用于保护 Admin API,`bar_ca.crt` 及其签发的证书对用于保护 ETCD。 + +下表详细列出这个示例所涉及到的配置及其作用: + +| 配置 | 类型 | 用途 | +| ------------- | ------- | ----------------------------------------------------------------------------------------------------------- | +| foo_ca.crt | CA 证书 | 签发客户端与 APISIX Admin API 进行 mTLS 通信所需的次级证书。 | +| foo_client.crt | 证书 | 由 `foo_ca.crt` 签发,客户端使用,访问 APISIX Admin API 时证明自身身份的证书。 | +| foo_client.key | 密钥文件 | 由 `foo_ca.crt` 签发,客户端使用,访问 APISIX Admin API 所需的密钥文件。 | +| foo_server.crt | 证书 | 由 `foo_ca.crt` 签发,APISIX 使用,对应 `admin_api_mtls.admin_ssl_cert` 配置项。 | +| foo_server.key | 密钥文件 | 由 `foo_ca.crt` 签发,APISIX 使用,对应 `admin_api_mtls.admin_ssl_cert_key` 配置项。 | +| admin.apisix.dev | 域名 | 签发 `foo_server.crt` 证书时使用的 Common Name,客户端通过该域名访问 APISIX Admin API | +| bar_ca.crt | CA 
证书 | 签发 APISIX 与 ETCD 进行 mTLS 通信所需的次级证书。 | +| bar_etcd.crt | 证书 | 由 `bar_ca.crt` 签发,ETCD 使用,对应 ETCD 启动命令中的 `--cert-file` 选项。 | +| bar_etcd.key | 密钥文件 | 由 `bar_ca.crt` 签发,ETCD 使用,对应 ETCD 启动命令中的 `--key-file` 选项。 | +| bar_apisix.crt | 证书 | 由 `bar_ca.crt` 签发,APISIX 使用,对应 `etcd.tls.cert` 配置项。 | +| bar_apisix.key | 密钥文件 | 由 `bar_ca.crt` 签发,APISIX 使用,对应 `etcd.tls.key` 配置项。 | +| etcd.cluster.dev | 域名 | 签发 `bar_etcd.crt` 证书时使用的 Common Name,APISIX 与 ETCD 进行 mTLS 通信时,使用该域名作为 SNI。对应 `etcd.tls.sni` 配置项。| +| apisix.ca-bundle | CA bundle | 由 `foo_ca.crt` 与 `bar_ca.crt` 合并而成,替代 `foo_ca.crt` 与 `bar_ca.crt`。 | + +1. 制作 CA bundle 文件 + +```shell +cat /path/to/foo_ca.crt /path/to/bar_ca.crt > apisix.ca-bundle +``` + +2. 启动 ETCD 集群,并开启客户端验证 + +先编写 `goreman` 配置,命名为 `Procfile-single-enable-mtls`,内容如下: + +```text +# 运行 `go get github.com/mattn/goreman` 安装 goreman,用 goreman 执行以下命令: +etcd1: etcd --name infra1 --listen-client-urls https://127.0.0.1:12379 --advertise-client-urls https://127.0.0.1:12379 --listen-peer-urls http://127.0.0.1:12380 --initial-advertise-peer-urls http://127.0.0.1:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd2: etcd --name infra2 --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://127.0.0.1:22379 --listen-peer-urls http://127.0.0.1:22380 --initial-advertise-peer-urls http://127.0.0.1:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle +etcd3: etcd --name infra3 --listen-client-urls https://127.0.0.1:32379 
--advertise-client-urls https://127.0.0.1:32379 --listen-peer-urls http://127.0.0.1:32380 --initial-advertise-peer-urls http://127.0.0.1:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster 'infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380' --initial-cluster-state new --cert-file /path/to/bar_etcd.crt --key-file /path/to/bar_etcd.key --client-cert-auth --trusted-ca-file /path/to/apisix.ca-bundle
+```
+
+使用 `goreman` 来启动 ETCD 集群:
+
+```shell
+goreman -f Procfile-single-enable-mtls start > goreman.log 2>&1 &
+```
+
+3. 更新 `config.yaml`
+
+```yaml title="conf/config.yaml"
+deployment:
+  role: traditional
+  role_traditional:
+    config_provider: etcd
+  admin:
+    admin_key:
+      - name: admin
+        key: edd1c9f034335f136f87ad84b625c8f1
+        role: admin
+    admin_listen:
+      ip: 127.0.0.1
+      port: 9180
+    https_admin: true
+    admin_api_mtls:
+      admin_ssl_ca_cert: /path/to/apisix.ca-bundle
+      admin_ssl_cert: /path/to/foo_server.crt
+      admin_ssl_cert_key: /path/to/foo_server.key
+  etcd:
+    host:
+      - "https://127.0.0.1:12379"
+      - "https://127.0.0.1:22379"
+      - "https://127.0.0.1:32379"
+    tls:
+      cert: /path/to/bar_apisix.crt
+      key: /path/to/bar_apisix.key
+      sni: etcd.cluster.dev
+
+apisix:
+  ssl:
+    ssl_trusted_certificate: /path/to/apisix.ca-bundle
+```
+
+4. 
测试 Admin API + +启动 APISIX,如果 APISIX 启动成功,`logs/error.log` 中没有异常输出,表示 APISIX 与 ETCD 之间进行 mTLS 通信正常。 + +用 curl 模拟客户端,与 APISIX Admin API 进行 mTLS 通信,并创建一条路由: + +```shell +curl -vvv \ + --resolve 'admin.apisix.dev:9180:127.0.0.1' https://admin.apisix.dev:9180/apisix/admin/routes/1 \ + --cert /path/to/foo_client.crt \ + --key /path/to/foo_client.key \ + --cacert /path/to/apisix.ca-bundle \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +如果输出以下 SSL 握手过程,表示 curl 与 APISIX Admin API 之间 mTLS 通信成功: + +```shell +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Request CERT (13): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Certificate (11): +* TLSv1.3 (OUT), TLS handshake, CERT verify (15): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +5. 
验证 APISIX 代理 + +```shell +curl http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 298 +Connection: keep-alive +Date: Tue, 26 Jul 2022 16:31:00 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/2.14.1 + +…… +``` + +APISIX 将请求代理到了上游 `httpbin.org` 的 `/get` 路径,并返回了 `HTTP/1.1 200 OK`。整个过程使用 CA bundle 替代 CA 证书是正常可用的。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/config.json b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/config.json new file mode 100644 index 0000000..f7c9ebd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/config.json @@ -0,0 +1,357 @@ +{ + "version": "3.13.0", + "sidebar": [ + { + "type": "category", + "label": "快速开始", + "items": [ + "getting-started/README", + "getting-started/configure-routes", + "getting-started/load-balancing", + "getting-started/key-authentication", + "getting-started/rate-limiting" + ] + }, + { + "type": "doc", + "id": "installation-guide" + }, + { + "type": "doc", + "id": "architecture-design/apisix" + }, + { + "type": "category", + "label": "教程", + "items": [ + "tutorials/expose-api", + "tutorials/protect-api", + "tutorials/observe-your-api", + "tutorials/health-check", + "tutorials/client-to-apisix-mtls", + "tutorials/keycloak-oidc" + ] + }, + { + "type": "category", + "label": "APISIX 术语", + "items": [ + "terminology/api-gateway", + "terminology/consumer", + "terminology/consumer-group", + "terminology/credential", + "terminology/global-rule", + "terminology/plugin", + "terminology/plugin-config", + "terminology/plugin-metadata", + "terminology/route", + "terminology/router", + "terminology/script", + "terminology/service", + "terminology/upstream", + "terminology/secret" + ] + }, + { + "type": "category", + "label": "插件", + "items": [ + { + "type": "category", + "label": "普通插件", + "items": [ + "plugins/batch-requests", + "plugins/redirect", + "plugins/echo", + "plugins/gzip", + 
"plugins/brotli", + "plugins/real-ip", + "plugins/server-info", + "plugins/ext-plugin-pre-req", + "plugins/ext-plugin-post-req", + "plugins/ext-plugin-post-resp", + "plugins/ocsp-stapling" + ] + }, + { + "type": "category", + "label": "转换请求", + "items": [ + "plugins/response-rewrite", + "plugins/proxy-rewrite", + "plugins/grpc-transcode", + "plugins/grpc-web", + "plugins/fault-injection", + "plugins/mocking", + "plugins/body-transformer", + "plugins/attach-consumer-label" + ] + }, + { + "type": "category", + "label": "身份认证", + "items": [ + "plugins/authz-keycloak", + "plugins/authz-casdoor", + "plugins/wolf-rbac", + "plugins/key-auth", + "plugins/jwt-auth", + "plugins/jwe-decrypt", + "plugins/basic-auth", + "plugins/openid-connect", + "plugins/hmac-auth", + "plugins/authz-casbin", + "plugins/ldap-auth", + "plugins/opa", + "plugins/forward-auth", + "plugins/multi-auth" + ] + }, + { + "type": "category", + "label": "安全防护", + "items": [ + "plugins/cors", + "plugins/uri-blocker", + "plugins/ip-restriction", + "plugins/ua-restriction", + "plugins/referer-restriction", + "plugins/consumer-restriction", + "plugins/csrf", + "plugins/public-api", + "plugins/gm", + "plugins/chaitin-waf" + ] + }, + { + "type": "category", + "label": "流量控制", + "items": [ + "plugins/limit-req", + "plugins/limit-conn", + "plugins/limit-count", + "plugins/proxy-cache", + "plugins/request-validation", + "plugins/proxy-mirror", + "plugins/api-breaker", + "plugins/traffic-split", + "plugins/request-id", + "plugins/proxy-control", + "plugins/client-control", + "plugins/workflow" + ] + }, + { + "type": "category", + "label": "可观测性", + "items": [ + { + "type": "category", + "label": "数据链路", + "items": [ + "plugins/zipkin", + "plugins/skywalking", + "plugins/opentelemetry" + ] + }, + { + "type": "category", + "label": "数据指标", + "items": [ + "plugins/prometheus", + "plugins/node-status", + "plugins/datadog" + ] + }, + { + "type": "category", + "label": "日志采集", + "items": [ + "plugins/http-logger", + 
"plugins/skywalking-logger", + "plugins/tcp-logger", + "plugins/kafka-logger", + "plugins/rocketmq-logger", + "plugins/udp-logger", + "plugins/clickhouse-logger", + "plugins/syslog", + "plugins/log-rotate", + "plugins/error-log-logger", + "plugins/sls-logger", + "plugins/google-cloud-logging", + "plugins/splunk-hec-logging", + "plugins/file-logger", + "plugins/loggly", + "plugins/elasticsearch-logger", + "plugins/tencent-cloud-cls", + "plugins/loki-logger" + ] + } + ] + }, + { + "type": "category", + "label": "无服务器架构", + "items": [ + "plugins/serverless", + "plugins/azure-functions", + "plugins/openwhisk", + "plugins/aws-lambda", + "plugins/openfunction" + ] + }, + { + "type": "category", + "label": "其它协议", + "items": [ + "plugins/dubbo-proxy", + "plugins/mqtt-proxy", + "plugins/http-dubbo" + ] + } + ] + }, + { + "type": "category", + "label": "相关 API", + "items": [ + { + "type": "doc", + "id": "admin-api" + }, + { + "type": "doc", + "id": "control-api" + }, + { + "type": "doc", + "id": "status-api" + } + ] + }, + { + "type": "category", + "label": "开发者", + "items": [ + { + "type": "doc", + "id": "building-apisix" + }, + { + "type": "doc", + "id": "build-apisix-dev-environment-on-mac" + }, + { + "type": "doc", + "id": "support-fips-in-apisix" + }, + { + "type": "doc", + "id": "external-plugin" + }, + { + "type": "doc", + "id": "wasm" + }, + { + "type": "doc", + "id": "CODE_STYLE" + }, + { + "type": "doc", + "id": "plugin-develop" + }, + { + "type": "doc", + "id": "debug-mode" + } + ] + }, + { + "type": "doc", + "id": "FAQ" + }, + { + "type": "category", + "label": "其它", + "items": [ + { + "type": "category", + "label": "服务发现", + "items": [ + "discovery", + "discovery/dns", + "discovery/nacos", + "discovery/eureka", + "discovery/control-plane-service-discovery", + "discovery/kubernetes" + ] + }, + { + "type": "doc", + "id": "router-radixtree" + }, + { + "type": "doc", + "id": "stream-proxy" + }, + { + "type": "doc", + "id": "grpc-proxy" + }, + { + "type": "doc", + 
"id": "customize-nginx-configuration" + }, + { + "type": "doc", + "id": "certificate" + }, + { + "type": "doc", + "id": "apisix-variable" + }, + { + "type": "doc", + "id": "batch-processor" + }, + { + "type": "doc", + "id": "benchmark" + }, + { + "type": "doc", + "id": "install-dependencies" + }, + { + "type": "doc", + "id": "mtls" + }, + { + "type": "doc", + "id": "debug-function" + }, + { + "type": "doc", + "id": "profile" + }, + { + "type": "doc", + "id": "ssl-protocol" + }, + { + "type": "doc", + "id": "http3" + } + ] + }, + { + "type": "doc", + "id": "CHANGELOG" + }, + { + "type": "doc", + "id": "upgrade-guide-from-2.15.x-to-3.0.0" + } + ] +} diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/control-api.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/control-api.md new file mode 100644 index 0000000..eeb61b5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/control-api.md @@ -0,0 +1,236 @@ +--- +title: Control API +--- + + + +control API 可以被用来: + +* 暴露 APISIX 内部状态信息 +* 控制单个 APISIX 的数据平面的行为 + +默认情况下,control API 是启用的,监听 `127.0.0.1:9090`。你可以通过修改 `apisix/conf/config.yaml` 中的 control 部分来更改设置,如下: + +```yaml +apisix: + ... + enable_control: true + control: + ip: "127.0.0.1" + port: 9090 +``` + +插件的 control API 在默认情况下不支持参数匹配,如果想启用参数匹配功能可以在 control 部分添加 `router: 'radixtree_uri_with_parameter'` + +注意:control API server 不应该被配置成监听公网地址。 + +## 通过插件添加的 control API + +APISIX 中一些插件添加了自己的 control API。如果你对他们感兴趣,请参阅对应插件的文档。 + +## 独立于插件的 control API + +以下是支持的 API: + +### GET /v1/schema + +引入自 2.2 版本 + +使用以下格式返回被该 APISIX 实例使用的 json schema: + +```json +{ + "main": { + "route": { + "properties": {...} + }, + "upstream": { + "properties": {...} + }, + ... + }, + "plugins": { + "example-plugin": { + "consumer_schema": {...}, + "metadata_schema": {...}, + "schema": {...}, + "type": ..., + "priority": 0, + "version": 0.1 + }, + ... + }, + "stream-plugins": { + "mqtt-proxy": { + ... + }, + ... 
+ } +} +``` + +只有启用了的插件才会被包含在返回结果中 `plugins` 部分。(返回结果中的)一些插件可能会缺失如 `consumer_schema` 或者 `type` 字段,这取决于插件的定义。 + +### GET /v1/healthcheck + +引入自 2.3 版本 + +使用以下格式返回当前的 [health check](./tutorials/health-check.md) 状态 + +```json +[ + { + "nodes": [ + { + "ip": "52.86.68.46", + "counter": { + "http_failure": 0, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "healthy" + }, + { + "ip": "100.24.156.8", + "counter": { + "http_failure": 5, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "unhealthy" + } + ], + "name": "/apisix/routes/1", + "type": "http" + } +] + +``` + +每个 entry 包含以下字段: + +* name: 资源 ID,健康检查的报告对象。 +* type: 健康检查类型,取值为 `["http", "https", "tcp"]`。 +* nodes: 检查节点列表。 +* nodes[i].ip: IP 地址。 +* nodes[i].port: 端口。 +* nodes[i].status: 状态:`["healthy", "unhealthy", "mostly_healthy", "mostly_unhealthy"]`。 +* nodes[i].counter.success: 成功计数器。 +* nodes[i].counter.http_failure: HTTP 访问失败计数器。 +* nodes[i].counter.tcp_failure: TCP 连接或读写的失败计数器。 +* nodes[i].counter.timeout_failure: 超时计数器。 + +用户也可以通过 `/v1/healthcheck/$src_type/$src_id` 来获取指定 health checker 的状态。 + +例如,`GET /v1/healthcheck/upstreams/1` 返回: + +```json +{ + "nodes": [ + { + "ip": "52.86.68.46", + "counter": { + "http_failure": 0, + "success": 2, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "healthy" + }, + { + "ip": "100.24.156.8", + "counter": { + "http_failure": 5, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "unhealthy" + } + ], + "type": "http" + "name": "/apisix/routes/1" +} + +``` + +:::note + +只有一个上游满足以下条件时,它的健康检查状态才会出现在结果里面: + +* 上游配置了健康检查。 +* 上游在任何一个 worker 进程处理过客户端请求。 + +::: + +如果你使用浏览器访问该 API,你将得到一个网页: + +![Health Check Status Page](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/health_check_status_page.png) + +### POST /v1/gc + +引入自 2.8 版本 + +在 http 子系统中触发一次全量 GC + +注意,当你启用 stream proxy 时,APISIX 将为 stream 子系统运行另一个 Lua 
虚拟机。它不会触发这个 Lua 虚拟机中的全量 GC。 + +### GET /v1/plugin_metadatas + +引入自 3.0.0 版本 + +打印所有插件的元数据: + +```json +[ + { + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" + }, + { + "ikey": 1, + "skey": "val", + "id": "example-plugin" + } +] +``` + +### GET /v1/plugin_metadata/{plugin_name} + +引入自 3.0.0 版本 + +打印指定插件的元数据: + +```json +{ + "log_format": { + "upstream_response_time": "$upstream_response_time" + }, + "id": "file-logger" +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/customize-nginx-configuration.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/customize-nginx-configuration.md new file mode 100644 index 0000000..0817f63 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/customize-nginx-configuration.md @@ -0,0 +1,63 @@ +--- +title: 自定义 Nginx 配置 +--- + + + +APISIX 使用的 Nginx 配置是通过模板文件 `apisix/cli/ngx_tpl.lua` 以及 `apisix/cli/config.lua` 和`conf/config.yaml` 中的参数生成的。 + +在执行完 `./bin/apisix start`,你可以在 `conf/nginx.conf` 看到生成的 Nginx 配置文件。 + +如果你需要自定义 Nginx 配置,请阅读 `conf/config.default.example` 中的 `nginx_config`。你可以在 `conf/config.yaml` 中覆盖默认值。例如,你可以在 `conf/nginx.conf` 中通过配置 `xxx_snippet` 条目注入一些代码片段: + +```yaml +... +# config.yaml 里面的内容 +nginx_config: + main_configuration_snippet: | + daemon on; + http_configuration_snippet: | + server + { + listen 45651; + server_name _; + access_log off; + + location /ysec_status { + req_status_show; + allow 127.0.0.1; + deny all; + } + } + + chunked_transfer_encoding on; + + http_server_configuration_snippet: | + set $my "var"; + http_admin_configuration_snippet: | + log_format admin "$request_time $pipe"; + http_end_configuration_snippet: | + server_names_hash_bucket_size 128; + stream_configuration_snippet: | + tcp_nodelay off; +... 
+``` + +注意`nginx_config`及其子项的格式缩进,在执行`./bin/apisix start`时,错误的缩进将导致更新`conf/nginx.conf`文件失败。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/debug-function.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/debug-function.md new file mode 100644 index 0000000..fd0e15a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/debug-function.md @@ -0,0 +1,163 @@ +--- +title: 调试功能 +--- + + + +## `5xx` 响应状态码 + +500、502、503 等类似的 `5xx` 状态码,是由于服务器错误而响应的状态码,当一个请求出现 `5xx` 状态码时;它可能来源于 `APISIX` 或 `Upstream` 。如何识别这些响应状态码的来源,是一件很有意义的事,它能够快速的帮助我们确定问题的所在。(当修改 `conf/config.yaml` 的配置 `show_upstream_status_in_response_header` 为 `true` 时,会返回所有上游状态码,不仅仅是 `5xx` 状态。) + +## 如何识别 `5xx` 响应状态码的来源 + +在请求的响应头中,通过 `X-APISIX-Upstream-Status` 这个响应头,我们可以有效的识别 `5xx` 状态码的来源。当 `5xx` 状态码来源于 `Upstream` 时,在响应头中可以看到 `X-APISIX-Upstream-Status` 这个响应头,并且这个响应头的值为响应的状态码。当 `5xx` 状态码来源于 `APISIX` 时,响应头中没有 `X-APISIX-Upstream-Status` 的响应头信息。也就是只有 `5xx` 状态码来源于 `Upstream` 时,才会有 `X-APISIX-Upstream-Status` 响应头。 + +## 示例 + +示例 1:`502` 响应状态码来源于 `Upstream` (IP 地址不可用) + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +测试: + +```shell +$ curl http://127.0.0.1:9080/hello -v +...... +< HTTP/1.1 502 Bad Gateway +< Date: Wed, 25 Nov 2020 14:40:22 GMT +< Content-Type: text/html; charset=utf-8 +< Content-Length: 154 +< Connection: keep-alive +< Server: APISIX/2.0 +< X-APISIX-Upstream-Status: 502 +< + +502 Bad Gateway + +

502 Bad Gateway

+
openresty
+ + + +``` + +具有 `X-APISIX-Upstream-Status: 502` 的响应头。 + +示例 2:`502` 响应状态码来源于 `APISIX` + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 500, + "body": "Fault Injection!\n" + } + } + }, + "uri": "/hello" +}' +``` + +测试: + +```shell +$ curl http://127.0.0.1:9080/hello -v +...... +< HTTP/1.1 500 Internal Server Error +< Date: Wed, 25 Nov 2020 14:50:20 GMT +< Content-Type: text/plain; charset=utf-8 +< Transfer-Encoding: chunked +< Connection: keep-alive +< Server: APISIX/2.0 +< +Fault Injection! +``` + +没有 `X-APISIX-Upstream-Status` 的响应头。 + +示例 3:`Upstream` 具有多节点,并且所有节点不可用 + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "nodes": { + "127.0.0.3:1": 1, + "127.0.0.2:1": 1, + "127.0.0.1:1": 1 + }, + "retries": 2, + "type": "roundrobin" +}' +``` + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "upstream_id": "1" +}' +``` + +测试: + +```shell +$ curl http://127.0.0.1:9080/hello -v +< HTTP/1.1 502 Bad Gateway +< Date: Wed, 25 Nov 2020 15:07:34 GMT +< Content-Type: text/html; charset=utf-8 +< Content-Length: 154 +< Connection: keep-alive +< Server: APISIX/2.0 +< X-APISIX-Upstream-Status: 502, 502, 502 +< + +502 Bad Gateway + +

502 Bad Gateway

+
openresty
+ + +``` + +具有 `X-APISIX-Upstream-Status: 502, 502, 502` 的响应头。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/debug-mode.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/debug-mode.md new file mode 100644 index 0000000..c9c0b10 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/debug-mode.md @@ -0,0 +1,110 @@ +--- +title: 调试模式 +--- + + + +### 基本调试模式 + +设置 `conf/debug.yaml` 即可开启基本调试模式: + +``` +basic: + enable: true +#END +``` + +注意:在 APISIX 2.10 之前,开启基本调试模式曾经是设置 `conf/config.yaml` 中的 `apisix.enable_debug` 为 `true`。 + +比如对 `/hello` 开启了 `limit-conn` 和 `limit-count` 插件,这时候应答头中会有 `Apisix-Plugins: limit-conn, limit-count`。 + +```shell +$ curl http://127.0.0.1:1984/hello -i +HTTP/1.1 200 OK +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Apisix-Plugins: limit-conn, limit-count +X-RateLimit-Limit: 2 +X-RateLimit-Remaining: 1 +Server: openresty + +hello world +``` + +如果这个信息无法通过 HTTP 应答头传递,比如插件在 stream 子系统里面执行, +那么这个信息会以 warn 等级日志写入到错误日志中。 + +### 高级调试模式 + +设置 `conf/debug.yaml` 中的选项,开启高级调试模式。由于 APISIX 服务启动后是每秒定期检查该文件, +当可以正常读取到 `#END` 结尾时,才认为文件处于写完关闭状态。 + +根据文件最后修改时间判断文件内容是否有变化,如有变化则重新加载,如没变化则跳过本次检查。 +所以高级调试模式的开启、关闭都是热更新方式完成。 + +| 名称 | 必选项 | 说明 | 默认值 | +| ------------------------------- | ------ | ------------------------------------------------------------- | ------ | +| hook_conf.enable | 是 | 是否开启 hook 追踪调试。开启后将打印指定模块方法的请求参数或返回值。 | false | +| hook_conf.name | 是 | 开启 hook 追踪调试的模块列表名称。 | | +| hook_conf.log_level | 是 | 打印请求参数和返回值的日志级别。 | warn | +| hook_conf.is_print_input_args | 是 | 是否打印输入参数。 | true | +| hook_conf.is_print_return_value | 是 | 是否打印返回值。 | true | + +请看下面示例: + +```yaml +hook_conf: + enable: false # 是否开启 hook 追踪调试 + name: hook_phase # 开启 hook 追踪调试的模块列表名称 + log_level: warn # 日志级别 + is_print_input_args: true # 是否打印输入参数 + is_print_return_value: true # 是否打印返回值 + +hook_phase: # 模块函数列表,名字:hook_phase + apisix: # 引用的模块名称 + - http_access_phase # 函数名:数组 + - http_header_filter_phase + - 
http_body_filter_phase + - http_log_phase +#END +``` + +### 动态高级调试模式 + +动态高级调试模式是基于高级调试模式,可以由单个请求动态开启高级调试模式。设置 `conf/debug.yaml` 中的选项。 + +示例: + +```yaml +http_filter: + enable: true # 是否动态开启高级调试模式 + enable_header_name: X-APISIX-Dynamic-Debug # 追踪携带此 header 的请求 +...... +#END +``` + +动态开启高级调试模式,示例: + +```shell +curl 127.0.0.1:9090/hello --header 'X-APISIX-Dynamic-Debug: foo' +``` + +注意:动态高级调试模式无法调试 `apisix.http_access_phase`,模块(因为请求进入 `apisix.http_access_phase` 模块后,才会判断是否动态开启高级调试模式)。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery.md new file mode 100644 index 0000000..3aaaaaf --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery.md @@ -0,0 +1,290 @@ +--- +title: 集成服务发现注册中心 +--- + + + +## 摘要 + +当业务量发生变化时,需要对上游服务进行扩缩容,或者因服务器硬件故障需要更换服务器。如果网关是通过配置来维护上游服务信息,在微服务架构模式下,其带来的维护成本可想而知。再者因不能及时更新这些信息,也会对业务带来一定的影响,还有人为误操作带来的影响也不可忽视,所以网关非常必要通过服务注册中心动态获取最新的服务实例信息。架构图如下所示: + +![discovery through service registry](../../assets/images/discovery-cn.png) + +1. 服务启动时将自身的一些信息,比如服务名、IP、端口等信息上报到注册中心;各个服务与注册中心使用一定机制(例如心跳)通信,如果注册中心与服务长时间无法通信,就会注销该实例;当服务下线时,会删除注册中心的实例信息; +2. 网关会准实时地从注册中心获取服务实例信息; +3. 当用户通过网关请求服务时,网关从注册中心获取的实例列表中选择一个进行代理; + +常见的注册中心:Eureka, Etcd, Consul, Nacos, Zookeeper 等 + +## 如何扩展注册中心? + +### 基本步骤 + +APISIX 要扩展注册中心其实是件非常容易的事情,其基本步骤如下: + +1. 在 `apisix/discovery/` 目录中添加注册中心客户端的实现; +2. 实现用于初始化的 `_M.init_worker()` 函数以及用于获取服务实例节点列表的 `_M.nodes(service_name)` 函数; +3. 将注册中心数据转换为 APISIX 格式的数据; + +### 以 Eureka 举例 + +#### 实现 eureka 客户端 + +首先,在 `apisix/discovery` 下创建 `eureka` 目录; + +其次,在 `apisix/discovery/eureka` 目录中添加 [`init.lua`](https://github.com/apache/apisix/blob/master/apisix/discovery/init.lua); + +然后在 `init.lua` 实现用于初始化的 `init_worker` 函数以及用于获取服务实例节点列表的 `nodes` 函数即可: + +```lua +local _M = { + version = 0.1, +} + + +function _M.nodes(service_name) + ... ... +end + + +function _M.init_worker() + ... ... 
+end + + +return _M +``` + +最后,在 `apisix/discovery/eureka` 下的 `schema.lua` 里面提供 YAML 配置的 schema。 + +#### Eureka 与 APISIX 之间数据转换逻辑 + +APISIX 是通过 `upstream.nodes` 来配置上游服务的,所以使用注册中心后,通过注册中心获取服务的所有 node 后,赋值给 `upstream.nodes` 来达到相同的效果。那么 APISIX 是怎么将 Eureka 的数据转成 node 的呢?假如从 Eureka 获取如下数据: + +```json +{ + "applications": { + "application": [ + { + "name": "USER-SERVICE", # 服务名称 + "instance": [ + { + "instanceId": "192.168.1.100:8761", + "hostName": "192.168.1.100", + "app": "USER-SERVICE", # 服务名称 + "ipAddr": "192.168.1.100", # 实例 IP 地址 + "status": "UP", # 状态 + "overriddenStatus": "UNKNOWN", # 覆盖状态 + "port": { + "$": 8761, # 端口 + "@enabled": "true" # 开始端口 + }, + "securePort": { + "$": 443, + "@enabled": "false" + }, + "metadata": { + "management.port": "8761", + "weight": 100 # 权重,需要通过 spring boot 应用的 eureka.instance.metadata-map.weight 进行配置 + }, + "homePageUrl": "http://192.168.1.100:8761/", + "statusPageUrl": "http://192.168.1.100:8761/actuator/info", + "healthCheckUrl": "http://192.168.1.100:8761/actuator/health", + ... ... + } + ] + } + ] + } +} +``` + +解析 instance 数据步骤: + +1. 首先要选择状态为“UP”的实例:overriddenStatus 值不为 "UNKNOWN" 以 overriddenStatus 为准,否则以 status 的值为准; +2. IP 地址:以 ipAddr 的值为 IP; 并且必须是 IPv4 或 IPv6 格式的; +3. 端口:端口取值规则是,如果 port["@enabled"] 等于 "true" 那么使用 port["\$"] 的值;如果 securePort["@enabled"] 等于 "true" 那么使用 securePort["$"] 的值; +4. 权重:权重取值顺序是,先判断 `metadata.weight` 是否有值,如果没有,则取配置中的 `eureka.weight` 的值,如果还没有,则取默认值`100`; + +这个例子转成 APISIX nodes 的结果如下: + +```json +[ + { + "host": "192.168.1.100", + "port": 8761, + "weight": 100, + "metadata": { + "management.port": "8761" + } + } +] +``` + +## 注册中心配置 + +### 初始化服务发现 + +首先要在 `conf/config.yaml` 文件中增加如下配置,添加不同的服务发现客户端,以便在使用过程中动态选择: + +```yaml +discovery: + eureka: ... +``` + +此名称要与 `apisix/discovery/` 目录中实现对应注册中心的文件名保持一致。 + +现已支持注册中心有:Eureka。 + +### Eureka 的配置 + +在 `conf/config.yaml` 增加如下格式的配置: + +```yaml +discovery: + eureka: + host: # it's possible to define multiple eureka hosts addresses of the same eureka cluster. 
+ - "http://${username}:${password}@${eureka_host1}:${eureka_port1}" + - "http://${username}:${password}@${eureka_host2}:${eureka_port2}" + prefix: "/eureka/" + fetch_interval: 30 # 从 eureka 中拉取数据的时间间隔,默认 30 秒 + weight: 100 # default weight for node + timeout: + connect: 2000 # 连接 eureka 的超时时间,默认 2000ms + send: 2000 # 向 eureka 发送数据的超时时间,默认 2000ms + read: 5000 # 从 eureka 读数据的超时时间,默认 5000ms +``` + +通过 `discovery.eureka.host` 配置 eureka 的服务器地址。 + +如果 eureka 的地址是 `http://127.0.0.1:8761/` ,并且不需要用户名和密码验证的话,配置如下: + +```yaml +discovery: + eureka: + host: + - "http://127.0.0.1:8761" + prefix: "/eureka/" +``` + +## upstream 配置 + +### 七层 + +APISIX 是通过 `upstream.discovery_type` 选择使用的服务发现,`upstream.service_name` 与注册中心的服务名进行关联。下面是将 URL 为 "/user/\*" 的请求路由到注册中心名为 "USER-SERVICE" 的服务上例子: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/user/*", + "upstream": { + "service_name": "USER-SERVICE", + "type": "roundrobin", + "discovery_type": "eureka" + } +}' + +HTTP/1.1 201 Created +Date: Sat, 31 Aug 2019 01:17:15 GMT +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server + +{"node":{"value":{"uri":"\/user\/*","upstream": {"service_name": "USER-SERVICE", "type": "roundrobin", "discovery_type": "eureka"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} +``` + +因为上游的接口 URL 可能会有冲突,通常会在网关通过前缀来进行区分: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/a/*", + "plugins": { + "proxy-rewrite" : { + "regex_uri": ["^/a/(.*)", "/${1}"] + } + }, + "upstream": { + "service_name": "A-SERVICE", + "type": "roundrobin", + "discovery_type": "eureka" + } +}' + +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: 
$admin_key" -X PUT -i -d ' +{ + "uri": "/b/*", + "plugins": { + "proxy-rewrite" : { + "regex_uri": ["^/b/(.*)", "/${1}"] + } + }, + "upstream": { + "service_name": "B-SERVICE", + "type": "roundrobin", + "discovery_type": "eureka" + } +}' +``` + +假如 A-SERVICE 和 B-SERVICE 都提供了一个 `/test` 的接口,通过上面的配置,可以通过 `/a/test` 访问 A-SERVICE 的 `/test` 接口,通过 `/b/test` 访问 B-SERVICE 的 `/test` 接口。 + +**注意**:配置 `upstream.service_name` 后 `upstream.nodes` 将不再生效,而是使用从注册中心的数据来替换,即使注册中心的数据是空的。 + +### 四层 + +eureka 服务发现也支持在四层中使用,配置方式与七层的类似。 + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "remote_addr": "127.0.0.1", + "upstream": { + "scheme": "tcp", + "discovery_type": "eureka", + "service_name": "APISIX-EUREKA", + "type": "roundrobin" + } +}' +HTTP/1.1 200 OK +Date: Fri, 30 Dec 2022 03:52:19 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.0.0 +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Access-Control-Expose-Headers: * +Access-Control-Max-Age: 3600 +X-API-VERSION: v3 + +{"key":"\/apisix\/stream_routes\/1","value":{"remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","type":"roundrobin","discovery_type":"eureka","scheme":"tcp","pass_host":"pass","service_name":"APISIX-EUREKA"},"id":"1","create_time":1672106762,"update_time":1672372339}} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/control-plane-service-discovery.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/control-plane-service-discovery.md new file mode 100644 index 0000000..b300b73 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/control-plane-service-discovery.md @@ -0,0 +1,72 @@ +--- +title: 控制面服务发现 +keywords: + - API 网关 + - APISIX + - ZooKeeper + - Nacos + - APISIX-Seed +description: 本文档介绍了如何在 API 网关 Apache APISIX 控制面通过 Nacos 和 Zookeeper 实现服务发现。 +--- + + + +本文档介绍了如何在 APISIX 控制面通过 Nacos 和 
Zookeeper 实现服务发现。 + +## APISIX-Seed 架构 + +Apache APISIX 在早期已经支持了数据面服务发现,现在 APISIX 也通过 [APISIX-Seed](https://github.com/api7/apisix-seed) 项目实现了控制面服务发现,下图为 APISIX-Seed 架构图。 + +![control-plane-service-discovery](../../../assets/images/control-plane-service-discovery.png) + +图中的数字代表的具体信息如下: + +1. 通过 Admin API 向 APISIX 注册上游并指定服务发现类型。APISIX-Seed 将监听 etcd 中的 APISIX 资源变化,过滤服务发现类型并获取服务名称(如 ZooKeeper); +2. APISIX-Seed 将在服务注册中心(如 ZooKeeper)订阅指定的服务名称,以监控和更新对应的服务信息; +3. 客户端向服务注册中心注册服务后,APISIX-Seed 会获取新的服务信息,并将更新后的服务节点写入 etcd; +4. 当 APISIX-Seed 在 etcd 中更新相应的服务节点信息时,APISIX 会将最新的服务节点信息同步到内存中。 + +:::note + +引入 APISIX-Seed 后,如果注册中心的服务变化频繁,etcd 中的数据也会频繁变化。因此,需要在启动 etcd 时设置 `--auto-compaction` 选项,用来定期压缩历史记录,避免耗尽 etcd 存储空间。详细信息请参考 [revisions](https://etcd.io/docs/v3.5/learning/api/#revisions)。 + +::: + +## 为什么需要 APISIX-Seed? + +- 网络拓扑变得更简单 + + APISIX 不需要与每个注册中心保持网络连接,只需要关注 etcd 中的配置信息即可。这将大大简化网络拓扑。 + +- 上游服务总数据量变小 + + 由于 `registry` 的特性,APISIX 可能会在 Worker 中存储全量的 `registry` 服务数据,例如 Consul_KV。通过引入 APISIX-Seed,APISIX 的每个进程将不需要额外缓存上游服务相关信息。 + +- 更容易管理 + + 服务发现配置需要为每个 APISIX 实例配置一次。通过引入 APISIX-Seed,APISIX 将对服务注册中心的配置变化无感知。 + +## 支持的服务发现类型 + +目前已经支持了 ZooKeeper 和 Nacos,后续还将支持更多的服务注册中心,更多信息请参考:[APISIX Seed](https://github.com/api7/apisix-seed#apisix-seed-for-apache-apisix)。 + +- 如果你想启用控制面 ZooKeeper 服务发现,请参考:[ZooKeeper 部署教程](https://github.com/api7/apisix-seed/blob/main/docs/zh/latest/zookeeper.md)。 + +- 如果你想启用控制面 Nacos 服务发现,请参考:[Nacos 部署教程](https://github.com/api7/apisix-seed/blob/main/docs/zh/latest/nacos.md)。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/dns.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/dns.md new file mode 100644 index 0000000..b1f99ee --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/dns.md @@ -0,0 +1,146 @@ +--- +title: DNS +--- + + + +## 基于 DNS 的服务发现 + +某些服务发现系统如 Consul,支持通过 DNS 提供系统信息。我们可以使用这种方法直接实现服务发现,七层与四层均支持。 + +首先我们需要配置 DNS 服务器的地址: + +```yaml +# 添加到 config.yaml +discovery: + dns: + 
servers: + - "127.0.0.1:8600" # 使用 DNS 服务器的真实地址 +``` + +与在 Upstream 的 `nodes` 对象中配置域名不同的是,DNS 服务发现将返回所有的记录。例如按照以下的 upstream 配置: + +```json +{ + "id": 1, + "discovery_type": "dns", + "service_name": "test.consul.service", + "type": "roundrobin" +} +``` + +之后 `test.consul.service` 将被解析为 `1.1.1.1` 和 `1.1.1.2`,这个结果等同于: + +```json +{ + "id": 1, + "type": "roundrobin", + "nodes": [ + {"host": "1.1.1.1", "weight": 1}, + {"host": "1.1.1.2", "weight": 1} + ] +} +``` + +注意所有来自 `test.consul.service` 的 IP 都有相同的权重。 + +解析的记录将根据它们的 TTL 来进行缓存。对于记录不在缓存中的服务,我们将默认按照 `SRV -> A -> AAAA -> CNAME` 的顺序进行查询,刷新缓存记录时,我们将从上次成功的类型开始尝试。也可以通过修改配置文件来自定义 DNS 的解析顺序。 + +```yaml +# 添加到 config.yaml +discovery: + dns: + servers: + - "127.0.0.1:8600" # 使用 DNS 服务器的真实地址 + order: # DNS 解析的顺序 + - last # "last" 表示从上次成功的类型开始 + - SRV + - A + - AAAA + - CNAME + +``` + +如果你想指定 upstream 服务器的端口,可以把以下内容添加到 `service_name`: + +```json +{ + "id": 1, + "discovery_type": "dns", + "service_name": "test.consul.service:1980", + "type": "roundrobin" +} +``` + +另一种方法是通过 SRV 记录,见如下。 + +### SRV 记录 + +通过使用 SRV 记录你可以指定一个服务的端口和权重。 + +假设你有一条这样的 SRV 记录: + +``` +; under the section of blah.service +A 300 IN A 1.1.1.1 +B 300 IN A 1.1.1.2 +B 300 IN A 1.1.1.3 + +; name TTL type priority weight port +srv 86400 IN SRV 10 60 1980 A +srv 86400 IN SRV 20 20 1981 B +``` + +Upstream 配置是这样的: + +```json +{ + "id": 1, + "discovery_type": "dns", + "service_name": "srv.blah.service", + "type": "roundrobin" +} +``` + +效果等同于: + +```json +{ + "id": 1, + "type": "roundrobin", + "nodes": [ + {"host": "1.1.1.1", "port": 1980, "weight": 60, "priority": -10}, + {"host": "1.1.1.2", "port": 1981, "weight": 10, "priority": -20}, + {"host": "1.1.1.3", "port": 1981, "weight": 10, "priority": -20} + ] +} +``` + +注意 B 域名的两条记录均分权重。 +对于 SRV 记录,低优先级的节点被先选中,所以最后一项的优先级是负数。 + +关于 0 权重的 SRV 记录,在 [RFC 2782](https://www.ietf.org/rfc/rfc2782.txt) 中是这么描述的: + +> 当没有任何候选服务器时,域管理员应使用权重为 0 的,使 RR 更为易读(噪音更少)。当存在权重大于 0 的记录时,权重为 0 的记录被选中的可能性很小。 + +我们把权重为 0 的记录当作权重为 
1,因此节点“被选中的可能性很小”,这也是处理此类记录的常用方法。 + +对于端口为 0 的 SRV 记录,我们会使用上游协议的默认端口。 +你也可以在“service_name”字段中直接指定端口,比如“srv.blah.service:8848”。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/eureka.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/eureka.md new file mode 100644 index 0000000..a7a1600 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/eureka.md @@ -0,0 +1,25 @@ +--- +title: eureka +--- + + + +Apache APISIX 支持使用 [Eureka](https://github.com/Netflix/eureka#eureka) 做服务发现。 +详情请阅读 [支持的服务注册发现](../discovery.md#当前支持的注册中心) 。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/kubernetes.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/kubernetes.md new file mode 100644 index 0000000..e10a539 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/kubernetes.md @@ -0,0 +1,403 @@ +--- +title: Kubernetes +keywords: + - Kubernetes + - Apache APISIX + - 服务发现 + - 集群 + - API 网关 +description: 本文将介绍如何在 Apache APISIX 中基于 Kubernetes 进行服务发现以及相关问题汇总。 +--- + + + +## 基于 Kubernetes 的服务发现 + +Kubernetes 服务发现以 [_List-Watch_](https://kubernetes.io/docs/reference/using-api/api-concepts) 方式监听 [_Kubernetes_](https://kubernetes.io) 集群 [_Endpoints_](https://kubernetes.io/docs/concepts/services-networking/service) 资源的实时变化,并将其值存储到 ngx.shared.DICT 中。 + +同时遵循 [_APISIX Discovery 规范_](../discovery.md) 提供了节点查询接口。 + +## Kubernetes 服务发现的使用 + +目前 Kubernetes 服务发现支持单集群和多集群模式,分别适用于待发现的服务分布在单个或多个 Kubernetes 的场景。 + +### 单集群模式 Kubernetes 服务发现的配置格式 + +单集群模式 Kubernetes 服务发现的完整配置如下: + +```yaml +discovery: + kubernetes: + service: + # apiserver schema, options [http, https] + schema: https #default https + + # apiserver host, options [ipv4, ipv6, domain, environment variable] + host: ${KUBERNETES_SERVICE_HOST} #default ${KUBERNETES_SERVICE_HOST} + + # apiserver port, options [port number, environment variable] + port: ${KUBERNETES_SERVICE_PORT} #default 
${KUBERNETES_SERVICE_PORT} + + client: + # serviceaccount token or token_file + token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + #token: |- + # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif + # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI + + default_weight: 50 # weight assigned to each discovered endpoint. default 50, minimum 0 + + # kubernetes discovery support namespace_selector + # you can use one of [equal, not_equal, match, not_match] filter namespace + namespace_selector: + # only save endpoints with namespace equal default + equal: default + + # only save endpoints with namespace not equal default + #not_equal: default + + # only save endpoints with namespace match one of [default, ^my-[a-z]+$] + #match: + #- default + #- ^my-[a-z]+$ + + # only save endpoints with namespace not match one of [default, ^my-[a-z]+$] + #not_match: + #- default + #- ^my-[a-z]+$ + + # kubernetes discovery support label_selector + # for the expression of label_selector, please refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + label_selector: |- + first="a",second="b" + + # reserved lua shared memory size, 1m memory can store about 1000 pieces of endpoint + shared_size: 1m #default 1m + + # if watch_endpoint_slices setting true, watch apiserver with endpointslices instead of endpoints + watch_endpoint_slices: false #default false +``` + +如果 Kubernetes 服务发现运行在 Pod 内,你可以使用如下最简配置: + +```yaml +discovery: + kubernetes: { } +``` + +如果 Kubernetes 服务发现运行在 Pod 外,你需要新建或选取指定的 [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/), 获取其 Token 值,然后使用如下配置: + +```yaml +discovery: + kubernetes: + service: + schema: https + host: # enter apiserver host value here + port: # enter apiServer port value here + client: + token: # enter serviceaccount token value here + #token_file: # enter token file path here +``` + 
+### 单集群模式 Kubernetes 服务发现的查询接口 + +单集群模式 Kubernetes 服务发现遵循 [_APISIX Discovery 规范_](../discovery.md) 提供节点查询接口。 + +**函数:** +nodes(service_name) + +**说明:** +service_name 必须满足格式:[namespace]/[name]:[portName] + ++ namespace: Endpoints 所在的命名空间 + ++ name: Endpoints 的资源名 + ++ portName: Endpoints 定义包含的 `ports.name` 值,如果 Endpoints 没有定义 `ports.name`,请依次使用 `targetPort`, `port` 代替。设置了 `ports.name` 的情况下,不能使用后两者。 + +**返回值:** +以如下 Endpoints 为例: + + ```yaml + apiVersion: v1 + kind: Endpoints + metadata: + name: plat-dev + namespace: default + subsets: + - addresses: + - ip: "10.5.10.109" + - ip: "10.5.10.110" + ports: + - port: 3306 + name: port + ``` + +nodes("default/plat-dev:port") 调用会得到如下的返回值: + + ``` + { + { + host="10.5.10.109", + port= 3306, + weight= 50, + }, + { + host="10.5.10.110", + port= 3306, + weight= 50, + }, + } + ``` + +### 多集群模式 Kubernetes 服务发现的配置格式 + +多集群模式 Kubernetes 服务发现的完整配置如下: + +```yaml +discovery: + kubernetes: + - id: release # a custom name refer to the cluster, pattern ^[a-z0-9]{1,8} + service: + # apiserver schema, options [http, https] + schema: https #default https + + # apiserver host, options [ipv4, ipv6, domain, environment variable] + host: "1.cluster.com" + + # apiserver port, options [port number, environment variable] + port: "6443" + + client: + # serviceaccount token or token_file + token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + #token: |- + # eyJhbGciOiJSUzI1NiIsImtpZCI6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEif + # 6Ikx5ME1DNWdnbmhQNkZCNlZYMXBsT3pYU3BBS2swYzBPSkN3ZnBESGpkUEEifeyJhbGciOiJSUzI1NiIsImtpZCI + + default_weight: 50 # weight assigned to each discovered endpoint. 
default 50, minimum 0 + + # kubernetes discovery support namespace_selector + # you can use one of [equal, not_equal, match, not_match] filter namespace + namespace_selector: + # only save endpoints with namespace equal default + equal: default + + # only save endpoints with namespace not equal default + #not_equal: default + + # only save endpoints with namespace match one of [default, ^my-[a-z]+$] + #match: + #- default + #- ^my-[a-z]+$ + + # only save endpoints with namespace not match one of [default, ^my-[a-z]+$] + #not_match: + #- default + #- ^my-[a-z]+$ + + # kubernetes discovery support label_selector + # for the expression of label_selector, please refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + label_selector: |- + first="a",second="b" + + # reserved lua shared memory size,1m memory can store about 1000 pieces of endpoint + shared_size: 1m #default 1m + + # if watch_endpoint_slices setting true, watch apiserver with endpointslices instead of endpoints + watch_endpoint_slices: false #default false +``` + +多集群模式 Kubernetes 服务发现没有为 `service` 和 `client` 域填充默认值,你需要根据集群配置情况自行填充。 + +### 多集群模式 Kubernetes 服务发现的查询接口 + +多集群模式 Kubernetes 服务发现遵循 [_APISIX Discovery 规范_](../discovery.md) 提供节点查询接口。 + +**函数:** +nodes(service_name) + +**说明:** +service_name 必须满足格式:[id]/[namespace]/[name]:[portName] + ++ id: Kubernetes 服务发现配置中定义的集群 id 值 + ++ namespace: Endpoints 所在的命名空间 + ++ name: Endpoints 的资源名 + ++ portName: Endpoints 定义包含的 `ports.name` 值,如果 Endpoints 没有定义 `ports.name`,请依次使用 `targetPort`, `port` 代替。设置了 `ports.name` 的情况下,不能使用后两者。 + +**返回值:** +以如下 Endpoints 为例: + + ```yaml + apiVersion: v1 + kind: Endpoints + metadata: + name: plat-dev + namespace: default + subsets: + - addresses: + - ip: "10.5.10.109" + - ip: "10.5.10.110" + ports: + - port: 3306 + name: port + ``` + +nodes("release/default/plat-dev:port") 调用会得到如下的返回值: + + ``` + { + { + host="10.5.10.109", + port= 3306, + weight= 50, + }, + { + host="10.5.10.110", + port= 3306, + 
weight= 50, + }, + } + ``` + +## Q&A + +**Q: 为什么只支持配置 token 来访问 Kubernetes APIServer?** + +A: 一般情况下,我们有三种方式可以完成与 Kubernetes APIServer 的认证: + +- mTLS +- Token +- Basic authentication + +因为 lua-resty-http 目前不支持 mTLS, Basic authentication 不被推荐使用,所以当前只实现了 Token 认证方式。 + +**Q: APISIX 继承了 NGINX 的多进程模型,是否意味着每个 APISIX 工作进程都会监听 Kubernetes Endpoints?** + +A: Kubernetes 服务发现只使用特权进程监听 Kubernetes Endpoints,然后将其值存储到 `ngx.shared.DICT` 中,工作进程通过查询 `ngx.shared.DICT` 来获取结果。 + +**Q: [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) 需要的权限有哪些?** + +A: [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) 需要集群级 [ get,list,watch ] endpoints 资源的的权限,其声明式定义如下: + +```yaml +kind: ServiceAccount +apiVersion: v1 +metadata: + name: apisix-test + namespace: default +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: apisix-test +rules: +- apiGroups: [ "" ] + resources: [ endpoints,endpointslices ] + verbs: [ get,list,watch ] +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: apisix-test +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: apisix-test +subjects: + - kind: ServiceAccount + name: apisix-test + namespace: default +``` + +**Q: 怎样获取指定 [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) 的 Token 值?** + +A: 假定你指定的 [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) 资源名为“kubernetes-discovery“, 命名空间为“apisix”, 请按如下步骤获取其 Token 值。 + + 1. 获取 _Secret_ 资源名。执行以下命令,输出的第一列内容就是目标 _Secret_ 资源名: + + ```shell + kubectl -n apisix get secrets | grep kubernetes-discovery + ``` + + 2. 
获取 Token 值。假定你获取到的 _Secret_ 资源名为 "kubernetes-discovery-token-c64cv", 执行以下命令,输出内容就是目标 Token 值: + + ```shell + kubectl -n apisix get secret kubernetes-discovery-token-c64cv -o jsonpath={.data.token} | base64 -d + ``` + +## 调试 API + +它还提供了用于调试的控制 api。 + +### 内存 Dump API + +```shell +GET /v1/discovery/kubernetes/dump +``` + +例子 + +```shell +# curl http://127.0.0.1:9090/v1/discovery/kubernetes/dump | jq +{ + "endpoints": [ + { + "endpoints": [ + { + "value": "{\"https\":[{\"host\":\"172.18.164.170\",\"port\":6443,\"weight\":50},{\"host\":\"172.18.164.171\",\"port\":6443,\"weight\":50},{\"host\":\"172.18.164.172\",\"port\":6443,\"weight\":50}]}", + "name": "default/kubernetes" + }, + { + "value": "{\"metrics\":[{\"host\":\"172.18.164.170\",\"port\":2379,\"weight\":50},{\"host\":\"172.18.164.171\",\"port\":2379,\"weight\":50},{\"host\":\"172.18.164.172\",\"port\":2379,\"weight\":50}]}", + "name": "kube-system/etcd" + }, + { + "value": "{\"http-85\":[{\"host\":\"172.64.89.2\",\"port\":85,\"weight\":50}]}", + "name": "test-ws/testing" + } + ], + "id": "first" + } + ], + "config": [ + { + "default_weight": 50, + "id": "first", + "client": { + "token": "xxx" + }, + "service": { + "host": "172.18.164.170", + "port": "6443", + "schema": "https" + }, + "shared_size": "1m" + } + ] +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/nacos.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/nacos.md new file mode 100644 index 0000000..370ef17 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/discovery/nacos.md @@ -0,0 +1,283 @@ +--- +title: nacos +--- + + + +## 基于 [Nacos](https://nacos.io/zh-cn/docs/what-is-nacos.html) 的服务发现 + +当前模块的性能有待改进: + +1. 
并行发送请求。 + +### Nacos 配置 + +在文件 `conf/config.yaml` 中添加以下配置到: + +```yaml +discovery: + nacos: + host: + - "http://${username}:${password}@${host1}:${port1}" + prefix: "/nacos/v1/" + fetch_interval: 30 # default 30 sec + weight: 100 # default 100 + timeout: + connect: 2000 # default 2000 ms + send: 2000 # default 2000 ms + read: 5000 # default 5000 ms +``` + +也可以这样简洁配置(未配置项使用默认值): + +```yaml +discovery: + nacos: + host: + - "http://192.168.33.1:8848" +``` + +### Upstream 设置 + +#### 七层 + +例如,转发 URI 匹配 "/nacos/*" 的请求到一个上游服务, +该服务在 Nacos 中的服务名是 APISIX-NACOS,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS,创建路由时指定服务发现类型为 nacos。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/nacos/*", + "upstream": { + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos" + } +}' +``` + +响应如下: + +```json +{ + "node": { + "key": "\/apisix\/routes\/1", + "value": { + "id": "1", + "create_time": 1615796097, + "status": 1, + "update_time": 1615799165, + "upstream": { + "hash_on": "vars", + "pass_host": "pass", + "scheme": "http", + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos" + }, + "priority": 0, + "uri": "\/nacos\/*" + } + } +} +``` + +#### 四层 + +nacos 服务发现也支持在四层中使用,配置方式与七层的类似。 + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "remote_addr": "127.0.0.1", + "upstream": { + "scheme": "tcp", + "discovery_type": "nacos", + "service_name": "APISIX-NACOS", + "type": "roundrobin" + } +}' +``` + +### 参数 + +| 名字 | 类型 | 可选项 | 默认值 | 有效值 | 说明 | +| ------------ | ------ | ----------- | ------- | ----- | ------------------------------------------------------------ | +| namespace_id | string | 可选 | 
public | | 服务所在的命名空间 | +| group_name | string | 可选 | DEFAULT_GROUP | | 服务所在的组 | + +#### 指定命名空间 + +例如,转发 URI 匹配 "/nacosWithNamespaceId/*" 的请求到一个上游服务, +该服务在 Nacos 中的服务名是 APISIX-NACOS,命名空间是 test_ns,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns,创建路由时指定服务发现类型为 nacos。 + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/nacosWithNamespaceId/*", + "upstream": { + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "namespace_id": "test_ns" + } + } +}' +``` + +响应如下: + +```json +{ + "node": { + "key": "\/apisix\/routes\/2", + "value": { + "id": "2", + "create_time": 1615796097, + "status": 1, + "update_time": 1615799165, + "upstream": { + "hash_on": "vars", + "pass_host": "pass", + "scheme": "http", + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "namespace_id": "test_ns" + } + }, + "priority": 0, + "uri": "\/nacosWithNamespaceId\/*" + } + } +} +``` + +#### 指定组 + +例如,转发 URI 匹配 "/nacosWithGroupName/*" 的请求到一个上游服务, +该服务在 Nacos 中的服务名是 APISIX-NACOS,组名是 test_group,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&groupName=test_group,创建路由时指定服务发现类型为 nacos。 + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/3 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/nacosWithGroupName/*", + "upstream": { + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "group_name": "test_group" + } + } +}' +``` + +响应如下: + +```json +{ + "node": { + "key": "\/apisix\/routes\/3", + "value": { + "id": "3", + "create_time": 1615796097, + "status": 1, + "update_time": 1615799165, + "upstream": { + "hash_on": "vars", + "pass_host": "pass", + "scheme": "http", + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": 
{ + "group_name": "test_group" + } + }, + "priority": 0, + "uri": "\/nacosWithGroupName\/*" + } + } +} +``` + +#### 同时指定命名空间和组 + +例如,转发 URI 匹配 "/nacosWithNamespaceIdAndGroupName/*" 的请求到一个上游服务, +该服务在 Nacos 中的服务名是 APISIX-NACOS,命名空间是 test_ns,组名是 test_group,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns&groupName=test_group,创建路由时指定服务发现类型为 nacos。 + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/4 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/nacosWithNamespaceIdAndGroupName/*", + "upstream": { + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group" + } + } +}' +``` + +响应如下: + +```json +{ + "node": { + "key": "\/apisix\/routes\/4", + "value": { + "id": "4", + "create_time": 1615796097, + "status": 1, + "update_time": 1615799165, + "upstream": { + "hash_on": "vars", + "pass_host": "pass", + "scheme": "http", + "service_name": "APISIX-NACOS", + "type": "roundrobin", + "discovery_type": "nacos", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group" + } + }, + "priority": 0, + "uri": "\/nacosWithNamespaceIdAndGroupName\/*" + } + } +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/external-plugin.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/external-plugin.md new file mode 100644 index 0000000..5ae32e5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/external-plugin.md @@ -0,0 +1,111 @@ +--- +title: 外部插件 +--- + + + +## 什么是 External Plugin 和 Plugin Runner + +APISIX 支持使用 Lua 语言编写插件,这种类型的插件在 APISIX 内部执行。 +有时候你想使用其他语言来开发插件,因此,APISIX 支持以 `Sidecar` 的方式加载和运行你写的插件。 +这里的 `Sidecar` 就是 Plugin Runner,你写的插件叫做 External Plugin。 + +## 它是如何工作的 + +![external-plugin](../../assets/images/external-plugin.png) + +当你在 APISIX 中配置了一个 Plugin Runner,APISIX 将以子进程的方式运行该 Plugin Runner。 + +该子进程与 APISIX 进程从属相同用户。当重启或者重新加载 APISIX 时,该 
Plugin Runner 也将被重启。 + +一旦你为指定路由配置了 `ext-plugin-*` 插件, +匹配该路由的请求将触发从 APISIX 到 Plugin Runner 的 RPC 调用。 + + Plugin Runner 将处理该 RPC 调用,在其侧创建一个请求,运行 External Plugin 并将结果返回给 APISIX。 + + External Plugin 及其执行顺序在这里 `ext-plugin-*` 配置。与其他插件一样,External Plugin 可以动态启用和重新配置。 + +## 它是如何实现的 + +如果你对 Plugin Runner 内部实现感兴趣,请参考这份文档: +[The Implementation of Plugin Runner](../../en/latest/internal/plugin-runner.md) + +## 支持的 Plugin Runner + +- Java: https://github.com/apache/apisix-java-plugin-runner +- Go: https://github.com/apache/apisix-go-plugin-runner +- Python: https://github.com/apache/apisix-python-plugin-runner +- JavaScript: https://github.com/zenozeng/apisix-javascript-plugin-runner + +## 在 APISIX 中配置 Plugin Runner + +在生产环境运行 Plugin Runner,添加以下配置到 `config.yaml`: + +```yaml +ext-plugin: + cmd: ["blah"] # replace it to the real runner executable according to the runner you choice +``` + +APISIX 将以子进程的方式管理该 Plugin Runner。 + +注意:在 Mac 上,APISIX `v2.6` 无法管理该 Plugin Runner。 + +在开发过程中,我们希望单独运行 Plugin Runner,这样就可以重新启动它,而无需先重新启动 APISIX。 + +通过指定环境变量 `APISIX_LISTEN_ADDRESS`, 我们可以使 Plugin Runner 监听一个固定的地址。 +例如: + +```bash +APISIX_LISTEN_ADDRESS=unix:/tmp/x.sock +``` + +此时,Plugin Runner 将监听 `/tmp/x.sock` + +同时,你需要配置 APISIX 发送 RPC 请求到该固定的地址: + +```yaml +ext-plugin: + # cmd: ["blah"] # don't configure the executable! + path_for_test: "/tmp/x.sock" # without 'unix:' prefix +``` + +在生产环境,不应该使用 `path_for_test`,此时监听的地址将动态生成。 + +## 常见问题 + +### 由 APISIX 管理时,Plugin Runner 无法访问我的环境变量 + +自 `v2.7`,APISIX 可以将环境变量传递给 Plugin Runner。 + +然而,默认情况下,Nginx 将隐藏所有环境变量。所以你需要首先在 `conf/config.yaml` 中声明环境变量: + +```yaml +nginx_config: + envs: + - MY_ENV_VAR +``` + +### APISIX 使用 SIGKILL 终止 Plugin Runner,而不是使用 SIGTERM! 
+ +自 `v2.7`,当跑在 OpenResty 1.19+ 时,APISIX 将使用 SIGTERM 来停止 Plugin Runner。 + +但是,APISIX 需要等待 Plugin Runner 退出,这样我们才能确保资源得以被释放。 + +因此,我们先发送 SIGTERM。然后在 1 秒后,如果 Plugin Runner 仍然在运行,我们将发送 SIGKILL。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/README.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/README.md new file mode 100644 index 0000000..7575132 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/README.md @@ -0,0 +1,71 @@ +--- +title: 入门指南 +description: 本教程使用脚本在本地环境快速安装 Apache APISIX,并且通过管理 API 来验证是否安装成功。 +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +Apache APISIX 是 Apache 软件基金会下的[顶级项目](https://projects.apache.org/project.html?apisix),由 API7.ai 开发并捐赠。它是一个具有动态、实时、高性能等特点的云原生 API 网关。 + +你可以使用 APISIX 网关作为所有业务的流量入口,它提供了动态路由、动态上游、动态证书、A/B 测试、灰度发布(金丝雀发布)、蓝绿部署、限速、防攻击、收集指标、监控报警、可观测、服务治理等功能。 + +本教程使用脚本在本地环境快速安装 Apache APISIX,并且通过管理 API 来验证是否安装成功。 + +## 前置条件 + +快速启动脚本需要以下条件: + +* 已安装 [Docker](https://docs.docker.com/get-docker/),用于部署 **etcd** 和 **APISIX**。 +* 已安装 [curl](https://curl.se/),用于验证 APISIX 是否安装成功。 + +## 安装 APISIX + +:::caution + +为了提供更好的体验,管理 API 默认无需授权,请在生产环境中打开授权开关。 + +::: +APISIX 可以借助 quickstart 脚本快速安装并启动: + +```shell +curl -sL https://run.api7.ai/apisix/quickstart | sh +``` + +该命令启动 _apisix-quickstart_ 和 _etcd_ 两个容器,APISIX 使用 etcd 保存和同步配置。APISIX 和 etcd 容器使用 Docker 的 [**host**](https://docs.docker.com/network/host/) 网络模式,因此可以从本地直接访问。 + +如果一切顺利,将输出如下信息: + +```text +✔ APISIX is ready! +``` + +## 验证 + +你可以通过 curl 来访问正在运行的 APISIX 实例。比如,你可以发送一个简单的 HTTP 请求来验证 APISIX 运行状态是否正常: + +```shell +curl "http://127.0.0.1:9080" --head | grep Server +``` + +如果一切顺利,将输出如下信息: + +```text +Server: APISIX/Version +``` + +这里的 `Version` 是指你已经安装的 APISIX 版本,比如 `APISIX/3.3.0`。 + +现在,你已经成功安装并运行了 APISIX! 
+ +## 下一步 + +如果你已经成功地安装了 APISIX 并且正常运行,那么你可以继续进行下面的教程。 + +* [配置路由](configure-routes.md) +* [负载均衡](load-balancing.md) +* [限速](rate-limiting.md) +* [密钥验证](key-authentication.md) diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/configure-routes.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/configure-routes.md new file mode 100644 index 0000000..f119d00 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/configure-routes.md @@ -0,0 +1,71 @@ +--- +title: 配置路由 +slug: /getting-started/configure-routes +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +Apache APISIX 使用 _routes_ 来提供灵活的网关管理功能,在一个请求中,_routes_ 包含了访问路径和上游目标等信息。 + +本教程将引导你创建一个 route 并验证它,你可以参考以下步骤: + +1. 创建一个指向 [httpbin.org](http://httpbin.org)的 _upstream_。 +2. 使用 _cURL_ 发送一个请求,了解 APISIX 的代理和转发请求机制。 + +## Route 是什么 + +Route(也称之为路由)是访问上游目标的路径,在 [Apache APISIX](https://api7.ai/apisix) 中,Route 首先通过预定的规则来匹配客户端请求,然后加载和执行相应的插件,最后将请求转发至特定的 Upstream。 + +在 APISIX 中,一个最简单的 Route 仅由匹配路径和 Upstream 地址两个信息组成。 + +## Upstream 是什么 + +Upstream(也称之为上游)是一组具备相同功能的节点集合,它是对虚拟主机的抽象。Upstream 可以通过预先配置的规则对多个服务节点进行负载均衡。 + +## 前置条件 + +1. 
参考[入门指南](./README.md)完成 APISIX 的安装。 + +## 创建路由 + +你可以创建一个路由,将客户端的请求转发至 [httpbin.org](http://httpbin.org)(这个网站能测试 HTTP 请求和响应的各种信息)。 + +通过下面的命令,你将创建一个路由,把请求`http://127.0.0.1:9080/ip` 转发至 [httpbin.org/ip](http://httpbin.org/ip): + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "getting-started-ip", + "uri": "/ip", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +如果配置成功,将会返回 `HTTP/1.1 201 Created`。 + +## 验证 + +```shell +curl "http://127.0.0.1:9080/ip" +``` + +你将会得到类似下面的返回: + +```text +{ + "origin": "183.94.122.205" +} +``` + +## 下一步 + +本教程创建的路由仅对应一个上游目标。在下个教程中,你将会学习如何配置多个上游目标的负载均衡。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/key-authentication.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/key-authentication.md new file mode 100644 index 0000000..aae7a9b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/key-authentication.md @@ -0,0 +1,184 @@ +--- +title: 密钥验证 +slug: /getting-started/key-authentication +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +API 网关主要作用是连接 API 消费者和提供者。出于安全考虑,在访问内部资源之前,应先对消费者进行身份验证和授权。 + +![身份验证](https://static.apiseven.com/uploads/2023/02/08/8mRaK3v1_consumer.png) + +APISIX 拥有灵活的插件扩展系统,目前有很多可用于用户身份验证和授权的插件。例如: + +- [Key Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/key-auth/) +- [Basic Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/basic-auth/) +- [JSON Web Token (JWT) Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/jwt-auth/) +- [Keycloak](https://apisix.apache.org/zh/docs/apisix/plugins/authz-keycloak/) +- [Casdoor](https://apisix.apache.org/zh/docs/apisix/plugins/authz-casdoor/) +- [Wolf RBAC](https://apisix.apache.org/zh/docs/apisix/plugins/wolf-rbac/) +- [OpenID Connect](https://apisix.apache.org/zh/docs/apisix/plugins/openid-connect/) +- [Central Authentication Service 
(CAS)](https://apisix.apache.org/zh/docs/apisix/plugins/cas-auth/) +- [HMAC](https://apisix.apache.org/zh/docs/apisix/plugins/hmac-auth/) +- [Casbin](https://apisix.apache.org/zh/docs/apisix/plugins/authz-casbin/) +- [LDAP](https://apisix.apache.org/zh/docs/apisix/plugins/ldap-auth/) +- [Open Policy Agent (OPA)](https://apisix.apache.org/zh/docs/apisix/plugins/opa/) +- [Forward Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/forward-auth/) +- [Multiple Authentications](https://apisix.apache.org/docs/apisix/plugins/multi-auth/) + +本教程中,你将创建一个带有 _密钥验证_ 插件的 _消费者_,并学习如何启用和停用身份验证插件。 + +## Consumer 是什么 + +Consumer(也称之为消费者)是指使用 API 的应用或开发人员。 + +在 APISIX 中,消费者需要一个全局唯一的 _名称_,并从上面的列表中选择一个身份验证 _插件_。 + +## Key Authentication 是什么 + +Key Authentication(也称之为密钥验证)是一个相对比较简单但是应用广泛的身份验证方法,它的设计思路如下: + +1. 管理员为路由添加一个身份验证密钥(API 密钥)。 +2. API 消费者在发送请求时,在查询字符串或者请求头中添加密钥。 + +## 启用 Key Authentication + +### 前置条件 + +1. 参考[快入门指南](./README.md)完成 APISIX 的安装。 +2. 完成[配置路由](./configure-routes.md#route-是什么)。 + +### 创建消费者 + +创建一个名为 `tom` 的消费者,并启用 `key-auth` 插件,密钥设置为 `secret-key`。所有携带密钥 `secret-key` 的请求都会被识别为消费者 `tom`。 + +:::caution + +生产环境请使用复杂的密钥。 + +::: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT -d ' +{ + "username": "tom", + "plugins": { + "key-auth": { + "key": "secret-key" + } + } +}' +``` + +如果消费者创建成功,你将得到返回 `HTTP/1.1 201 Created`。 + +### 启用 Authentication + +在教程[配置路由](./configure-routes.md)中,我们已经创建了路由 `getting-started-ip`,我们通过 `PATCH` 方法为该路由增加 `key-auth` 插件: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "key-auth": {} + } +}' +``` + +如果增加插件成功,你将得到返回 `HTTP/1.1 201 Created`。 + +### 验证 + +我们可以在以下场景中进行验证: + +#### 1. 
发送不带任何密钥的请求 + +发送一个不带请求头 `apikey` 的请求。 + +```shell +curl -i "http://127.0.0.1:9080/ip" +``` + +如果你已经启用了密钥身份验证,你将会得到返回 `HTTP/1.1 401 Unauthorized`,即未授权。 + +```text +HTTP/1.1 401 Unauthorized +Date: Wed, 08 Feb 2023 09:38:36 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.1.0 +``` + +#### 2. 发送携带错误密钥的请求 + +发送一个携带错误密钥(比如 `wrong-key`)的请求。 + +```shell +curl -i "http://127.0.0.1:9080/ip" -H 'apikey: wrong-key' +``` + +如果密钥错误,你也将得到返回 `HTTP/1.1 401 Unauthorized`,即未授权。 + +```text +HTTP/1.1 401 Unauthorized +Date: Wed, 08 Feb 2023 09:38:27 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.1.0 +``` + +#### 3. 发送携带正确密钥的请求 + +发送一个携带正确密钥(`secret-key`)的请求。 + +```shell +curl -i "http://127.0.0.1:9080/ip" -H 'apikey: secret-key' +``` + +你将会得到返回 `HTTP/1.1 200 OK`。 + +```text +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 44 +Connection: keep-alive +Date: Thu, 09 Feb 2023 03:27:57 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/3.1.0 +``` + +### 禁用 Authentication + +将参数设置 `_meta.disable` 为 `true`,即可禁用密钥验证插件。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "key-auth": { + "_meta": { + "disable": true + } + } + } +}' +``` + +你可以发送一个不带任何密钥的请求来验证: + +```shell +curl -i "http://127.0.0.1:9080/ip" +``` + +因为你已经禁用了密钥验证插件,所以你将会得到返回 `HTTP/1.1 200 OK`。 + +## 下一步 + +你已经学习了如何为路由配置密钥验证。在下个教程中,你将学习如何配置限速。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/load-balancing.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/load-balancing.md new file mode 100644 index 0000000..095ab06 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/load-balancing.md @@ -0,0 +1,99 @@ +--- +title: 负载均衡 +slug: /getting-started/load-balancing +--- + + + + + +> 本教程由 
[API7.ai](https://api7.ai/) 编写。 + +负载均衡管理客户端和服务端之间的流量。它决定由哪个服务来处理特定的请求,从而提高性能、可扩展性和可靠性。在设计需要处理大量流量的系统时,负载均衡是一个关键的考虑因素。 + +Apache APISIX 支持加权负载均衡算法,传入的流量按照预定顺序轮流分配给一组服务器的其中一个。 + +在本教程中,你将创建一个具有两个上游服务的路由,并且启用负载均衡来测试在两个服务之间的切换情况。 + +## 前置条件 + +1. 参考[入门指南](./README.md)完成 APISIX 的安装。 +2. 了解 APISIX 中[路由及上游](./configure-routes.md#route-是什么)的概念。 + +## 启用负载均衡 + +创建一个具有两个上游服务的路由,访问 `/headers` 将被转发到 [httpbin.org](https://httpbin.org/headers) 和 [mock.api7.ai](https://mock.api7.ai/headers) 这两个上游服务,并且会返回请求头。 + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "getting-started-headers", + "uri": "/headers", + "upstream" : { + "type": "roundrobin", + "nodes": { + "httpbin.org:443": 1, + "mock.api7.ai:443": 1 + }, + "pass_host": "node", + "scheme": "https" + } +}' +``` + +如果路由创建成功,你将会收到返回 `HTTP/1.1 201 Created`。 + +:::info + +1. 将 `pass_host` 字段设置为 `node`,将传递请求头给上游。 +2. 将 `scheme` 字段设置为 `https`,向上游发送请求时将启用 TLS。 + +::: + +## 验证 + +这两个服务返回不同的数据。 + +`httpbin.org` 返回: + +```json +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.58.0", + "X-Amzn-Trace-Id": "Root=1-63e34b15-19f666602f22591b525e1e80", + "X-Forwarded-Host": "localhost" + } +} +``` + +`mock.api7.ai` 返回: + +```json +{ + "headers": { + "accept": "*/*", + "host": "mock.api7.ai", + "user-agent": "curl/7.58.0", + "content-type": "application/json", + "x-application-owner": "API7.ai" + } +} +``` + +我们生成 100 个请求来测试负载均衡的效果: + +```shell +hc=$(seq 100 | xargs -I {} curl "http://127.0.0.1:9080/headers" -sL | grep "httpbin" | wc -l); echo httpbin.org: $hc, mock.api7.ai: $((100 - $hc)) +``` + +结果显示,请求几乎平均分配给这两个上游服务: + +```text +httpbin.org: 51, mock.api7.ai: 49 +``` + +## 下一步 + +你已经学习了如何配置负载均衡。在下个教程中,你将学习如何配置身份验证。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/rate-limiting.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/rate-limiting.md new file mode 100644 index 0000000..5f4350b --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/getting-started/rate-limiting.md @@ -0,0 +1,101 @@ +--- +title: 限速 +slug: /getting-started/rate-limiting +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +APISIX 是一个统一的控制中心,它管理 API 和微服务的进出流量。除了客户端发来的合理的请求,还可能存在网络爬虫产生的不必要的流量,此外,网络攻击(比如 DDos)也可能产生非法请求。 + +APISIX 提供限速功能,通过限制在规定时间内发送到上游服务的请求数量来保护 APIs 和微服务。请求的计数在内存中完成,具有低延迟和高性能的特点。 + +
+
+Routes Diagram +
+
+ +在本教程中,你将启用 `limit-count` 插件来限制传入流量的速率。 + +## 前置条件 + +1. 参考[入门指南](./README.md)完成 APISIX 的安装。 +2. 完成[配置路由](./configure-routes.md#route-是什么)。 + +## 启用 Rate Limiting + +在教程[配置路由](./configure-routes.md)中,我们已经创建了路由 `getting-started-ip`,我们通过 `PATCH` 方法为该路由增加 `limit-count` 插件: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 10, + "rejected_code": 503 + } + } +}' +``` + +如果增加插件成功,你将得到返回 `HTTP/1.1 201 Created`。上述配置将传入流量的速率限制为每 10 秒最多 2 个请求。 + +### 验证 + +我们同时生成 100 个请求来测试限速插件的效果。 + +```shell +count=$(seq 100 | xargs -I {} curl "http://127.0.0.1:9080/ip" -I -sL | grep "503" | wc -l); echo \"200\": $((100 - $count)), \"503\": $count +``` + +请求结果同预期一致:在这 100 个请求中,有 2 个请求发送成功(状态码为 `200`),其他请求均被拒绝(状态码为 `503`)。 + +```text +"200": 2, "503": 98 +``` + +## 禁用 Rate Limiting + +将参数设置 `_meta.disable` 为 `true`,即可禁用限速插件。 + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "limit-count": { + "_meta": { + "disable": true + } + } + } +}' +``` + +### 验证 + +我们再次同时生成 100 个请求来测试限速插件是否已被禁用: + +```shell +count=$(seq 100 | xargs -i curl "http://127.0.0.1:9080/ip" -I -sL | grep "503" | wc -l); echo \"200\": $((100 - $count)), \"503\": $count +``` + +结果显示所有的请求均成功: + +```text +"200": 100, "503": 0 +``` + +## 更多 + +你可以使用 APISIX 的变量来配置限速插件的规则,比如 `$host` 和 `$uri`。此外,APISIX 也支持使用 Redis 集群进行限速配置,即通过 Redis 来进行计数。 + +## 下一步 + +恭喜你!你已经学习了如何配置限速插件,这也意味着你已经完成了所有的入门教程。 + +你可以继续学习其他文档来定制 APISIX,以满足你的生产环境需要。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/grpc-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/grpc-proxy.md new file mode 100644 index 0000000..dadc8ba --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/grpc-proxy.md @@ -0,0 +1,124 @@ +--- +title: gRPC 代理 +--- + + + +通过 APISIX 代理 gRPC 连接,并使用 APISIX 的大部分特性管理你的 gRPC 服务。 + +## 参数 + +* `scheme`: Route 对应的 Upstream 的 
`scheme` 必须设置为 `grpc` 或者 `grpcs` +* `uri`: 格式为 /service/method 如:/helloworld.Greeter/SayHello + +## 示例 + +### 创建代理 gRPC 的 Route + +在指定 Route 中,代理 gRPC 服务接口: + +* 注意:这个 Route 对应的 Upstream 的 `scheme` 必须设置为 `grpc` 或者 `grpcs`。 +* 注意:APISIX 使用 TLS 加密的 HTTP/2 暴露 gRPC 服务,所以需要先 [配置 SSL 证书](certificate.md); +* 注意:APISIX 也支持通过纯文本的 HTTP/2 暴露 gRPC 服务,这不需要依赖 SSL,通常用于内网环境代理 gRPC 服务 +* 下面例子所代理的 gRPC 服务可供参考:[grpc_server_example](https://github.com/api7/grpc_server_example)。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["POST", "GET"], + "uri": "/helloworld.Greeter/SayHello", + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } +}' +``` + +### 测试 TLS 加密的 HTTP/2 + +访问上面配置的 Route: + +```shell +grpcurl -insecure -import-path /pathtoprotos -proto helloworld.proto \ + -d '{"name":"apisix"}' 127.0.0.1:9443 helloworld.Greeter.SayHello +{ + "message": "Hello apisix" +} +``` + +> grpcurl 是一个 CLI 工具,类似于 curl,充当 gRPC 客户端并让您与 gRPC 服务器进行交互。安装方式请查看官方[文档](https://github.com/fullstorydev/grpcurl#installation) + +这表示已成功代理。 + +### 测试纯文本的 HTTP/2 + +默认情况下,APISIX 只在 `9443` 端口支持 TLS 加密的 HTTP/2。你也可以支持纯本文的 HTTP/2,只需要修改 `conf/config.yaml` 文件中的 `node_listen` 配置即可。 + +```yaml +apisix: + node_listen: + - port: 9080 + - port: 9081 + enable_http2: true +``` + +访问上面配置的 Route: + +```shell +grpcurl -plaintext -import-path /pathtoprotos -proto helloworld.proto \ + -d '{"name":"apisix"}' 127.0.0.1:9081 helloworld.Greeter.SayHello +{ + "message": "Hello apisix" +} +``` + +这表示已成功代理。 + +### gRPCS + +如果你的 gRPC 服务使用了自己的 TLS 加密,即所谓的 `gPRCS` (gRPC + TLS),那么需要修改 scheme 为 `grpcs`。继续上面的例子,50052 端口上跑的是 gPRCS 的服务,这时候应该这么配置: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": 
["POST", "GET"], + "uri": "/helloworld.Greeter/SayHello", + "upstream": { + "scheme": "grpcs", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50052": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/http3.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/http3.md new file mode 100644 index 0000000..5ea2f12 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/http3.md @@ -0,0 +1,186 @@ +--- +title: HTTP3 协议 +--- + + + +[HTTP/3](https://en.wikipedia.org/wiki/HTTP/3) 是 Hypertext Transfer Protocol(HTTP) 的第三个主要版本。与依赖 TCP 的前辈不同,HTTP/3 基于 [QUIC (Quick UDP Internet Connections) protocol](https://en.wikipedia.org/wiki/QUIC)。它带来了多项好处,减少了延迟并提高了性能: + +* 实现不同网络连接之间的无缝过渡,例如从 Wi-Fi 切换到移动数据。 +* 消除队头阻塞,以便丢失的数据包不会阻塞所有流。 +* 在 TLS 握手的同时协商 TLS 版本,从而实现更快的连接。 +* 默认提供加密,确保通过 HTTP/3 连接传输的所有数据都受到保护和保密。 +* 在与客户端已建立连接的服务器通信时提供零往返时间 (0-RTT)。 + +APISIX 目前支持下游客户端和 APISIX 之间的 HTTP/3 连接。尚不支持与上游服务的 HTTP/3 连接。欢迎社区贡献。 + +:::caution + +此功能尚未经过大规模测试,因此不建议用于生产使用。 + +::: + +本文档将向您展示如何配置 APISIX 以在客户端和 APISIX 之间启用 HTTP/3 连接,并记录一些已知问题。 + +## 使用示例 + +### 启用 HTTP/3 + +将以下配置添加到 APISIX 的配置文件。该配置将在端口 `9443`(或其他端口)上启用 HTTP/3: + +```yaml title="config.yaml" +apisix: + ssl: + listen: + - port: 9443 + enable_http3: true + ssl_protocols: TLSv1.3 +``` + +:::info + +如果您使用 Docker 部署 APISIX,请确保在 HTTP3 端口中允许 UDP,例如 `-p 9443:9443/udp`。 + +::: + +然后重新加载 APISIX 以使配置更改生效: + +```shell +apisix reload +``` + +### 生成证书和密钥 + +HTTP/3 需要 TLS。您可以利用购买的证书或自行生成证书。 + +如自行生成,首先生成证书颁发机构 (CA) 密钥和证书: + +```shell +openssl genrsa -out ca.key 2048 && \ + openssl req -new -sha256 -key ca.key -out ca.csr -subj "/CN=ROOTCA" && \ + openssl x509 -req -days 36500 -sha256 -extensions v3_ca -signkey ca.key -in ca.csr -out ca.crt +``` + +接下来,生成具有 APISIX 通用名称的密钥和证书,并使用 CA 证书进行签名: + +```shell +openssl genrsa -out server.key 2048 && \ + openssl req -new -sha256 -key server.key -out server.csr -subj "/CN=test.com" && \ + openssl x509 -req -days 36500 -sha256 -extensions v3_req \ + -CA ca.crt 
-CAkey ca.key -CAserial ca.srl -CAcreateserial \ + -in server.csr -out server.crt +``` + +### 配置 HTTPS + +可选择性地将存储在 `server.crt` 和 `server.key` 中的内容加载到环境变量中: + +```shell +server_cert=$(cat server.crt) +server_key=$(cat server.key) +``` + +创建一个保存服务器证书及其密钥的 SSL 对象: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/ssls" -X PUT -d ' +{ + "id": "quickstart-tls-client-ssl", + "sni": "test.com", + "cert": "'"${server_cert}"'", + "key": "'"${server_key}"'" +}' +``` + +### 创建路由 + +创建一个路由至 `httpbin.org`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id":"httpbin-route", + "uri":"/get", + "upstream": { + "type":"roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +### 验证 HTTP/3 连接 + +验证前需要安装支持 HTTP/3 的 curl,如 [static-curl](https://github.com/stunnel/static-curl) 或其他支持 HTTP/3 的 curl。 + +发送一个请求到路由: + +```shell +curl -kv --http3-only \ + -H "Host: test.com" \ + --resolve "test.com:9443:127.0.0.1" "https://test.com:9443/get" +``` + +应收到 `HTTP/3 200` 相应如下: + +```text +* Added test.com:9443:127.0.0.1 to DNS cache +* Hostname test.com was found in DNS cache +* Trying 127.0.0.1:9443... +* QUIC cipher selection: TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256 +* Skipped certificate verification +* Connected to test.com (127.0.0.1) port 9443 +* using HTTP/3 +* [HTTP/3] [0] OPENED stream for https://test.com:9443/get +* [HTTP/3] [0] [:method: GET] +* [HTTP/3] [0] [:scheme: https] +* [HTTP/3] [0] [:authority: test.com] +* [HTTP/3] [0] [:path: /get] +* [HTTP/3] [0] [user-agent: curl/8.7.1] +* [HTTP/3] [0] [accept: */*] +> GET /get HTTP/3 +> Host: test.com +> User-Agent: curl/8.7.1 +> Accept: */* +> +* Request completely sent off +< HTTP/3 200 +... 
+{ + "args": {}, + "headers": { + "Accept": "*/*", + "Content-Length": "0", + "Host": "test.com", + "User-Agent": "curl/8.7.1", + "X-Amzn-Trace-Id": "Root=1-6656013a-27da6b6a34d98e3e79baaf5b", + "X-Forwarded-Host": "test.com" + }, + "origin": "172.19.0.1, 123.40.79.456", + "url": "http://test.com/get" +} +* Connection #0 to host test.com left intact +``` + +## 已知问题 + +- 对于 APISIX-3.9, Tongsuo 相关测试用例会失败,因为 Tongsuo 不支持 QUIC TLS。 +- APISIX-3.9 基于 NGINX-1.25.3,存在 HTTP/3 漏洞(CVE-2024-24989、CVE-2024-24990)。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/install-dependencies.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/install-dependencies.md new file mode 100644 index 0000000..c545c8c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/install-dependencies.md @@ -0,0 +1,52 @@ +--- +title: 安装依赖 +--- + + + +## 注意 + +- Apache APISIX 从 v2.0 开始不再支持 `v2` 版本的 etcd,并且 etcd 最低支持版本为 v3.4.0,因此请使用 etcd 3.4.0+。更重要的是,因为 etcd v3 使用 gRPC 作为消息传递协议,而 Apache APISIX 使用 HTTP(S) 与 etcd 集群通信,因此请确保启用 [etcd gRPC gateway](https://etcd.io/docs/v3.4.0/dev-guide/api_grpc_gateway/) 功能。 + +- 目前 Apache APISIX 默认使用 HTTP 协议与 etcd 集群通信,这并不安全,如果希望保障数据的安全性和完整性。请为您的 etcd 集群配置证书及对应私钥,并在您的 Apache APISIX etcd endpoints 配置列表中明确使用 `https` 协议前缀。请查阅 `conf/config.yaml.example` 中 `etcd` 一节相关的配置来了解更多细节。 + +- 如果是 OpenResty 1.19,APISIX 会使用 OpenResty 内置的 LuaJIT 来运行 `bin/apisix`;否则会使用 Lua 5.1。如果运行过程中遇到 `luajit: lj_asm_x86.h:2819: asm_loop_fixup: Assertion '((intptr_t)target & 15) == 0' failed`,这是低版本 OpenResty 内置的 LuaJIT 在特定编译条件下的问题。 + +- 在某些平台上,通过包管理器安装 LuaRocks 会导致 Lua 被升级为 Lua 5.3,所以我们建议通过源代码的方式安装 LuaRocks。如果你通过官方仓库安装 OpenResty 和 OpenResty 的 OpenSSL 开发库(rpm 版本:openresty-openssl111-devel,deb 版本:openresty-openssl111-dev),那么 [我们提供了自动安装的脚本](https://github.com/apache/apisix/tree/master/utils/linux-install-luarocks.sh)。如果你是自己编译的 OpenResty,可以参考上述脚本并修改里面的路径。如果编译时没有指定 OpenSSL 库的路径,那么无需配置 LuaRocks 内跟 OpenSSL 相关的变量,因为默认都是用的系统自带的 OpenSSL。如果编译时指定了 OpenSSL 库,那么需要保证 LuaRocks 的 
OpenSSL 配置跟 OpenResty 的相一致。 + +- OpenResty 是 APISIX 的一个依赖项,如果是第一次部署 APISIX 并且不需要使用 OpenResty 部署其他服务,可以在 OpenResty 安装完成后停止并禁用 OpenResty,这不会影响 APISIX 的正常工作,请根据自己的业务谨慎操作。例如 Ubuntu:`systemctl stop openresty && systemctl disable openresty`。 + +## 安装 + +在支持的操作系统上运行以下指令即可安装 Apache APISIX dependencies。 + +支持的操作系统版本:CentOS 7, Fedora 31 & 32, Ubuntu 16.04 & 18.04, Debian 9 & 10, Arch Linux。 + +注意,对于 Arch Linux 来说,我们使用 AUR 源中的 `openresty`,所以需要 AUR Helper 才能正常安装。目前支持 `yay` 和 `pacaur`。 + +``` +curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-dependencies.sh -sL | bash - +``` + +如果你已经克隆了 Apache APISIX 仓库,在根目录运行以下指令安装 Apache APISIX dependencies。 + +``` +bash utils/install-dependencies.sh +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/installation-guide.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/installation-guide.md new file mode 100644 index 0000000..faa775a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/installation-guide.md @@ -0,0 +1,330 @@ +--- +title: APISIX 安装指南 +keywords: + - APISIX + - APISIX 安装教程 + - 部署 APISIX +description: 本文档主要介绍了 APISIX 多种安装方法。 +--- + + + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +本文将介绍如何在你的环境中安装并运行 APISIX。 + +关于如何快速运行 Apache APISIX,请参考[入门指南](./getting-started/README.md)。 + +## 安装 APISIX + +你可以选择以下任意一种方式安装 APISIX: + + + + +使用此方法安装 APISIX,你需要安装 [Docker](https://www.docker.com/) 和 [Docker Compose](https://docs.docker.com/compose/)。 + +首先下载 [apisix-docker](https://github.com/apache/apisix-docker) 仓库。 + +```shell +git clone https://github.com/apache/apisix-docker.git +cd apisix-docker/example +``` + +然后,使用 `docker-compose` 启用 APISIX。 + + + + +```shell +docker-compose -p docker-apisix up -d +``` + + + + + +```shell +docker-compose -p docker-apisix -f docker-compose-arm64.yml up -d +``` + + + + + + + + +通过 Helm 安装 APISIX,请执行以下命令: + +```shell +helm repo add apisix https://charts.apiseven.com +helm repo update +helm install apisix 
apisix/apisix --create-namespace --namespace apisix +``` + +你可以从 [apisix-helm-chart](https://github.com/apache/apisix-helm-chart) 仓库找到其他组件。 + + + + + +该安装方法适用于 CentOS 7 和 CentOS 8。如果你选择该方法安装 APISIX,需要先安装 etcd。具体安装方法请参考 [安装 etcd](#安装-etcd)。 + +### 通过 RPM 仓库安装 + +如果当前系统**没有安装 OpenResty**,请使用以下命令来安装 OpenResty 和 APISIX 仓库: + +```shell +sudo yum install -y https://repos.apiseven.com/packages/centos/apache-apisix-repo-1.0-1.noarch.rpm +``` + +如果已安装 OpenResty 的官方 RPM 仓库,请使用以下命令安装 APISIX 的 RPM 仓库: + +```shell +sudo yum-config-manager --add-repo https://repos.apiseven.com/packages/centos/apache-apisix.repo +``` + +完成上述操作后使用以下命令安装 APISIX: + +```shell +sudo yum install apisix +``` + +:::tip + +你也可以安装指定版本的 APISIX(本示例为 APISIX v3.8.0 版本): + +```shell +sudo yum install apisix-3.8.0 +``` + +::: + +### 通过 RPM 包离线安装: + +将 APISIX 离线 RPM 包下载到 `apisix` 文件夹: + +```shell +sudo mkdir -p apisix +sudo yum install -y https://repos.apiseven.com/packages/centos/apache-apisix-repo-1.0-1.noarch.rpm +sudo yum clean all && yum makecache +sudo yum install -y --downloadonly --downloaddir=./apisix apisix +``` + +然后将 `apisix` 文件夹复制到目标主机并运行以下命令: + +```shell +sudo yum install ./apisix/*.rpm +``` + +### 管理 APISIX 服务 + +APISIX 安装完成后,你可以运行以下命令初始化 NGINX 配置文件和 etcd: + +```shell +apisix init +``` + +使用以下命令启动 APISIX: + +```shell +apisix start +``` + +:::tip + +你可以运行 `apisix help` 命令,通过查看返回结果,获取其他操作的命令及描述。 + +::: + + + + + +### 通过 DEB 仓库安装 + +目前 APISIX 支持的 DEB 仓库仅支持 Debian 11(Bullseye),并且支持 amd64 和 arm64 架构。 + +```shell +# amd64 +wget -O - http://repos.apiseven.com/pubkey.gpg | sudo apt-key add - +echo "deb http://repos.apiseven.com/packages/debian bullseye main" | sudo tee /etc/apt/sources.list.d/apisix.list + +# arm64 +wget -O - http://repos.apiseven.com/pubkey.gpg | sudo apt-key add - +echo "deb http://repos.apiseven.com/packages/arm64/debian bullseye main" | sudo tee /etc/apt/sources.list.d/apisix.list +``` + +完成上述操作后使用以下命令安装 APISIX: + +```shell +sudo apt update +sudo apt install -y apisix=3.8.0-0 +``` + 
+### 管理 APISIX 服务 + +APISIX 安装完成后,你可以运行以下命令初始化 NGINX 配置文件和 etcd: + +```shell +sudo apisix init +``` + +使用以下命令启动 APISIX: + +```shell +sudo apisix start +``` + +:::tip + +你可以运行 `apisix help` 命令,通过查看返回结果,获取其他操作的命令及描述。 + +::: + + + + + +如果你想要使用源码构建 APISIX,请参考 [源码安装 APISIX](./building-apisix.md)。 + + + + +## 安装 etcd + +APISIX 使用 [etcd](https://github.com/etcd-io/etcd) 作为配置中心进行保存和同步配置。在安装 APISIX 之前,需要在你的主机上安装 etcd。 + +如果你在安装 APISIX 时选择了 Docker 或 Helm 安装,那么 etcd 将会自动安装;如果你选择其他方法或者需要手动安装 APISIX,请参考以下步骤安装 etcd: + + + + +```shell +ETCD_VERSION='3.5.4' +wget https://github.com/etcd-io/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz +tar -xvf etcd-v${ETCD_VERSION}-linux-amd64.tar.gz && \ + cd etcd-v${ETCD_VERSION}-linux-amd64 && \ + sudo cp -a etcd etcdctl /usr/bin/ +nohup etcd >/tmp/etcd.log 2>&1 & +``` + + + + + +```shell +brew install etcd +brew services start etcd +``` + + + + +## 后续操作 + +### 配置 APISIX + +通过修改本地 `./conf/config.yaml` 文件,或者在启动 APISIX 时使用 `-c` 或 `--config` 添加文件路径参数 `apisix start -c `,完成对 APISIX 服务本身的基本配置。默认配置不应修改,可以在 `apisix/cli/config.lua` 中找到。 + +比如将 APISIX 默认监听端口修改为 8000,其他配置保持默认,在 `./conf/config.yaml` 中只需这样配置: + +```yaml title="./conf/config.yaml" +apisix: + node_listen: 8000 # APISIX listening port +``` + +比如指定 APISIX 默认监听端口为 8000,并且设置 etcd 地址为 `http://foo:2379`,其他配置保持默认。在 `./conf/config.yaml` 中只需这样配置: + +```yaml title="./conf/config.yaml" +apisix: + node_listen: 8000 # APISIX listening port + +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://foo:2379" +``` + +:::warning + +请不要手动修改 APISIX 安装目录下的 `./conf/nginx.conf` 文件。当 APISIX 启动时,会根据 `config.yaml` 的配置自动生成新的 `nginx.conf` 并自动启动服务。 + +::: + +### 更新 Admin API key + +建议修改 Admin API 的 key,保护 APISIX 的安全。 + +请参考如下信息更新配置文件: + +```yaml title="./conf/config.yaml" +deployment: + admin: + admin_key: + - name: "admin" + key: newsupersecurekey # 请修改 key 的值 + role: admin +``` + +更新完成后,你可以使用新的 key 访问 Admin API: + +```shell +curl 
http://127.0.0.1:9180/apisix/admin/routes?api_key=newsupersecurekey -i +``` + +### 为 APISIX 添加 systemd 配置文件 + +如果你是通过 RPM 包安装 APISIX,配置文件已经自动安装,你可以直接使用以下命令: + +```shell +systemctl start apisix +systemctl stop apisix +``` + +如果你是通过其他方法安装的 APISIX,可以参考[配置文件模板](https://github.com/api7/apisix-build-tools/blob/master/usr/lib/systemd/system/apisix.service)进行修改,并将其添加在 `/usr/lib/systemd/system/apisix.service` 路径下。 + +如需了解 APISIX 后续使用,请参考[入门指南](./getting-started/README.md)获取更多信息。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/mtls.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/mtls.md new file mode 100644 index 0000000..b7c06f1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/mtls.md @@ -0,0 +1,205 @@ +--- +title: TLS 双向认证 +--- + + + +## 保护 Admin API + +### 为什么使用 + +双向认证提供了一种更好的方法来阻止未经授权的对 APISIX Admin API 的访问。 + +客户端需要向服务器提供证书,服务器将检查该客户端证书是否由受信的 CA 签名,并决定是否响应其请求。 + +### 如何配置 + +1. 生成自签证书对,包括 CA、server、client 证书对。 + +2. 修改 `conf/config.yaml` 中的配置项: + +```yaml title="conf/config.yaml" + admin_listen: + ip: 127.0.0.1 + port: 9180 + https_admin: true + + admin_api_mtls: + admin_ssl_ca_cert: "/data/certs/mtls_ca.crt" # Path of your self-signed ca cert. + admin_ssl_cert: "/data/certs/mtls_server.crt" # Path of your self-signed server side cert. + admin_ssl_cert_key: "/data/certs/mtls_server.key" # Path of your self-signed server side key. +``` + +3. 
执行命令,使配置生效: + +```shell +apisix init +apisix reload +``` + +### 客户端如何调用 + +需要将证书文件的路径与域名按实际情况替换。 + +* 注意:提供的 CA 证书需要与服务端的相同。* + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl --cacert /data/certs/mtls_ca.crt --key /data/certs/mtls_client.key --cert /data/certs/mtls_client.crt https://admin.apisix.dev:9180/apisix/admin/routes -H "X-API-KEY: $admin_key" +``` + +## 保护 ETCD + +### 如何配置 + +你需要构建 [APISIX-runtime](./FAQ.md#如何构建-APISIX-runtime-环境?),并且需要在配置文件中设定 `etcd.tls` 来使 ETCD 的双向认证功能正常工作。 + +```yaml title="conf/config.yaml" +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + tls: + cert: /data/certs/etcd_client.pem # path of certificate used by the etcd client + key: /data/certs/etcd_client.key # path of key used by the etcd client +``` + +如果 APISIX 不信任 etcd server 使用的 CA 证书,我们需要设置 CA 证书。 + +```yaml title="conf/config.yaml" +apisix: + ssl: + ssl_trusted_certificate: /path/to/certs/ca-certificates.crt # path of CA certificate used by the etcd server +``` + +## 保护路由 + +### 为什么使用 + +双向认证是一种密码学安全的验证客户端身份的手段。当你需要加密并保护流量的双向安全时很有用。 + +* 注意:双向认证只发生在 HTTPS 中。如果你的路由也可以通过 HTTP 访问,你应该在 HTTP 中添加额外的保护,或者禁止通过 HTTP 访问。* + +### 如何配置 + +我们提供了一个[演示教程](./tutorials/client-to-apisix-mtls.md),详细地讲解了如何配置客户端和 APISIX 之间的 mTLS。 + +在配置 `ssl` 资源时,同时需要配置 `client.ca` 和 `client.depth` 参数,分别代表为客户端证书签名的 CA 列表,和证书链的最大深度。可参考:[SSL API 文档](./admin-api.md#ssl)。 + +下面是一个可用于生成带双向认证配置的 SSL 资源的 shell 脚本示例(如果需要,可修改 API 地址、API Key 和 SSL 资源的 ID。): + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' + "cert": "'"$(cat t/certs/mtls_server.crt)"'", + "key": "'"$(cat t/certs/mtls_server.key)"'", + "snis": [ + "admin.apisix.dev" + ], + "client": { + "ca": "'"$(cat t/certs/mtls_ca.crt)"'", + "depth": 10 + } +}' +``` + +测试: + +```bash +curl -vvv --resolve 'admin.apisix.dev:9443:127.0.0.1' 
https://admin.apisix.dev:9443/hello --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key --cacert t/certs/mtls_ca.crt + +* Added admin.apisix.dev:9443:127.0.0.1 to DNS cache +* Hostname admin.apisix.dev was found in DNS cache +* Trying 127.0.0.1:9443... +* Connected to admin.apisix.dev (127.0.0.1) port 9443 (#0) +* ALPN: offers h2 +* ALPN: offers http/1.1 +* CAfile: t/certs/mtls_ca.crt +* CApath: none +* [CONN-0-0][CF-SSL] (304) (OUT), TLS handshake, Client hello (1): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Server hello (2): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Unknown (8): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Request CERT (13): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Certificate (11): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, CERT verify (15): +* [CONN-0-0][CF-SSL] (304) (IN), TLS handshake, Finished (20): +* [CONN-0-0][CF-SSL] (304) (OUT), TLS handshake, Certificate (11): +* [CONN-0-0][CF-SSL] (304) (OUT), TLS handshake, CERT verify (15): +* [CONN-0-0][CF-SSL] (304) (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / AEAD-AES256-GCM-SHA384 +* ALPN: server accepted h2 +* Server certificate: +* subject: C=cn; ST=GuangDong; L=ZhuHai; CN=admin.apisix.dev; OU=ops +* start date: Dec 1 10:17:24 2022 GMT +* expire date: Aug 18 10:17:24 2042 GMT +* subjectAltName: host "admin.apisix.dev" matched cert's "admin.apisix.dev" +* issuer: C=cn; ST=GuangDong; L=ZhuHai; CN=ca.apisix.dev; OU=ops +* SSL certificate verify ok. 
+* Using HTTP2, server supports multiplexing +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* h2h3 [:method: GET] +* h2h3 [:path: /hello] +* h2h3 [:scheme: https] +* h2h3 [:authority: admin.apisix.dev:9443] +* h2h3 [user-agent: curl/7.87.0] +* h2h3 [accept: */*] +* Using Stream ID: 1 (easy handle 0x13000bc00) +> GET /hello HTTP/2 +> Host: admin.apisix.dev:9443 +> user-agent: curl/7.87.0 +> accept: */* +``` + +注意,测试时使用的域名需要符合证书的参数。 + +## APISIX 与上游间的双向认证 + +### 为什么使用 + +有时候上游的服务启用了双向认证。在这种情况下,APISIX 作为上游服务的客户端,需要提供客户端证书来正常与其进行通信。 + +### 如何配置 + +在配置 upstream 资源时,可以使用参数 `tls.client_cert` 和 `tls.client_key` 来配置 APISIX 用于与上游进行通讯时使用的证书。可参考 [Upstream API 文档](./admin-api.md#upstream)。 + +该功能需要 APISIX 运行在 [APISIX-Runtime](./FAQ.md#如何构建-apisix-runtime-环境) 上。 + +下面是一个与配置 SSL 时相似的 shell 脚本,可为一个已存在的 upstream 资源配置双向认证。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/upstreams/1 \ +-H "X-API-KEY: $admin_key" -X PATCH -d ' +{ + "tls": { + "client_cert": "'"$(cat t/certs/mtls_client.crt)"'", + "client_key": "'"$(cat t/certs/mtls_client.key)"'" + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugin-develop.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugin-develop.md new file mode 100644 index 0000000..e1d6b99 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugin-develop.md @@ -0,0 +1,480 @@ +--- +title: 插件开发 +--- + + + +此文档是关于 lua 语言的插件开发,其他语言请看:[external plugin](./external-plugin.md)。 + +## 插件放置路径 + +通过在 `conf/config.yaml` 中配置 `extra_lua_path` 来加载你自定义的 lua 插件代码 (或者配置 `extra_lua_cpath` 指定编译的 .so 或 .dll 文件)。 + +比如,你可以创建一个目录 `/path/to/example` 作为 `extra_lua_path` 配置的值: + +```yaml +apisix: + ... 
+ extra_lua_path: "/path/to/example/?.lua" +``` + +`example` 目录的结构应该像下面这样: + +``` +├── example +│   └── apisix +│   ├── plugins +│   │   └── 3rd-party.lua +│   └── stream +│   └── plugins +│   └── 3rd-party.lua +``` + +:::note + +该目录 (`/path/to/example`) 下必须包含 `/apisix/plugins` 的子目录。 + +::: + +:::important + +你应该给自己的代码文件起一个与内置插件代码文件 (在 `apisix/plugins` 目录下) 不同的名字。但是如果有需要,你可以使用相同名称的代码文件覆盖内置的代码文件。 + +::: + +## 启用插件 + +要启用您的自定义插件,请将插件列表添加到 `conf/config.yaml` 并附加您的插件名称。例如: + +```yaml +plugins: # 请参阅 `conf/config.yaml.example` 示例 + - ... # 添加现有插件 + - your-plugin # 添加您的自定义插件名称 (名称是在代码中定义的插件名称) +``` + +:::warning + +特别注意的是,在默认情况下 plugins 字段配置没有定义的情况下,大多数 APISIX 插件都是启用的状态 (默认启用的插件请参考[apisix/cli/config.lua](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua))。 + +一旦在 `conf/config.yaml` 中定义了 plugins 配置,新的 plugins 列表将会替代默认的配置,而是不是合并,因此在新增配置`plugins`字段时请确保包含正在使用的内置插件。为了在定义 plugins 配置的同时与默认行为保持一致,可以在 plugins 中包含 `apisix/cli/config.lua` 定义的所有默认启用的插件。 + +::: + +## 编写插件 + +[example-plugin](https://github.com/apache/apisix/blob/master/apisix/plugins/example-plugin.lua) 插件 (本地位置: **apisix/plugins/example-plugin.lua**) 提供了一个示例。 + +### 命名和优先级 + +在代码里指定插件名称(名称是插件的唯一标识,不可重名)和加载优先级。 + +```lua +local plugin_name = "example-plugin" + +local _M = { + version = 0.1, + priority = 0, + name = plugin_name, + schema = schema, + metadata_schema = metadata_schema, +} +``` + +注:新插件的优先级(priority 属性)不能与现有插件的优先级相同,您可以使用 [control API](./control-api.md#get-v1schema) 的 `/v1/schema` 方法查看所有插件的优先级。另外,同一个阶段里面,优先级 ( priority ) 值大的插件,会优先执行,比如 `example-plugin` 的优先级是 0,`ip-restriction` 的优先级是 3000,所以在每个阶段,会先执行 `ip-restriction` 插件,再去执行 `example-plugin` 插件。这里的“阶段”的定义,参见后续的 [确定执行阶段](#确定执行阶段) 这一节。对于你的插件,建议采用 1 到 99 之间的优先级。 + +注:先后顺序与执行顺序无关。 + +### 配置描述与校验 + +定义插件的配置项,以及对应的 [JSON Schema](https://json-schema.org) 描述,并完成对 JSON 的校验,这样方便对配置的数据规格进行验证,以确保数据的完整性以及程序的健壮性。同样,我们以 example-plugin 插件为例,看看他的配置数据: + +```json +{ + "example-plugin": { + "i": 1, + "s": "s", + "t": [1] + } +} +``` + +我们看下他的 Schema 描述: + 
+```lua +local schema = { + type = "object", + properties = { + i = {type = "number", minimum = 0}, + s = {type = "string"}, + t = {type = "array", minItems = 1}, + ip = {type = "string"}, + port = {type = "integer"}, + }, + required = {"i"}, +} +``` + +这个 schema 定义了一个非负数 `i`,字符串 `s`,非空数组 `t`,和 `ip` 跟 `port`。只有 `i` 是必需的。 + +同时,需要实现 **check_schema(conf, schema_type)** 方法,完成配置参数的合法性校验。 + +```lua +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end +``` + +:::note + +项目已经提供了 **core.schema.check** 公共方法,直接使用即可完成配置参数校验。 + +::: + +通过函数输入参数 **schema_type** 可以对不同类型的 schema 进行对应的校验。例如很多插件需要使用一些[元数据](./terminology/plugin-metadata.md),可以定义插件的 `metadata_schema`。 + +```lua title="example-plugin.lua" +-- schema definition for metadata +local metadata_schema = { + type = "object", + properties = { + ikey = {type = "number", minimum = 0}, + skey = {type = "string"}, + }, + required = {"ikey", "skey"}, +} + +function _M.check_schema(conf, schema_type) + --- check schema for metadata + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + return core.schema.check(schema, conf) +end +``` + +再比如 [key-auth](https://github.com/apache/apisix/blob/master/apisix/plugins/key-auth.lua) 插件为了跟 [Consumer](./admin-api.md#consumer) 资源一起使用,认证插件需要提供一个 `consumer_schema` 来检验 `Consumer` 资源的 `plugins` 属性里面的配置。 + +```lua title="key-auth.lua" + +local consumer_schema = { + type = "object", + properties = { + key = {type = "string"}, + }, + required = {"key"}, +} + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_CONSUMER then + return core.schema.check(consumer_schema, conf) + else + return core.schema.check(schema, conf) + end +end +``` + +### 确定执行阶段 + +根据业务功能,确定你的插件需要在哪个[阶段](./terminology/plugin.md#插件执行生命周期)执行。 + +以 `key-auth` 为例, `key-auth`是一个认证插件,所以需要在 rewrite 阶段执行。在 APISIX,只有认证逻辑可以在 rewrite 阶段里面完成,其他需要在代理到上游之前执行的逻辑都是在 access 阶段完成的。 + +**注意:我们不能在 rewrite 和 access 阶段调用 
`ngx.exit`、`ngx.redirect` 或者 `core.respond.exit`。如果确实需要退出,只需要 return 状态码和正文,插件引擎将使用返回的状态码和正文进行退出。[例子](https://github.com/apache/apisix/blob/35269581e21473e1a27b11cceca6f773cad0192a/apisix/plugins/limit-count.lua#L177)** + +#### APISIX 的自定义阶段 + +除了 OpenResty 的阶段,我们还提供额外的阶段来满足特定的目的: + +- `delayed_body_filter` + +```lua +function _M.delayed_body_filter(conf, ctx) + -- delayed_body_filter 在 body_filter 之后被调用。 + -- 它被 tracing 类型插件用来在 body_filter 之后立即结束 span。 +end +``` + +### 编写执行逻辑 + +在对应的阶段方法里编写功能的逻辑代码,在阶段方法中具有 `conf` 和 `ctx` 两个参数,以 `limit-conn` 插件配置为例。 + +#### conf 参数 + +`conf` 参数是插件的相关配置信息,您可以通过 `core.log.warn(core.json.encode(conf))` 将其输出到 `error.log` 中进行查看,如下所示: + +```lua +function _M.access(conf, ctx) + core.log.warn(core.json.encode(conf)) + ...... +end +``` + +conf: + +```json +{ + "rejected_code": 503, + "burst": 0, + "default_conn_delay": 0.1, + "conn": 1, + "key": "remote_addr" +} +``` + +#### ctx 参数 + +`ctx` 参数缓存了请求相关的数据信息,您可以通过 `core.log.warn(core.json.encode(ctx, true))` 将其输出到 `error.log` 中进行查看,如下所示: + +```lua +function _M.access(conf, ctx) + core.log.warn(core.json.encode(ctx, true)) + ...... +end +``` + +### 其它注意事项 + +特别需要注意的是,如果你的插件有新建自己的代码目录,那么就需要修改 Makefile 文件,新增创建文件夹的操作,比如: + +``` +$(INSTALL) -d $(INST_LUADIR)/apisix/plugins/skywalking +$(INSTALL) apisix/plugins/skywalking/*.lua $(INST_LUADIR)/apisix/plugins/skywalking/ +``` + +`_M` 中还有其他字段会影响到插件的行为。 + +```lua +local _M = { + ... + type = 'auth', + run_policy = 'prefer_route', +} +``` + +`run_policy` 字段可以用来控制插件执行。当这个字段设置成 `prefer_route` 时,且该插件同时配置在全局和路由级别,那么只有路由级别的配置生效。 + +如果你的插件需要跟 `consumer` 一起使用,需要把 `type` 设置成 `auth`。 + +## 加载插件和替换插件 + +现在使用 `require "apisix.plugins.3rd-party"` 会加载你自己的插件,比如 `require "apisix.plugins.jwt-auth"`会加载 `jwt-auth` 插件。 + +可能你会想覆盖一个文件中的函数,你可以在 `conf/config.yaml` 文件中配置 `lua_module_hook` 来使你的 hook 生效。 + +你的配置可以像下面这样: + +```yaml +apisix: + ... 
+ extra_lua_path: "/path/to/example/?.lua" + lua_module_hook: "my_hook" +``` + +当 APISIX 启动的时候,`example/my_hook.lua` 就会被加载,这时你可以使用这个钩子在 APISIX 中来全局替换掉一个方法。 +这个例子:[my_hook.lua](https://github.com/apache/apisix/blob/master/example/my_hook.lua) 可以在项目的 `example` 路径下被找到。 + +## 检查外部依赖 + +如果你的插件,涉及到一些外部的依赖和三方库,请首先检查一下依赖项的内容。如果插件需要用到共享内存,需要在 [自定义 Nginx 配置](./customize-nginx-configuration.md),例如: + +```yaml +# put this in config.yaml: +nginx_config: + http_configuration_snippet: | + # for openid-connect plugin + lua_shared_dict discovery 1m; # cache for discovery metadata documents + lua_shared_dict jwks 1m; # cache for JWKs + lua_shared_dict introspection 10m; # cache for JWT verification results +``` + +插件本身提供了 init 方法。方便插件加载后做初始化动作。如果你需要清理初始化动作创建出来的内容,你可以在对应的 destroy 方法里完成这一操作。 + +注:如果部分插件的功能实现,需要在 Nginx 初始化启动,则可能需要在 `apisix/init.lua` 文件的初始化方法 http_init 中添加逻辑,并且可能需要在 `apisix/cli/ngx_tpl.lua` 文件中,对 Nginx 配置文件生成的部分,添加一些你需要的处理。但是这样容易对全局产生影响,根据现有的插件机制,**我们不建议这样做,除非你已经对代码完全掌握**。 + +## 加密存储字段 + +有些插件需要将参数加密存储,比如 `basic-auth` 插件的 `password` 参数。这个插件需要在 `schema` 中指定哪些参数需要被加密存储。 + +```lua +encrypt_fields = {"password"} +``` + +如果是嵌套的参数,比如 `error-log-logger` 插件的 `clickhouse.password` 参数,需要用 `.` 来分隔: + +```lua +encrypt_fields = {"clickhouse.password"} +``` + +目前还不支持: + +1. 两层以上的嵌套 +2. 数组中的字段 + +通过在 `schema` 中指定 `encrypt_fields = {"password"}`,可以将参数加密存储。APISIX 将提供以下功能: + +- 新增和更新资源时,对于 `encrypt_fields` 中声明的参数,APISIX 会自动加密存储在 etcd 中 +- 获取资源时,以及在运行插件时,对于 `encrypt_fields` 中声明的参数,APISIX 会自动解密 + +默认情况下,APISIX 启用数据加密并使用[两个默认的密钥](https://github.com/apache/apisix/blob/85563f016c35834763376894e45908b2fb582d87/apisix/cli/config.lua#L75),你可以在 `config.yaml` 中修改: + +```yaml +apisix: + data_encryption: + enable: true + keyring: + - ... 
+``` + +`keyring` 是一个数组,可以指定多个 key,APISIX 会按照 keyring 中 key 的顺序,依次尝试用 key 来解密数据(只对在 `encrypt_fields` 声明的参数)。如果解密失败,会尝试下一个 key,直到解密成功。 + +如果 `keyring` 中的 key 都无法解密数据,则使用原始数据。 + +## 注册公共接口 + +插件可以注册暴露给公网的接口。以 batch-requests 插件为例,这个插件注册了 `POST /apisix/batch-requests` 接口,让客户端可以将多个 API 请求组合在一个请求/响应中: + +```lua +function batch_requests() + -- ... +end + +function _M.api() + -- ... + return { + { + methods = {"POST"}, + uri = "/apisix/batch-requests", + handler = batch_requests, + } + } +end +``` + +注意,注册的接口将不会默认暴露,需要使用[public-api 插件](../../en/latest/plugins/public-api.md)来暴露它。 + +## 注册控制接口 + +如果你只想暴露 API 到 localhost 或内网,你可以通过 [Control API](./control-api.md) 来暴露它。 + +Take a look at example-plugin plugin: + +```lua +local function hello() + local args = ngx.req.get_uri_args() + if args["json"] then + return 200, {msg = "world"} + else + return 200, "world\n" + end +end + + +function _M.control_api() + return { + { + methods = {"GET"}, + uris = {"/v1/plugin/example-plugin/hello"}, + handler = hello, + } + } +end +``` + +如果你没有改过默认的 control API 配置,这个插件暴露的 `GET /v1/plugin/example-plugin/hello` API 只有通过 `127.0.0.1` 才能访问它。通过以下命令进行测试: + +```shell +curl -i -X GET "http://127.0.0.1:9090/v1/plugin/example-plugin/hello" +``` + +[查看更多有关 control API 介绍](./control-api.md) + +## 注册自定义变量 + +我们可以在 APISIX 的许多地方使用变量。例如,在 http-logger 中自定义日志格式,用它作为 `limit-*` 插件的键。在某些情况下,内置的变量是不够的。因此,APISIX 允许开发者在全局范围内注册他们的变量,并将它们作为普通的内置变量使用。 + +例如,让我们注册一个叫做 `a6_labels_zone` 的变量来获取路由中 `zone` 标签的值。 + +``` +local core = require "apisix.core" + +core.ctx.register_var("a6_labels_zone", function(ctx) + local route = ctx.matched_route and ctx.matched_route.value + if route and route.labels then + return route.labels.zone + end + return nil +end) +``` + +此后,任何对 `$a6_labels_zone` 的获取操作都会调用注册的获取器来获取数值。 + +注意,自定义变量不能用于依赖 Nginx 指令的功能,如 `access_log_format`。 + +## 编写测试用例 + +针对功能,完善各种维度的测试用例,对插件做个全方位的测试吧!插件的测试用例,都在 __t/plugin__ 目录下,可以前去了解。 +项目测试框架采用的 [****test-nginx****](https://github.com/openresty/test-nginx) 。 +一个测试用例 
__.t__ 文件,通常用 \__DATA\__ 分割成 序言部分 和 数据部分。这里我们简单介绍下数据部分, +也就是真正测试用例的部分,仍然以 key-auth 插件为例: + +```perl +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.key-auth") + local ok, err = plugin.check_schema({key = 'test-key'}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +[error] +``` + +一个测试用例主要有三部分内容: + +- 程序代码:Nginx location 的配置内容 +- 输入:http 的 request 信息 +- 输出检查:status,header,body,error_log 检查 + +这里请求 __/t__,经过配置文件 __location__,调用 __content_by_lua_block__ 指令完成 lua 的脚本,最终返回。 +用例的断言是 response_body 返回 "done",__no_error_log__ 表示会对 Nginx 的 error.log 检查, +必须没有 ERROR 级别的记录。 + +### 附上 test-nginx 执行流程 + +根据我们在 Makefile 里配置的 PATH,和每一个 __.t__ 文件最前面的一些配置项,框架会组装成一个完整的 nginx.conf 文件, +__t/servroot__ 会被当成 Nginx 的工作目录,启动 Nginx 实例。根据测试用例提供的信息,发起 http 请求并检查 http 的返回项, +包括 http status,http response header,http response body 等。 + +## 相关资源 + +- 核心概念 - [插件](https://apisix.apache.org/docs/apisix/terminology/plugin/) +- [Apache APISIX 扩展指南](https://apisix.apache.org/zh/blog/2021/10/26/extension-guide/) +- [Create a Custom Plugin in Lua](https://docs.api7.ai/apisix/how-to-guide/custom-plugins/create-plugin-in-lua) +- [example-plugin 代码](https://github.com/apache/apisix/blob/master/apisix/plugins/example-plugin.lua) diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/api-breaker.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/api-breaker.md new file mode 100644 index 0000000..5183f84 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/api-breaker.md @@ -0,0 +1,137 @@ +--- +title: api-breaker +keywords: + - Apache APISIX + - API 网关 + - API Breaker +description: 本文介绍了 Apache APISIX api-breaker 插件的相关操作,你可以使用此插件的 API 熔断机制来保护上游业务服务。 +--- + + + +## 描述 + +`api-breaker` 插件实现了 API 熔断功能,从而帮助我们保护上游业务服务。 + +:::note 注意 + +关于熔断超时逻辑,由代码逻辑自动按**触发不健康状态**的次数递增运算: + 
+当上游服务返回 `unhealthy.http_statuses` 配置中的状态码(默认为 `500`),并达到 `unhealthy.failures` 预设次数时(默认为 3 次),则认为上游服务处于不健康状态。 + +第一次触发不健康状态时,熔断 2 秒。超过熔断时间后,将重新开始转发请求到上游服务,如果继续返回 `unhealthy.http_statuses` 状态码,记数再次达到 `unhealthy.failures` 预设次数时,熔断 4 秒。依次类推(2,4,8,16,……),直到达到预设的 `max_breaker_sec`值。 + +当上游服务处于不健康状态时,如果转发请求到上游服务并返回 `healthy.http_statuses` 配置中的状态码(默认为 `200`),并达到 `healthy.successes` 次时,则认为上游服务恢复至健康状态。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ----------------------- | -------------- | ------ | ---------- | --------------- | -------------------------------- | +| break_response_code | integer | 是 | | [200, ..., 599] | 当上游服务处于不健康状态时返回的 HTTP 错误码。 | +| break_response_body | string | 否 | | | 当上游服务处于不健康状态时返回的 HTTP 响应体信息。 | +| break_response_headers | array[object] | 否 | | [{"key":"header_name","value":"can contain Nginx $var"}] | 当上游服务处于不健康状态时返回的 HTTP 响应头信息。该字段仅在配置了 `break_response_body` 属性时生效,并能够以 `$var` 的格式包含 APISIX 变量,比如 `{"key":"X-Client-Addr","value":"$remote_addr:$remote_port"}`。 | +| max_breaker_sec | integer | 否 | 300 | >=3 | 上游服务熔断的最大持续时间,以秒为单位。 | +| unhealthy.http_statuses | array[integer] | 否 | [500] | [500, ..., 599] | 上游服务处于不健康状态时的 HTTP 状态码。 | +| unhealthy.failures | integer | 否 | 3 | >=1 | 上游服务在一定时间内触发不健康状态的异常请求次数。 | +| healthy.http_statuses | array[integer] | 否 | [200] | [200, ..., 499] | 上游服务处于健康状态时的 HTTP 状态码。 | +| healthy.successes | integer | 否 | 3 | >=1 | 上游服务触发健康状态的连续正常请求次数。 | + +## 启用插件 + +以下示例展示了如何在指定路由上启用 `api-breaker` 插件,该路由配置表示在一定时间内返回 `500` 或 `503` 状态码达到 3 次后触发熔断,返回 `200` 状态码 1 次后恢复健康: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "api-breaker": { + "break_response_code": 502, + "unhealthy": { + "http_statuses": [500, 503], + "failures": 3 + }, + "healthy": { + "http_statuses": [200], + 
"successes": 1 + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +按上述配置启用插件后,使用 `curl` 命令请求该路由: + +```shell +curl -i -X POST "http://127.0.0.1:9080/hello" +``` + +如果上游服务在一定时间内返回 `500` 状态码达到 3 次,客户端将会收到 `502 Bad Gateway` 的应答: + +```shell +HTTP/1.1 502 Bad Gateway +... + +502 Bad Gateway + +
+<body>
+<center><h1>502 Bad Gateway</h1></center>
+<hr><center>openresty</center>
+ + +``` + +## 删除插件 + +当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/attach-consumer-label.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/attach-consumer-label.md new file mode 100644 index 0000000..21111c7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/attach-consumer-label.md @@ -0,0 +1,180 @@ +--- +title: attach-consumer-label +keywords: + - Apache APISIX + - API 网关 + - API Consumer +description: 本文介绍了 Apache APISIX attach-consumer-label 插件的相关操作,你可以使用此插件向上游服务传递自定义的 Consumer labels。 +--- + + + +## 描述 + +`attach-consumer-label` 插件在 X-Consumer-Username 和 X-Credential-Indentifier 之外,还将自定义的消费者相关标签附加到经过身份验证的请求,以便上游服务区分消费者并实现额外的逻辑。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|----------|--------|--------|----------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------| +| headers | object | 是 | | | 要附加到请求标头的 Consumer 标签的键值对,其中键是请求标头名称,例如 "X-Consumer-Role",值是对客户标签键的引用,例如 "$role"。请注意,该值应始终以美元符号 (`$`) 开头。如果 Consumer 上没有配置引用的值,则相应的标头将不会附加到请求中。 | + +## 启用插件 + +下面的示例演示了如何在通过身份验证的请求转发到上游服务之前,将自定义标签附加到请求标头。如果请求被拒绝,就不会在请求标头上附加任何消费者标签。如果某个标签值未在消费者上配置,但在“attach-consumer-label”插件中被引用,相应的标头也不会被附加。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +创建一个有自定义标签的 Consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "username": "john", + # highlight-start + "labels": { + // Annotate 1 + "department": "devops", + // Annotate 2 
+ "company": "api7" + } + # highlight-end + }' +``` + +❶ Consumer 的 `department` 标签信息。 + +❷ Consumer 的 `company` 标签信息。 + +为 Consumer `john` 配置 `key-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +创建路由并启用 `key-auth` 和 `attach-consumer-label` 插件: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "attach-consumer-label-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + # highlight-start + "attach-consumer-label": { + "headers": { + // Annotate 1 + "X-Consumer-Department": "$department", + // Annotate 2 + "X-Consumer-Company": "$company", + // Annotate 3 + "X-Consumer-Role": "$role" + } + } + # highlight-end + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +❶ 将 Consumer 标签 `department` 的值附加到请求头的 `X-Consumer-Department` 字段。 + +❷ 将 Consumer 标签 `company` 的值附加到请求头的 `X-Consumer-Company` 字段。 + +❸ 将 Consumer 标签 `role` 的值附加到请求头的 `X-Consumer-Role` 字段。由于 Consumer 标签中没有配置 `role` 这个标签,该字段不会出现在发往上游的请求头中。 + +:::tip + +引用标签的值必须以 `$` 符号开头。 + +::: + +使用正确的 apikey 请求该路由,验证插件: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: john-key' +``` + +可以看到类似的 `HTTP/1.1 200 OK` 响应: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Apikey": "john-key", + "Host": "127.0.0.1", + # highlight-start + "X-Consumer-Username": "john", + "X-Credential-Indentifier": "cred-john-key-auth", + "X-Consumer-Company": "api7", + "X-Consumer-Department": "devops", + # highlight-end + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66e5107c-5bb3e24f2de5baf733aec1cc", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 205.198.122.37", + "url": "http://127.0.0.1/get" +} +``` + +## 删除插件 + +当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 
将会自动重新加载相关配置,无需重启服务: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/attach-consumer-label-route" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/get", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/authz-casbin.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/authz-casbin.md new file mode 100644 index 0000000..648a99f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/authz-casbin.md @@ -0,0 +1,272 @@ +--- +title: authz-casbin +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Authz Casbin + - authz-casbin +description: 本文介绍了关于 Apache APISIX `authz-casbin` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`authz-casbin` 插件是一个基于 [Lua Casbin](https://github.com/casbin/lua-casbin/) 的访问控制插件,该插件支持各种 [access control models](https://casbin.org/docs/en/supported-models) 的强大授权场景。 + +## 属性 + +| 名称 | 类型 | 必选项 | 描述 | +| ----------- | ------ | ------- | ---------------------------------- | +| model_path | string | 是 | Casbin 鉴权模型配置文件路径。 | +| policy_path | string | 是 | Casbin 鉴权策略配置文件路径。 | +| model | string | 是 | Casbin 鉴权模型的文本定义。 | +| policy | string | 是 | Casbin 鉴权策略的文本定义。 | +| username | string | 是 | 描述请求中有可以通过访问控制的用户名。 | + +:::note + +你必须在插件配置中指定 `model_path`、`policy_path` 和 `username` 或者指定 `model`、`policy` 和 `username` 才能使插件生效。 + +如果你想要使所有的 Route 共享 Casbin 配置,你可以先在插件元数据中指定 `model` 和 `policy`,在插件配置中仅指定 `username`,这样所有 Route 都可以使用 Casbin 插件配置。 + +:::: + +## 元数据 + +| 名称 | 类型 | 必选项 | 描述 | +| ----------- | ------ | ------- | ------------------------------| +| model | string | 是 | Casbin 鉴权模型的文本定义。 | +| policy | string | 是 | Casbin 鉴权策略的文本定义。 | + +## 启用插件 + +你可以使用 model/policy 文件路径或使用插件 configuration/metadata 中的 model/policy 文本配置在 Route 上启用插件。 + +### 通过 model/policy 文件路径启用插件 + +以下示例展示了通过 model/policy 配置文件来设置 Casbin 身份验证: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash 
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "authz-casbin": { + "model_path": "/path/to/model.conf", + "policy_path": "/path/to/policy.csv", + "username": "user" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" +}' +``` + +### 通过 model/policy 文本配置启用插件 + +以下示例展示了通过你的 model/policy 文本来设置 Casbin 身份验证: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "authz-casbin": { + "model": "[request_definition] + r = sub, obj, act + + [policy_definition] + p = sub, obj, act + + [role_definition] + g = _, _ + + [policy_effect] + e = some(where (p.eft == allow)) + + [matchers] + m = (g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act)", + + "policy": "p, *, /, GET + p, admin, *, * + g, alice, admin", + + "username": "user" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" +}' +``` + +### 通过 plugin metadata 配置模型/策略 + +首先,我们需要使用 Admin API 发送一个 `PUT` 请求,将 `model` 和 `policy` 的配置添加到插件的元数据中。 + +所有通过这种方式创建的 Route 都会带有一个带插件元数据配置的 Casbin enforcer。你也可以使用这种方式更新 model/policy,该插件将会自动同步最新的配置信息。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/authz-casbin \ +-H "X-API-KEY: $admin_key" -i -X PUT -d ' +{ +"model": "[request_definition] +r = sub, obj, act + +[policy_definition] +p = sub, obj, act + +[role_definition] +g = _, _ + +[policy_effect] +e = some(where (p.eft == allow)) + +[matchers] +m = (g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act)", + +"policy": "p, *, /, GET +p, admin, *, * +g, alice, admin" +}' +``` + +更新插件元数据后,可以将插件添加到指定 Route 中: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: 
$admin_key" -X PUT -d ' +{ + "plugins": { + "authz-casbin": { + "username": "user" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" +}' +``` + +:::note + +插件路由的配置比插件元数据的配置有更高的优先级。因此,如果插件路由的配置中存在 model/policy 配置,插件将优先使用插件路由的配置而不是插件元数据中的配置。 + +::: + +## 测试插件 + +首先定义测试鉴权模型: + +```conf +[request_definition] +r = sub, obj, act + +[policy_definition] +p = sub, obj, act + +[role_definition] +g = _, _ + +[policy_effect] +e = some(where (p.eft == allow)) + +[matchers] +m = (g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act) +``` + +然后添加测试鉴权策略: + +```conf +p, *, /, GET +p, admin, *, * +g, alice, admin +``` + +如果想要了解更多关于 `policy` 和 `model` 的配置,请参考 [examples](https://github.com/casbin/lua-casbin/tree/master/examples)。 + +上述配置将允许所有人使用 `GET` 请求访问主页(`/`),而只有具有管理员权限的用户才可以访问其他页面并使用其他请求方法。 + +简单举例来说,假设我们向主页发出 `GET` 请求,通常都可以返回正常结果。 + +```shell +curl -i http://127.0.0.1:9080/ -X GET +``` + +但如果是一个未经授权的普通用户(例如:`bob`)访问除 `/` 以外的其他页面,将得到一个 403 错误: + +```shell +curl -i http://127.0.0.1:9080/res -H 'user: bob' -X GET +``` + +``` +HTTP/1.1 403 Forbidden +``` + +而拥有管理权限的用户(如 `alice`)则可以访问其它页面。 + +```shell +curl -i http://127.0.0.1:9080/res -H 'user: alice' -X GET +``` + +## 删除插件 + +当你需要禁用 `authz-casbin` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/*", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/authz-casdoor.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/authz-casdoor.md new file mode 100644 index 0000000..e24150f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/authz-casdoor.md @@ -0,0 +1,115 @@ +--- +title: authz-casdoor +keywords: + - Apache APISIX + - API 
网关 + - Plugin + - Authz Casdoor + - authz-casdoor +description: 本篇文档介绍了 Apache APISIX auth-casdoor 插件的相关信息。 +--- + + + +## 描述 + +使用 `authz-casdoor` 插件可添加 [Casdoor](https://casdoor.org/) 集中认证方式。 + +## 属性 + +| 名称 | 类型 | 必选项 | 描述 | +|---------------|--------|----------|----------------------------------------------| +| endpoint_addr | string | 是 | Casdoor 的 URL。 | +| client_id | string | 是 | Casdoor 的客户端 id。 | +| client_secret | string | 是 | Casdoor 的客户端密钥。 | +| callback_url | string | 是 | 用于接收 code 与 state 的回调地址。 | + +注意:schema 中还定义了 `encrypt_fields = {"client_secret"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +:::info IMPORTANT + +指定 `endpoint_addr` 和 `callback_url` 属性时不要以“/”来结尾。 + +`callback_url` 必须是路由的 URI。具体细节可查看下方示例内容,了解相关配置。 + +::: + +## 启用插件 + +以下示例展示了如何在指定路由上启用 `auth-casdoor` 插件: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/anything/*", + "plugins": { + "authz-casdoor": { + "endpoint_addr":"http://localhost:8000", + "callback_url":"http://localhost:9080/anything/callback", + "client_id":"7ceb9b7fda4a9061ec1c", + "client_secret":"3416238e1edf915eac08b8fe345b2b95cdba7e04" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +## 测试插件 + +一旦启用了该插件,访问该路由的新用户首先会经过 `authz-casdoor` 插件的处理,然后被重定向到 Casdoor 登录页面。 + +成功登录后,Casdoor 会将该用户重定向到 `callback_url`,并指定 GET 参数的 `code` 和 `state`。该插件还会向 Casdoor 请求一个访问 Token,并确认用户是否已登录。在成功认证后,该流程只出现一次并且后续请求不会被打断。 + +上述操作完成后,用户就会被重定向到目标 URL。 + +## 删除插件 + +当需要禁用 `authz-casdoor` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/anything/*", + 
"plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/authz-keycloak.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/authz-keycloak.md new file mode 100644 index 0000000..576f29b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/authz-keycloak.md @@ -0,0 +1,216 @@ +--- +title: authz-keycloak +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Authz Keycloak + - authz-keycloak +description: 本文介绍了关于 Apache APISIX `authz-keycloak` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`authz-keycloak` 插件可用于通过 [Keycloak Identity Server](https://www.keycloak.org/) 添加身份验证。 + +:::tip + +虽然该插件是为了与 Keycloak 一起使用而开发的,但是它也可以与任何符合 OAuth/OIDC 或 UMA 协议的身份认证软件一起使用。 + +::: + +如果你想了解 Keycloak 的更多信息,请参考 [Authorization Services Guide](https://www.keycloak.org/docs/latest/authorization_services/)。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|----------------------------------------------|---------------|-------|-----------------------------------------------|--------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| discovery | string | 否 | | https://host.domain/realms/foo/.well-known/uma2-configuration | Keycloak 授权服务的 [discovery document](https://www.keycloak.org/docs/latest/authorization_services/index.html) 的 URL。 | +| token_endpoint | string | 否 | | https://host.domain/realms/foo/protocol/openid-connect/token | 接受 OAuth2 兼容 token 的接口,需要支持 `urn:ietf:params:oauth:grant-type:uma-ticket` 授权类型。 | +| resource_registration_endpoint | string | 否 | | https://host.domain/realms/foo/authz/protection/resource_set | 符合 UMA 的资源注册端点。如果提供,则覆盖发现中的值。 | +| client_id | string | 是 | 
| | 客户端正在寻求访问的资源服务器的标识符。 | +| client_secret | string | 否 | | | 客户端密码(如果需要)。 | +| grant_type | string | 否 | "urn:ietf:params:oauth:grant-type:uma-ticket" | ["urn:ietf:params:oauth:grant-type:uma-ticket"] | | +| policy_enforcement_mode | string | 否 | "ENFORCING" | ["ENFORCING", "PERMISSIVE"] | | +| permissions | array[string] | 否 | | | 描述客户端应用所需访问的资源和权限范围的字符串。格式必须为:`RESOURCE_ID#SCOPE_ID`。 | +| lazy_load_paths | boolean | 否 | false | [true, false] | 当设置为 true 时,使用资源注册端点而不是静态权限将请求 URI 动态解析为资源。 | +| http_method_as_scope | boolean | 否 | false | [true, false] | 设置为 true 时,将 HTTP 请求类型映射到同名范围并添加到所有请求的权限。 | +| timeout | integer | 否 | 3000 | [1000, ...] | 与 Identity Server 的 HTTP 连接超时(毫秒)。 | +| access_token_expires_in | integer | 否 | 300 | [1, ...] | 访问令牌的有效期。token. | +| access_token_expires_leeway | integer | 否 | 0 | [0, ...] | access_token 更新的到期余地。设置后,令牌将在到期前几秒更新 access_token_expires_leeway。这避免了 access_token 在到达 OAuth 资源服务器时刚刚过期的情况。 | +| refresh_token_expires_in | integer | 否 | 3600 | [1, ...] | 刷新令牌的失效时间。 | +| refresh_token_expires_leeway | integer | 否 | 0 | [0, ...] 
| refresh_token 更新的到期余地。设置后,令牌将在到期前几秒刷新 refresh_token_expires_leeway。这样可以避免在到达 OAuth 资源服务器时 refresh_token 刚刚过期的错误。 | +| ssl_verify | boolean | 否 | true | [true, false] | 设置为 `true` 时,验证 TLS 证书是否与主机名匹配。 | +| cache_ttl_seconds | integer | 否 | 86400 (equivalent to 24h) | positive integer >= 1 | 插件缓存插件用于向 Keycloak 进行身份验证的发现文档和令牌的最长时间(以秒为单位)。 | +| keepalive | boolean | 否 | true | [true, false] | 当设置为 `true` 时,启用 HTTP keep-alive 保证在使用后仍然保持连接打开。如果您期望对 Keycloak 有很多请求,请设置为 `true`。 | +| keepalive_timeout | integer | 否 | 60000 | positive integer >= 1000 | 已建立的 HTTP 连接将关闭之前的空闲时间。 | +| keepalive_pool | integer | 否 | 5 | positive integer >= 1 | 连接池中的最大连接数。 | +| access_denied_redirect_uri | string | 否 | | [1, 2048] | 需要将用户重定向到的 URI,而不是返回类似 `"error_description":"not_authorized"` 这样的错误消息。 | +| password_grant_token_generation_incoming_uri | string | 否 | | /api/token | 将此设置为使用密码授予类型生成令牌。该插件会将传入的请求 URI 与此值进行比较。 | + +注意:schema 中还定义了 `encrypt_fields = {"client_secret"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +除上述释义外,还有以下需要注意的点: + +- Discovery and endpoints + - 使用 `discovery` 属性后,`authz-keycloak` 插件就可以从其 URL 中发现 Keycloak API 的端点。该 URL 指向 Keyloak 针对相应领域授权服务的发现文档。 + - 如果发现文档可用,则插件将根据该文档确定令牌端点 URL。如果 URL 存在,则 `token_endpoint` 和 `resource_registration_endpoint` 的值将被其覆盖。 +- Client ID and secret + - 该插件需配置 `client_id` 属性来标识自身。 + - 如果 `lazy_load_paths` 属性被设置为 `true`,那么该插件还需要从 Keycloak 中获得一个自身访问令牌。在这种情况下,如果客户端对 Keycloak 的访问是加密的,就需要配置 `client_secret` 属性。 +- Policy enforcement mode + - `policy_enforcement_mode` 属性指定了在处理发送到服务器的授权请求时,该插件如何执行策略。 + - `ENFORCING` mode:即使没有与给定资源关联的策略,请求也会默认被拒绝。`policy_enforcement_mode` 默认设置为 `ENFORCING`。 + - `PERMISSIVE` mode:如果资源没有绑定任何访问策略,也被允许请求。 +- Permissions + - 在处理传入的请求时,插件可以根据请求的参数确定静态或动态检查 Keycloak 的权限。 + - 如果 `lazy_load_paths` 参数设置为 `false`,则权限来自 `permissions` 属性。`permissions` 中的每个条目都需要按照令牌端点预设的 `permission` 属性进行格式化。详细信息请参考 [Obtaining 
Permissions](https://www.keycloak.org/docs/latest/authorization_services/index.html#_service_obtaining_permissions). + + :::note + + 有效权限可以是单个资源,也可以是与一个或多个范围配对的资源。 + + ::: + + 如果 `lazy_load_paths` 属性设置为 `true`,则请求 URI 将解析为使用资源注册端点在 Keycloak 中配置的一个或多个资源。已经解析的资源被用作于检查的权限。 + + :::note + + 需要该插件从令牌端点为自己获取单独的访问令牌。因此,请确保在 Keycloak 的客户端设置中设置了 `Service Accounts Enabled` 选项。 + + 还需要确保颁发的访问令牌包含具有 `uma_protection` 角色的 `resource_access` 声明,以保证插件能够通过 Protection API 查询资源。 + + ::: + +- 自动将 HTTP method 映射到作用域 + + `http_method_as_scope` 通常与 `lazy_load_paths` 一起使用,但也可以与静态权限列表一起使用。 + + - 如果 `http_method_as_scope` 属性设置为 `true`,插件会将请求的 HTTP 方法映射到同名范围。然后将范围添加到每个要检查的权限。 + + - 如果 `lazy_load_paths` 属性设置为 `false`,则插件会将映射范围添加到 `permissions` 属性中配置的任意一个静态权限——即使它们已经包含一个或多个范围。 + +- 使用 `password` 授权生成令牌 + + - 如果要使用 `password` 授权生成令牌,你可以设置 `password_grant_token_generation_incoming_uri` 属性的值。 + + - 如果传入的 URI 与配置的属性匹配并且请求方法是 POST,则使用 `token_endpoint` 生成一个令牌。 + + 同时,你还需要添加 `application/x-www-form-urlencoded` 作为 `Content-Type` 标头,`username` 和 `password` 作为参数。 + + 如下示例是当 `password_grant_token_generation_incoming_uri` 设置为 `/api/token` 时的命令: + + ```shell + curl --location --request POST 'http://127.0.0.1:9080/api/token' \ + --header 'Accept: application/json, text/plain, */*' \ + --header 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'username=' \ + --data-urlencode 'password=' + ``` + +## 如何启用 + +以下示例为你展示了如何在指定 Route 中启用 `authz-keycloak` 插件,其中 `${realm}` 是 Keycloak 中的 `realm` 名称: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/get", + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8090/realms/${realm}/protocol/openid-connect/token", + "permissions": ["resource name#scope name"], + "client_id": "Client ID" + } + }, + 
"upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,可以通过以下方法测试插件。 + +首先需要从 Keycloak 获取 JWT 令牌: + +```shell +curl "http:///realms//protocol/openid-connect/token" \ + -d "client_id=" \ + -d "client_secret=" \ + -d "username=" \ + -d "password=" \ + -d "grant_type=password" +``` + +你应该收到类似以下的响应: + +```text +{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJoT3ludlBPY2d6Y3VWWnYtTU42bXZKMUczb0dOX2d6MFo3WFl6S2FSa1NBIn0.eyJleHAiOjE3MDMyOTAyNjAsImlhdCI6MTcwMzI4OTk2MCwianRpIjoiMjJhOGFmMzItNDM5Mi00Yzg3LThkM2UtZDkyNDVmZmNiYTNmIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMS44Mzo4MDgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwiYXVkIjoiYWNjb3VudCIsInN1YiI6IjAyZWZlY2VlLTBmYTgtNDg1OS1iYmIwLTgyMGZmZDdjMWRmYSIsInR5cCI6IkJlYXJlciIsImF6cCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsInNlc3Npb25fc3RhdGUiOiI1YzIzZjVkZC1hN2ZhLTRlMmItOWQxNC02MmI1YzYyNmU1NDYiLCJhY3IiOiIxIiwicmVhbG1fYWNjZXNzIjp7InJvbGVzIjpbImRlZmF1bHQtcm9sZXMtcXVpY2tzdGFydC1yZWFsbSIsIm9mZmxpbmVfYWNjZXNzIiwidW1hX2F1dGhvcml6YXRpb24iXX0sInJlc291cmNlX2FjY2VzcyI6eyJhY2NvdW50Ijp7InJvbGVzIjpbIm1hbmFnZS1hY2NvdW50IiwibWFuYWdlLWFjY291bnQtbGlua3MiLCJ2aWV3LXByb2ZpbGUiXX19LCJzY29wZSI6ImVtYWlsIHByb2ZpbGUiLCJzaWQiOiI1YzIzZjVkZC1hN2ZhLTRlMmItOWQxNC02MmI1YzYyNmU1NDYiLCJlbWFpbF92ZXJpZmllZCI6ZmFsc2UsInByZWZlcnJlZF91c2VybmFtZSI6InF1aWNrc3RhcnQtdXNlciJ9.WNZQiLRleqCxw-JS-MHkqXnX_BPA9i6fyVHqF8l-L-2QxcqTAwbIp7AYKX-z90CG6EdRXOizAEkQytB32eVWXaRkLeTYCI7wIrT8XSVTJle4F88ohuBOjDfRR61yFh5k8FXXdAyRzcR7tIeE2YUFkRqw1gCT_VEsUuXPqm2wTKOmZ8fRBf4T-rP4-ZJwPkHAWc_nG21TmLOBCSulzYqoC6Lc-OvX5AHde9cfRuXx-r2HhSYs4cXtvX-ijA715MY634CQdedheoGca5yzPsJWrAlBbCruN2rdb4u5bDxKU62pJoJpmAsR7d5qYpYVA6AsANDxHLk2-W5F7I_IxqR0YQ","expires_in":300,"refresh_expires_in":1800,"refresh_token":"eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJjN2IwYmY4NC1kYjk0LTQ5YzctYWIyZC01NmU3ZDc1MmRkNDkifQ.eyJleHAiOjE3MDMyOTE3NjAsImlhdCI6MTcwMzI4OTk2MCwianRpIjoiYzcyZjAzMzctYmZhNS00MWEzLTlhYjEtZmJlNGY0NmZjMDgxIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMS44Mzo4M
DgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwiYXVkIjoiaHR0cDovLzE5Mi4xNjguMS44Mzo4MDgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwic3ViIjoiMDJlZmVjZWUtMGZhOC00ODU5LWJiYjAtODIwZmZkN2MxZGZhIiwidHlwIjoiUmVmcmVzaCIsImF6cCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsInNlc3Npb25fc3RhdGUiOiI1YzIzZjVkZC1hN2ZhLTRlMmItOWQxNC02MmI1YzYyNmU1NDYiLCJzY29wZSI6ImVtYWlsIHByb2ZpbGUiLCJzaWQiOiI1YzIzZjVkZC1hN2ZhLTRlMmItOWQxNC02MmI1YzYyNmU1NDYifQ.7AH7ppbVOlkYc9CoJ7kLSlDUkmFuNga28Amugn2t724","token_type":"Bearer","not-before-policy":0,"session_state":"5c23f5dd-a7fa-4e2b-9d14-62b5c626e546","scope":"email profile"} +``` + +之后就可以使用获得的访问令牌发起请求: + +```shell +curl http://127.0.0.1:9080/get -H 'Authorization: Bearer ${ACCESS_TOKEN}' +``` + +## 删除插件 + +当你需要禁用 `authz-keycloak` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/get", + "plugins": { + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +}' +``` + +## 插件 Roadmap + +- 目前,`authz-keycloak` 插件通过要求定义资源名称和所需的范围,来强制执行路由策略。但 Keycloak 官方适配的其他语言客户端(Java、JavaScript)仍然可以通过动态查询 Keycloak 路径以及延迟加载身份资源的路径来提供路径匹配。在 Apache APISIX 之后发布的插件中即将支持此功能。 + +- 支持从 Keycloak JSON 文件中读取权限范畴和其他配置项。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/aws-lambda.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/aws-lambda.md new file mode 100644 index 0000000..7b23a35 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/aws-lambda.md @@ -0,0 +1,224 @@ +--- +title: aws-lambda +keywords: + - Apache APISIX + - Plugin + - AWS Lambda + - aws-lambda +description: 本文介绍了关于 Apache APISIX aws-lambda 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`aws-lambda` 插件用于将 [AWS Lambda](https://aws.amazon.com/lambda/) 和 [Amazon API Gateway](https://aws.amazon.com/api-gateway/) 作为动态上游集成至 APISIX,从而实现将访问指定 URI 的请求代理到 AWS 云。 + +启用 `aws-lambda` 插件后,该插件会终止对已配置 URI 的请求,并代表客户端向 AWS Lambda Gateway URI 
发起一个新的请求。这个新请求中携带了之前配置的授权详细信息,包括请求头、请求体和参数(以上参数都是从原始请求中传递的),然后 `aws-lambda` 插件会将带有响应头、状态码和响应体的响应信息返回给使用 APISIX 发起请求的客户端。
+
+该插件支持通过 AWS API key 和 AWS IAM secrets 进行授权。当使用 AWS IAM secrets 时,该插件支持 [AWS Signature Version 4 signing](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html)。
+
+## 属性
+
+| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
+| -------------------- | ------- | -------- | ------- | ------------ | ------------------------------------------------------------ |
+| function_uri | string | 是 | | | 触发 lambda serverless 函数的 AWS API Gateway 端点。 |
+| authorization | object | 否 | | | 访问云函数的授权凭证。 |
+| authorization.apikey | string | 否 | | | 生成的 API 密钥,用于授权对 AWS Gateway 端点的请求。 |
+| authorization.iam | object | 否 | | | 用于通过 AWS v4 请求签名执行的基于 AWS IAM 角色的授权。请参考 [IAM 授权方案](#iam-授权方案)。 |
+| authorization.iam.accesskey | string | 是 | | | 从 AWS IAM 控制台生成的访问密钥 ID。 |
+| authorization.iam.secretkey | string | 是 | | | 从 AWS IAM 控制台生成的访问密钥。 |
+| authorization.iam.aws_region | string | 否 | "us-east-1" | | 发出请求的 AWS 区域。有关更多 AWS 区域代码的信息请参考 [AWS 区域代码表](https://docs.aws.amazon.com/zh_cn/general/latest/gr/rande.html#region-names-codes)。 |
+| authorization.iam.service | string | 否 | "execute-api" | | 接收该请求的服务。若使用 Amazon API gateway APIs, 应设置为 `execute-api`。若使用 Lambda function, 应设置为 `lambda`。 |
+| timeout | integer | 否 | 3000 | [100,...] | 代理请求超时(以毫秒为单位)。 |
+| ssl_verify | boolean | 否 | true | true/false | 当设置为 `true` 时执行 SSL 验证。 |
+| keepalive | boolean | 否 | true | true/false | 当设置为 `true` 时,保持连接的活动状态以便重复使用。 |
+| keepalive_pool | integer | 否 | 5 | [1,...] | 在关闭该连接之前,可以在该连接上发送的最大请求数。 |
+| keepalive_timeout | integer | 否 | 60000 | [1000,...] 
| 当连接空闲时,保持该连接处于活动状态的时间,以毫秒为单位。 | + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "aws-lambda": { + "function_uri": "https://x9w6z07gb9.execute-api.us-east-1.amazonaws.com/default/test-apisix", + "authorization": { + "apikey": "" + }, + "ssl_verify":false + } + }, + "uri": "/aws" +}' +``` + +通过上述示例配置插件后,任何对 `/aws` URI 的请求(`HTTP/1.1`、`HTTPS`、`HTTP2`)都将调用已配置的 AWS 函数的 URI,并且会将响应信息返回给客户端。 + +下述命令的含义是:AWS Lambda 从请求中获取 `name` 参数,并返回一条 `"Hello $name"` 消息: + +```shell +curl -i -XGET localhost:9080/aws\?name=APISIX +``` + +正常返回结果: + +```shell +HTTP/1.1 200 OK +Content-Type: application/json +... +"Hello, APISIX!" +``` + +以下示例是客户端通过 HTTP/2 协议与 APISIX 进行通信。 + +在进行测试之前,由于该 `enable_http2: true` 默认是禁用状态,你可以通过在 `./conf/config.yaml` 中添加 `apisix.node_listen` 下的 `- port: 9081` 和 `enable_http2: true` 字段启用。示例如下 + +```yaml +apisix: + node_listen: # 支持监听多个端口 + - 9080 + - port: 9081 + enable_http2: true # 该字段如果不设置,默认值为 `false` +``` + +使用 `curl` 命令测试: + +```shell +curl -i -XGET --http2 --http2-prior-knowledge localhost:9081/aws\?name=APISIX +``` + +正常返回结果: + +```shell +HTTP/2 200 +content-type: application/json +... +"Hello, APISIX!" 
+``` + +与上面的示例类似,AWS Lambda 函数也可以通过 AWS API Gateway 触发,但需要使用 AWS IAM 权限进行授权。`aws-lambda` 插件的配置文件中包含了 `"authorization"` 字段,用户可以在 HTTP 调用中通过 AWS v4 请求签名。 + +以下示例展示了如何通过配置文件实现授权: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "aws-lambda": { + "function_uri": "https://ajycz5e0v9.execute-api.us-east-1.amazonaws.com/default/test-apisix", + "authorization": { + "iam": { + "accesskey": "", + "secretkey": "" + } + }, + "ssl_verify": false + } + }, + "uri": "/aws" +}' +``` + +:::note 注意 + +使用该方法时已经假设你有一个启用了程序化访问的 IAM 用户,并具有访问端点的必要权限(AmazonAPIGatewayInvokeFullAccess)。 + +::: + +### 配置路径转发 + +`aws-lambda` 插件在代理请求到 AWS 上游时也支持 URL 路径转发。基本请求路径的扩展被附加到插件配置中指定的 `function_uri` 字段上。 + +:::info 重要 + +因为 APISIX 路由是严格匹配的,所以为了使 `aws-lambda` 插件正常工作,在路由上配置的 `uri` 字段必须以 `*` 结尾,`*` 意味着这个 URI 的任何子路径都会被匹配到同一个路由。 + +::: + +以下示例展示了如何通过配置文件实现路径转发: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "aws-lambda": { + "function_uri": "https://x9w6z07gb9.execute-api.us-east-1.amazonaws.com", + "authorization": { + "apikey": "" + }, + "ssl_verify":false + } + }, + "uri": "/aws/*" +}' +``` + +通过上述示例配置插件后,任何访问 `aws/default/test-apisix` 的请求都会调用 AWS Lambda 函数,并转发附加的参数。 + +使用 `curl` 命令测试: + +```shell +curl -i -XGET http://127.0.0.1:9080/aws/default/test-apisix\?name\=APISIX +``` + +正常返回结果: + +```shell +HTTP/1.1 200 OK +Content-Type: application/json +... +"Hello, APISIX!" 
+``` + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/aws", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/azure-functions.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/azure-functions.md new file mode 100644 index 0000000..dde9495 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/azure-functions.md @@ -0,0 +1,215 @@ +--- +title: azure-functions +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Azure Functions + - azure-functions +description: 本文介绍了关于 API 网关 Apache APISIX azure-functions 插件的基本信息及使用方法。 +--- + + +## 描述 + +`azure-functions` 插件用于将 [Azure Serverless Function](https://azure.microsoft.com/en-in/services/functions/) 作为动态上游集成至 APISIX,从而实现将访问指定 URI 的请求代理到 Microsoft Azure 云服务。 + +启用 `azure-functions` 插件后,该插件会终止对已配置 URI 的请求,并代表客户端向 Azure Functions 发起一个新的请求。该新请求中携带了之前配置的授权详细信息,包括请求头、请求体和参数(以上参数都是从原始请求中传递的)。之后便会通过 `azure-functions` 插件,将带有响应头、状态码和响应体的信息返回给使用 APISIX 发起请求的客户端。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------------- | ------- | ------ | ------ | ---------- | ------------------------------------------------------------ | +| function_uri | string | 是 | | | 触发 Serverless Functions 的 Azure Functions 端点。例如 `http://test-apisix.azurewebsites.net/api/HttpTrigger`。 | +| authorization | object | 否 | | | 访问 Azure Functions 的授权凭证。 | +| authorization.apikey | string | 否 | | | 授权凭证内的字段。生成 API 密钥来授权对端点的请求。 | +| authorization.clientid | string | 否 | | | 授权凭证内的字段。生成客户端 ID(Azure Active Directory)来授权对端点的请求。 | +| timeout | integer | 否 | 3000 | [100,...] 
| 代理请求超时(以毫秒为单位)。 | +| ssl_verify | boolean | 否 | true | true/false | 当设置为 `true` 时执行 SSL 验证。 | +| keepalive | boolean | 否 | true | true/false | 当设置为 `true` 时,保持连接的活动状态以便重复使用。 | +| keepalive_pool | integer | 否 | 5 | [1,...] | 连接断开之前,可接收的最大请求数。 | +| keepalive_timeout | integer | 否 | 60000 | [1000,...] | 当连接空闲时,保持该连接处于活动状态的时间(以毫秒为单位)。 | + +## 元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| --------------- | ------ | ------ | ------ | ------------------------------------------------------------ | +| master_apikey | string | 否 | "" | 可用于访问 Azure Functions URI 的 API 密钥。 | +| master_clientid | string | 否 | "" | 可用于授权 Azure Functions URI 的客户端 ID(Active Directory)。 | + +`azure-functions` 插件的元数据提供了授权回退的功能。它定义了 `master_apikey` 和 `master_clientid` 字段,用户可以为关键任务的应用部署声明 API 密钥或客户端 ID。因此,如果在 `azure-functions` 插件属性中没有找到相关授权凭证,此时元数据中的授权凭证就会发挥作用。 + +:::note 注意 + +授权方式优先级排序如下: + +1. 首先,`azure-functions` 插件在 APISIX 代理的请求头中寻找 `x-functions-key` 或 `x-functions-clientid` 键。 +2. 如果没有找到,`azure-functions` 插件会检查插件属性中的授权凭证。如果授权凭证存在,`azure-functions` 插件会将相应的授权标头添加到发送到 Azure Functions 的请求中。 +3. 
如果未配置 `azure-functions` 插件的授权凭证属性,APISIX 将获取插件元数据配置并使用 API 密钥。 + +::: + +如果你想添加一个新的 API 密钥,请向 `/apisix/admin/plugin_metadata` 端点发出请求,并附上所需的元数据。示例如下: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/azure-functions \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "master_apikey" : "" +}' +``` + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件,请确保你的 Azure Functions 已提前部署好,并正常提供服务。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "azure-functions": { + "function_uri": "http://test-apisix.azurewebsites.net/api/HttpTrigger", + "authorization": { + "apikey": "${Generated API key to access the Azure-Function}" + } + } + }, + "uri": "/azure" +}' +``` + +通过上述示例配置插件后,任何对 `/azure` URI 的请求(`HTTP/1.1`、`HTTPS`、`HTTP2`)都将调用已配置的 Azure Functions 的 URI,并且会将响应信息返回给客户端。 + +下述命令的含义是:Azure Functions 从请求中获取 `name` 参数,并返回一条 `"Hello $name"` 消息: + +```shell +curl -i -XGET http://localhost:9080/azure\?name=APISIX +``` + +正常返回结果: + +```shell +HTTP/1.1 200 OK +Content-Type: text/plain; charset=utf-8 +... +Hello, APISIX +``` + +以下示例是客户端通过 HTTP/2 协议与 APISIX 进行通信。 + +在进行测试之前,由于该 `enable_http2: true` 默认是禁用状态,你可以通过在 `./conf/config.yaml` 中添加 `apisix.node_listen` 下的 `- port: 9081` 和 `enable_http2: true` 字段启用。示例如下: + +```yaml +apisix: + node_listen: # 支持监听多个端口 + - 9080 + - port: 9081 + enable_http2: true # 该字段如果不设置,默认值为 `false` +``` + +使用 `curl` 命令测试: + +```shell +curl -i -XGET --http2 --http2-prior-knowledge http://localhost:9081/azure\?name=APISIX +``` + +正常返回结果: + +```shell +HTTP/2 200 +content-type: text/plain; charset=utf-8 +... 
+Hello, APISIX +``` + +### 配置路径转发 + +`azure-functions` 插件在代理请求到 Azure Functions 上游时也支持 URL 路径转发。基本请求路径的扩展被附加到插件配置中指定的 `function_uri` 字段上。 + +:::info 重要 + +因为 APISIX 路由是严格匹配的,所以为了使 `azure-functions` 插件正常工作,在路由上配置的 `uri` 字段必须以 `*` 结尾,`*` 意味着这个 URI 的任何子路径都会被匹配到同一个路由。 + +::: + +以下示例展示了如何通过配置文件实现路径转发: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "azure-functions": { + "function_uri": "http://app-bisakh.azurewebsites.net/api", + "authorization": { + "apikey": "${Generated API key to access the Azure-Function}" + } + } + }, + "uri": "/azure/*" +}' +``` + +通过上述示例配置插件后,任何访问 `azure/HttpTrigger1` 的请求都会调用 Azure Functions 并转发附加的参数。 + +使用 `curl` 命令测试: + +```shell +curl -i -XGET http://127.0.0.1:9080/azure/HttpTrigger1\?name\=APISIX\ +``` + +正常返回结果: + +```shell +HTTP/1.1 200 OK +Content-Type: text/plain; charset=utf-8 +... +Hello, APISIX +``` + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/azure", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/basic-auth.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/basic-auth.md new file mode 100644 index 0000000..c445e45 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/basic-auth.md @@ -0,0 +1,512 @@ +--- +title: basic-auth +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Basic Auth + - basic-auth +description: basic-auth 插件为消费者添加了基本访问身份验证,以便消费者在访问上游资源之前进行身份验证。 +--- + + + + + + + +## 描述 + +`basic-auth` 插件为 [消费者](../terminology/consumer.md) 添加了 [基本访问身份验证](https://en.wikipedia.org/wiki/Basic_access_authentication),以便消费者在访问上游资源之前进行身份验证。 + +当消费者成功通过身份验证后,APISIX 会在将请求代理到上游服务之前向请求添加其他标头,例如 `X-Consumer-Username`、`X-Credential-Indentifier` 
和其他消费者自定义标头(如果已配置)。上游服务将能够区分消费者并根据需要实现其他逻辑。如果这些值中的任何一个不可用,则不会添加相应的标头。
+
+## 属性
+
+Consumer/Credentials 端:
+
+| 名称 | 类型 | 必选项 | 描述 |
+| -------- | ------ | ----- | ----------------------------------------------------------------------------------------------- |
+| username | string | 是 | Consumer 的用户名并且该用户名是唯一,如果多个 Consumer 使用了相同的 `username`,将会出现请求匹配异常。|
+| password | string | 是 | 用户的密码。该字段支持使用 [APISIX Secret](../terminology/secret.md) 资源,将值保存在 Secret Manager 中。 |
+
+注意:schema 中还定义了 `encrypt_fields = {"password"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。
+
+Route 端:
+
+| 名称 | 类型 | 必选项 | 默认值 | 描述 |
+| ---------------- | ------- | ------ | ------ | --------------------------------------------------------------- |
+| hide_credentials | boolean | 否 | false | 该参数设置为 `true` 时,则不会将 Authorization 请求头传递给 Upstream。|
+| anonymous_consumer | string | 否 | | 匿名消费者名称。如果已配置,则允许匿名用户绕过身份验证。 |
+
+## 示例
+
+以下示例演示了如何在不同场景中使用 `basic-auth` 插件。
+
+:::note
+
+您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+### 在路由上实现基本身份验证
+
+以下示例演示如何在路由上实现基本身份验证。
+
+创建消费者 `johndoe`:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "username": "johndoe"
+  }'
+```
+
+为消费者创建 `basic-auth` 凭证:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "cred-john-basic-auth",
+    "plugins": {
+      "basic-auth": {
+        "username": "johndoe",
+        "password": "john-key"
+      }
+    }
+  }'
+```
+
+创建一个带有 `basic-auth` 的路由:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "basic-auth-route",
+    "uri": "/anything",
+    "plugins": {
+      "basic-auth": {}
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+#### 使用有效密钥进行验证
+
+使用有效密钥发送请求至:
+ 
+```shell +curl -i "http://127.0.0.1:9080/anything" -u johndoe:john-key +``` + +您应该会看到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Apikey": "john-key", + "Authorization": "Basic am9obmRvZTpqb2huLWtleQ==", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66e5107c-5bb3e24f2de5baf733aec1cc", + "X-Consumer-Username": "john", + "X-Credential-Indentifier": "cred-john-basic-auth", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 205.198.122.37", + "url": "http://127.0.0.1/get" +} +``` + +#### 使用无效密钥进行验证 + +使用无效密钥发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -u johndoe:invalid-key +``` + +您应该看到以下 `HTTP/1.1 401 Unauthorized` 响应: + +```text +{"message":"Invalid user authorization"} +``` + +#### 无需密钥即可验证 + +无需密钥即可发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该看到以下 `HTTP/1.1 401 Unauthorized` 响应: + +```text +{"message":"Missing authorization in request"} +``` + +### 隐藏上游的身份验证信息 + +以下示例演示了如何通过配置 `hide_credentials` 来防止密钥被发送到上游服务。APISIX 默认情况下会将身份验证密钥转发到上游服务,这在某些情况下可能会导致安全风险,您应该考虑更新 `hide_credentials`。 + +创建消费者 `johndoe` : + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "johndoe" + }' +``` + +为消费者创建 `basic-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-basic-auth", + "plugins": { + "basic-auth": { + "username": "johndoe", + "password": "john-key" + } + } + }' +``` + +#### 不隐藏凭据 + +使用 `basic-auth` 创建路由,并将 `hide_credentials` 配置为 `false`,这是默认配置: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "id": "basic-auth-route", + "uri": "/anything", + "plugins": { + "basic-auth": { + "hide_credentials": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } 
+}' +``` + +发送带有有效密钥的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -u johndoe:john-key +``` + +您应该看到以下 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Authorization": "Basic am9obmRvZTpqb2huLWtleQ==", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66cc2195-22bd5f401b13480e63c498c6", + "X-Consumer-Username": "john", + "X-Credential-Indentifier": "cred-john-basic-auth", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "192.168.65.1, 43.228.226.23", + "url": "http://127.0.0.1/anything" +} +``` + +请注意,凭证以 base64 编码格式对上游服务可见。 + +:::tip + +您还可以使用 `Authorization` 标头在请求中传递 base64 编码的凭据,如下所示: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H "Authorization: Basic am9obmRvZTpqb2huLWtleQ==" +``` + +::: + +#### 隐藏凭据 + +将插件的 `hide_credentials` 更新为 `true`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/basic-auth-route" -X PATCH \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "plugins": { + "basic-auth": { + "hide_credentials": true + } + } +}' +``` + +发送带有有效密钥的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -u johndoe:john-key +``` + +您应该看到以下 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66cc21a7-4f6ac87946e25f325167d53a", + "X-Consumer-Username": "john", + "X-Credential-Indentifier": "cred-john-basic-auth", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "192.168.65.1, 43.228.226.23", + "url": "http://127.0.0.1/anything" +} +``` + +请注意,上游服务不再可见这些凭据。 + +### 将消费者自定义 ID 添加到标头 + +以下示例演示了如何在 `Consumer-Custom-Id` 标头中将消费者自定义 ID 附加到经过身份验证的请求,该 ID 可用于根据需要实现其他逻辑。 + +创建带有自定义 ID 标签的消费者 `johndoe`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + 
-d '{ + "username": "johndoe", + "labels": { + "custom_id": "495aec6a" + } + }' +``` + +为消费者创建 `basic-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-basic-auth", + "plugins": { + "basic-auth": { + "username": "johndoe", + "password": "john-key" + } + } + }' +``` + +创建一个带有 `basic-auth` 的路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "basic-auth-route", + "uri": "/anything", + "plugins": { + "basic-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +使用有效密钥向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -u johndoe:john-key +``` + +您应该看到一个带有 `X-Consumer-Custom-Id` 的 `HTTP/1.1 200 OK` 响应,类似于以下内容: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Authorization": "Basic am9obmRvZTpqb2huLWtleQ==", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66ea8d64-33df89052ae198a706e18c2a", + "X-Consumer-Username": "johndoe", + "X-Credential-Identifier": "cred-john-basic-auth", + "X-Consumer-Custom-Id": "495aec6a", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "192.168.65.1, 205.198.122.37", + "url": "http://127.0.0.1/anything" +} +``` + +### 匿名消费者的速率限制 + +以下示例演示了如何为普通消费者和匿名消费者配置不同的速率限制策略,其中匿名消费者不需要进行身份验证,并且配额较少。 + +创建普通消费者 `johndoe` 并配置 `limit-count` 插件以允许 30 秒内的配额为 3: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "johndoe", + "plugins": { + "limit-count": { + "count": 3, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +为消费者 `johndoe` 创建 `basic-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/johndoe/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + 
"id": "cred-john-basic-auth", + "plugins": { + "basic-auth": { + "username": "johndoe", + "password": "john-key" + } + } + }' +``` + +创建匿名用户 `anonymous`,并配置 `limit-count` 插件,以允许 30 秒内配额为 1: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +创建一个路由并配置 `basic-auth` 插件来接受匿名消费者 `anonymous` 绕过身份验证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "basic-auth-route", + "uri": "/anything", + "plugins": { + "basic-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +为了验证,请使用 `john` 的密钥发送五个连续的请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -u johndoe:john-key -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,显示在 5 个请求中,3 个请求成功(状态代码 200),而其他请求被拒绝(状态代码 429)。 + +```text +200: 3, 429: 2 +``` + +发送五个匿名请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,表明只有一个请求成功: + +```text +200: 1, 429: 4 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/batch-requests.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/batch-requests.md new file mode 100644 index 0000000..045f3c2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/batch-requests.md @@ -0,0 +1,234 @@ +--- +title: batch-requests +keywords: + - Apache APISIX + - API 
网关 + - Plugin + - Batch Requests +description: 本文介绍了关于 Apache APISIX `batch-request` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +在启用 `batch-requests` 插件后,用户可以通过将多个请求组装成一个请求的形式,把请求发送给网关,网关会从请求体中解析出对应的请求,再分别封装成独立的请求,以 [HTTP pipeline](https://en.wikipedia.org/wiki/HTTP_pipelining) 的方式代替用户向网关自身再发起多个 HTTP 请求,经历路由匹配,转发到对应上游等多个阶段,合并结果后再返回客户端。 + +![batch-request](https://static.apiseven.com/uploads/2023/06/27/ATzEuOn4_batch-request.png) + +在客户端需要访问多个 API 的情况下,这将显著提高性能。 + +:::note + +用户原始请求中的请求头(除了以 `Content-` 开始的请求头,例如:`Content-Type`)将被赋给 HTTP pipeline 中的每个请求,因此对于网关来说,这些以 HTTP pipeline 方式发送给自身的请求与用户直接发起的外部请求没有什么不同,只能访问已经配置好的路由,并将经历完整的鉴权过程,因此不存在安全问题。 + +如果原始请求的请求头与插件中配置的请求头冲突,则以插件中配置的请求头优先(配置文件中指定的 real_ip_header 除外)。 + +::: + +## 属性 + +无。 + +## 接口 + +该插件会增加 `/apisix/batch-requests` 接口。 + +:::note + +你需要通过 [public-api](../../../zh/latest/plugins/public-api.md) 插件来暴露它。 + +::: + +## 启用插件 + +该插件默认是禁用状态,你可以在配置文件(`./conf/config.yaml`)添加如下配置启用 `batch-requests` 插件: + +```yaml title="conf/config.yaml" +plugins: + - ... 
+ - batch-requests +``` + +## 配置插件 + +默认情况下,可以发送到 `/apisix/batch-requests` 的最大请求体不能大于 1 MiB。你可以通过 `apisix/admin/plugin_metadata/batch-requests` 更改插件的此配置: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/batch-requests \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "max_body_size": 4194304 +}' +``` + +## 元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------- | -------| ------- | ------ | ---------------------------- | +| max_body_size | integer | 是 | 1048576 |[1, ...]| 请求体的最大大小,单位:bytes。 | + +## 请求和响应格式 + +该插件会为 `apisix` 创建一个 `/apisix/batch-requests` 的接口,用来处理批量请求。 + +### 请求参数 + +| 参数名 | 类型 | 必选项 | 默认值 | 描述 | +| -------- |------------------------------------| ------ | ------ | -------------------------------- | +| query | object | 否 | | 给所有请求都携带的 `query string`。 | +| headers | object | 否 | | 给所有请求都携带的 `header`。 | +| timeout | number | 否 | 30000 | 聚合请求的超时时间,单位为 `ms`。 | +| pipeline | array[[HttpRequest](#httprequest)] | 是 | | HTTP 请求的详细信息。 | + +#### HttpRequest + +| 参数名 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------- | ------- | -------- | ------- | -------------------------------------------------------------------------------- | --------------------------------------------------------------------- | +| version | string | 否 | 1.1 | [1.0, 1.1] | 请求所使用的 HTTP 协议版本。 | +| method | string | 否 | GET | ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS", "CONNECT", "TRACE"] | 请求使用的 HTTP 方法。 | +| query | object | 否 | | | 独立请求所携带的 `query string`, 如果 `Key` 和全局的有冲突,以此设置为主。 | +| headers | object | 否 | | | 独立请求所携带的 `header`, 如果 `Key` 和全局的有冲突,以此设置为主。 | +| path | string | 是 | | | HTTP 请求路径。 | +| body | string | 否 | | | HTTP 请求体。 | +| ssl_verify | boolean | 否 | false | | 验证 SSL 证书与主机名是否匹配。 | + +### 响应参数 + +返回值是一个 [HttpResponse](#httpresponse) 的`数组`。 + +#### HttpResponse + +| 参数名 | 
类型 | 描述 | +| ------- | ------- | ------------------- | +| status | integer | HTTP 请求的状态码。 | +| reason | string | HTTP 请求的返回信息。 | +| body | string | HTTP 请求的响应体。 | +| headers | object | HTTP 请求的响应头。 | + +## 修改自定义 URI + +你可以通过 [public-api](../../../en/latest/plugins/public-api.md) 插件设置自定义 URI。 + +只需要在创建路由时设置所需的 URI 并更改 `public-api` 插件的配置: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/br \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/batch-requests", + "plugins": { + "public-api": { + "uri": "/apisix/batch-requests" + } + } +}' +``` + +## 测试插件 + +首先,你需要为 `batch-requests` 插件的 API 创建一个路由,它将使用 [public-api](../../../en/latest/plugins/public-api.md) 插件。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/apisix/batch-requests", + "plugins": { + "public-api": {} + } +}' +``` + +之后,你就可以将要访问的请求信息传到网关的批量请求接口(`/apisix/batch-requests`)了,网关会以 [http pipeline](https://en.wikipedia.org/wiki/HTTP_pipelining) 的方式自动帮你完成请求。 + +```shell +curl --location --request POST 'http://127.0.0.1:9080/apisix/batch-requests' \ +--header 'Content-Type: application/json' \ +--data '{ + "headers": { + "Content-Type": "application/json", + "admin-jwt":"xxxx" + }, + "timeout": 500, + "pipeline": [ + { + "method": "POST", + "path": "/community.GiftSrv/GetGifts", + "body": "test" + }, + { + "method": "POST", + "path": "/community.GiftSrv/GetGifts", + "body": "test2" + } + ] +}' +``` + +正常返回结果如下: + +```json +[ + { + "status": 200, + "reason": "OK", + "body": "{\"ret\":500,\"msg\":\"error\",\"game_info\":null,\"gift\":[],\"to_gets\":0,\"get_all_msg\":\"\"}", + "headers": { + "Connection": "keep-alive", + "Date": "Sat, 11 Apr 2020 17:53:20 GMT", + "Content-Type": "application/json", + "Content-Length": "81", + "Server": "APISIX web server" + } + }, + { + "status": 200, + "reason": "OK", + "body": "{\"ret\":500,\"msg\":\"error\",\"game_info\":null,\"gift\":[],\"to_gets\":0,\"get_all_msg\":\"\"}", + "headers": { + 
"Connection": "keep-alive", + "Date": "Sat, 11 Apr 2020 17:53:20 GMT", + "Content-Type": "application/json", + "Content-Length": "81", + "Server": "APISIX web server" + } + } +] +``` + +## 删除插件 + +如果你想禁用插件,可以将 `batch-requests` 从配置文件中的插件列表删除,重新加载 APISIX 后即可生效。 + +```yaml title="conf/config.yaml" +plugins: # plugin list + - ... +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/body-transformer.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/body-transformer.md new file mode 100644 index 0000000..701efa4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/body-transformer.md @@ -0,0 +1,609 @@ +--- +title: body-transformer +keywords: + - Apache APISIX + - API 网关 + - Plugin + - BODY TRANSFORMER + - body-transformer +description: body-transformer 插件执行基于模板的转换,将请求和/或响应主体从一种格式转换为另一种格式,例如从 JSON 到 JSON、从 JSON 到 HTML 或从 XML 到 YAML。 +--- + + + + + + + +## 描述 + +`body-transformer` 插件执行基于模板的转换,将请求和/或响应主体从一种格式转换为另一种格式,例如从 JSON 到 JSON、从 JSON 到 HTML 或从 XML 到 YAML。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|--------------|----------------------|-------|---------------|--------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| `request` | object | 否 | | | 请求体转换配置。 | +| `request.input_format` | string | 否 | | [`xml`,`json`,`encoded`,`args`,`plain`,`multipart`] | 请求体原始媒体类型。若未指定,则该值将由 `Content-Type` 标头确定以应用相应的解码器。`xml` 选项对应于 `text/xml` 媒体类型。`json` 选项对应于 `application/json` 媒体类型。`encoded` 选项对应于 `application/x-www-form-urlencoded` 媒体类型。`args` 选项对应于 GET 请求。`plain` 选项对应于 `text/plain` 媒体类型。`multipart` 选项对应于 `multipart/related` 媒体类型。如果媒体类型不是这两种类型,则该值将保留未设置状态并直接应用转换模板。 | +| `request.template` | string | True | | | 请求体转换模板。模板使用 [lua-resty-template](https://github.com/bungle/lua-resty-template) 语法。有关更多详细信息,请参阅 [模板语法](https://github.com/bungle/lua-resty-template#template-syntax)。您还可以使用辅助函数 `_escape_json()` 和 
`_escape_xml()` 转义双引号等特殊字符,使用 `_body` 访问请求正文,使用 `_ctx` 访问上下文变量。| +| `request.template_is_base64` | boolean | 否 | false | | 如果模板是 base64 编码的,则设置为 true。| +| `response` | object | 否 | | | 响应体转换配置。| +| `response.input_format` | string | 否 | | [`xml`,`json`] | 响应体原始媒体类型。如果未指定,则该值将由 `Content-Type` 标头确定以应用相应的解码器。如果媒体类型既不是 `xml` 也不是 `json`,则该值将保留未设置状态,并直接应用转换模板。| +| `response.template` | string | True | | | 响应主体转换模板。| +| `response.template_is_base64` | boolean | 否 | false | | 如果模板是 base64 编码的,则设置为 true。| + +## 示例 + +以下示例演示了如何针对不同场景配置 `body-transformer`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +转换模板使用 [lua-resty-template](https://github.com/bungle/lua-resty-template) 语法。请参阅 [模板语法](https://github.com/bungle/lua-resty-template#template-syntax) 了解更多信息。 + +您还可以使用辅助函数 `_escape_json()` 和 `_escape_xml()` 转义特殊字符(例如双引号)、`_body` 访问请求正文以及 `_ctx` 访问上下文变量。 + +在所有情况下,您都应确保转换模板是有效的 JSON 字符串。 + +### JSON 和 XML SOAP 之间的转换 + +以下示例演示了在使用 SOAP 上游服务时如何将请求主体从 JSON 转换为 XML,将响应主体从 XML 转换为 JSON。 + +启动示例 SOAP 服务: + +```shell +cd /tmp +git clone https://github.com/spring-guides/gs-soap-service.git +cd gs-soap-service/complete +./mvnw spring-boot:run +``` + +创建请求和响应转换模板: + +```shell +req_template=$(cat < + + + + {{_escape_xml(name)}} + + + +EOF +) + +rsp_template=$(cat < 18 then + context._multipart:set_simple("status", "adult") + else + context._multipart:set_simple("status", "minor") + end + + local body = context._multipart:tostring() +%}{* body *} +EOF +) +``` + +创建一个带有 `body-transformer` 的路由,将 `input_format` 设置为 `multipart`,并使用之前创建的请求模板进行转换: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "body-transformer-route", + "uri": "/anything", + "plugins": { + "body-transformer": { + "request": { + "input_format": "multipart", + "template": "'"$req_template"'" + } + } + }, + "upstream": { + "type": 
"roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送多部分 POST 请求: + +```shell +curl -X POST \ + -F "name=john" \ + -F "age=10" \ + "http://127.0.0.1:9080/anything" +``` + +您应该会看到类似以下内容的响应: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": { + "age": "10", + "name": "john", + "status": "minor" + }, + "headers": { + "Accept": "*/*", + "Content-Length": "361", + "Content-Type": "multipart/form-data; boundary=------------------------qtPjk4c8ZjmGOXNKzhqnOP", + ... + }, + ... +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/brotli.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/brotli.md new file mode 100644 index 0000000..be13d1a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/brotli.md @@ -0,0 +1,133 @@ +--- +title: brotli +keywords: + - Apache APISIX + - API 网关 + - Plugin + - brotli +description: 这个文档包含有关 Apache APISIX brotli 插件的相关信息。 +--- + + + +## 描述 + +`brotli` 插件可以动态的设置 Nginx 中的 [brotli](https://github.com/google/ngx_brotli) 的行为。 + +## 前提条件 + +该插件依赖 brotli 共享库。 + +如下是构建和安装 brotli 共享库的示例脚本: + +``` shell +wget https://github.com/google/brotli/archive/refs/tags/v1.1.0.zip +unzip v1.1.0.zip +cd brotli-1.1.0 && mkdir build && cd build +cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local/brotli .. +sudo cmake --build . 
--config Release --target install +sudo sh -c "echo /usr/local/brotli/lib >> /etc/ld.so.conf.d/brotli.conf" +sudo ldconfig +``` + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|--------------|----------------------|-------|---------------|--------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| types | array[string] or "*" | False | ["text/html"] | | 动态设置 `brotli_types` 指令。特殊值 `"*"` 用于匹配任意的 MIME 类型。 | +| min_length | integer | False | 20 | >= 1 | 动态设置 `brotli_min_length` 指令。 | +| comp_level | integer | False | 6 | [0, 11] | 动态设置 `brotli_comp_level` 指令。 | +| mode | integer | False | 0 | [0, 2] | 动态设置 `brotli decompress mode`,更多信息参考 [RFC 7932](https://tools.ietf.org/html/rfc7932)。 | +| lgwin | integer | False | 19 | [0, 10-24] | 动态设置 `brotli sliding window size`,`lgwin` 是滑动窗口大小的以 2 为底的对数,将其设置为 0 会让压缩器自行决定最佳值,更多信息请参考 [RFC 7932](https://tools.ietf.org/html/rfc7932)。 | +| lgblock | integer | False | 0 | [0, 16-24] | 动态设置 `brotli input block size`,`lgblock` 是最大输入块大小的以 2 为底的对数,将其设置为 0 会让压缩器自行决定最佳值,更多信息请参考 [RFC 7932](https://tools.ietf.org/html/rfc7932)。 | +| http_version | number | False | 1.1 | 1.1, 1.0 | 与 `gzip_http_version` 指令类似,用于识别 http 的协议版本。 | +| vary | boolean | False | false | | 与 `gzip_vary` 指令类似,用于启用或禁用 `Vary: Accept-Encoding` 响应头。 | + +## 启用插件 + +如下示例中,在指定的路由上启用 `brotli` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/", + "plugins": { + "brotli": { + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' +``` + +## 使用示例 + +通过上述命令启用插件后,可以通过以下方法测试插件: + +```shell +curl http://127.0.0.1:9080/ -i -H "Accept-Encoding: br" +``` + +``` +HTTP/1.1 200 OK +Content-Type: text/html; charset=utf-8 
+Transfer-Encoding: chunked +Connection: keep-alive +Date: Tue, 05 Dec 2023 03:06:49 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/3.6.0 +Content-Encoding: br + +Warning: Binary output can mess up your terminal. Use "--output -" to tell +Warning: curl to output it to your terminal anyway, or consider "--output +Warning: " to save to a file. +``` + +## 删除插件 + +当您需要禁用 `brotli` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/chaitin-waf.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/chaitin-waf.md new file mode 100644 index 0000000..e090c92 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/chaitin-waf.md @@ -0,0 +1,263 @@ +--- +title: chaitin-waf +keywords: + - Apache APISIX + - API 网关 + - Plugin + - WAF +description: 本文介绍了关于 Apache APISIX `chaitin-waf` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +在启用 `chaitin-waf` 插件后,流量将被转发给长亭 WAF 服务,用以检测和防止各种 Web 应用程序攻击,以保护应用程序和用户数据的安全。 + +## 响应头 + +根据插件配置,可以选择是否附加额外的响应头。 + +响应头的信息如下: + +- **X-APISIX-CHAITIN-WAF**:APISIX 是否将请求转发给 WAF 服务器。 + - yes:转发 + - no:不转发 + - unhealthy:符合匹配条件,但没有可用的 WAF 服务器 + - err:插件执行过程中出错。此时会附带 **X-APISIX-CHAITIN-WAF-ERROR** 请求头 + - waf-err:与 WAF 服务器交互时出错。此时会附带 **X-APISIX-CHAITIN-WAF-ERROR** 请求头 + - timeout:与 WAF 服务器的交互超时 +- **X-APISIX-CHAITIN-WAF-ERROR**:调试用响应头。APISIX 与 WAF 交互时的错误信息。 +- **X-APISIX-CHAITIN-WAF-TIME**:APISIX 与 WAF 交互所耗费的时间,单位是毫秒。 +- **X-APISIX-CHAITIN-WAF-STATUS**:WAF 服务器返回给 APISIX 的状态码。 +- **X-APISIX-CHAITIN-WAF-ACTION**:WAF 服务器返回给 APISIX 的处理结果。 + - pass:请求合法 + - reject:请求被 WAF 服务器拒绝 +- **X-APISIX-CHAITIN-WAF-SERVER**:调试用响应头。所使用的 WAF 服务器。 + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | 
+|--------------------------|---------------|-----|-------|--------------------------------------------| +| nodes | array(object) | 必选 | | 长亭 WAF 的地址列表。 | +| nodes[0].host | string | 必选 | | 长亭 WAF 的地址,支持 IPV4、IPV6、Unix Socket 等配置方式。 | +| nodes[0].port | string | 可选 | 80 | 长亭 WAF 的端口。 | +| config | object | 否 | | 长亭 WAF 服务的配置参数值。当路由没有配置时将使用这里所配置的参数。 | +| config.connect_timeout | integer | 否 | 1000 | connect timeout, 毫秒 | +| config.send_timeout | integer | 否 | 1000 | send timeout, 毫秒 | +| config.read_timeout | integer | 否 | 1000 | read timeout, 毫秒 | +| config.req_body_size | integer | 否 | 1024 | 请求体大小,单位为 KB | +| config.keepalive_size | integer | 否 | 256 | 长亭 WAF 服务的最大并发空闲连接数 | +| config.keepalive_timeout | integer | 否 | 60000 | 空闲链接超时,毫秒 | + +一个典型的示例配置如下: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```bash +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/chaitin-waf -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "nodes":[ + { + "host": "unix:/path/to/safeline/resources/detector/snserver.sock", + "port": 8000 + } + ] +}' +``` + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|--------------------------|---------------|-----|-------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| match | array[object] | 否 | | 匹配规则列表,默认为空且规则将被无条件执行。 | +| match.vars | array[array] | 否 | | 由一个或多个 `{var, operator, val}` 元素组成的列表,例如:`{"arg_name", "==", "json"}`,表示当前请求参数 `name` 是 `json`。这里的 `var` 与 NGINX 内部自身变量命名是保持一致,所以也可以使用 `request_uri`、`host` 等;对于已支持的运算符,具体用法请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 的 `operator-list` 部分。 | +| append_waf_resp_header | bool | 否 | true | 是否添加响应头 | +| append_waf_debug_header | 
bool | 否 | false | 是否添加调试用响应头,`add_header` 为 `true` 时才生效 | +| config | object | 否 | | 长亭 WAF 服务的配置参数值。当路由没有配置时将使用元数据里所配置的参数。 | +| config.connect_timeout | integer | 否 | | connect timeout, 毫秒 | +| config.send_timeout | integer | 否 | | send timeout, 毫秒 | +| config.read_timeout | integer | 否 | | read timeout, 毫秒 | +| config.req_body_size | integer | 否 | | 请求体大小,单位为 KB | +| config.keepalive_size | integer | 否 | | 长亭 WAF 服务的最大并发空闲连接数 | +| config.keepalive_timeout | integer | 否 | | 空闲链接超时,毫秒 | + +一个典型的示例配置如下,这里使用 `httpbun.org` 作为示例后端,可以按需替换: + +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "plugins": { + "chaitin-waf": { + "match": [ + { + "vars": [ + ["http_waf","==","true"] + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbun.org:80": 1 + } + } +}' +``` + +## 测试插件 + +以上述的示例配置为例进行测试。 + +不满足匹配条件时,请求可以正常触达: + +```bash +curl -H "Host: httpbun.org" http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 408 +Connection: keep-alive +X-APISIX-CHAITIN-WAF: no +Date: Wed, 19 Jul 2023 09:30:42 GMT +X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 +Server: APISIX/3.3.0 + +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Connection": "close", + "Host": "httpbun.org", + "User-Agent": "curl/8.1.2", + "X-Forwarded-For": "127.0.0.1", + "X-Forwarded-Host": "httpbun.org", + "X-Forwarded-Port": "9080", + "X-Forwarded-Proto": "http", + "X-Real-Ip": "127.0.0.1" + }, + "method": "GET", + "origin": "127.0.0.1, 122.231.76.178", + "url": "http://httpbun.org/get" +} +``` + +面对潜在的注入请求也原样转发并遇到 404 错误: + +```bash +curl -H "Host: httpbun.org" http://127.0.0.1:9080/getid=1%20AND%201=1 -i + +HTTP/1.1 404 Not Found +Content-Type: text/plain; charset=utf-8 +Content-Length: 19 +Connection: keep-alive +X-APISIX-CHAITIN-WAF: no +Date: Wed, 19 Jul 2023 09:30:28 GMT +X-Content-Type-Options: nosniff +X-Powered-By: 
httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 +Server: APISIX/3.3.0 + +404 page not found +``` + +当满足匹配条件时,正常请求依然可以触达上游: + +```bash +curl -H "Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 427 +Connection: keep-alive +X-APISIX-CHAITIN-WAF-TIME: 2 +X-APISIX-CHAITIN-WAF-STATUS: 200 +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-ACTION: pass +Date: Wed, 19 Jul 2023 09:29:58 GMT +X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 +Server: APISIX/3.3.0 + +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Connection": "close", + "Host": "httpbun.org", + "User-Agent": "curl/8.1.2", + "Waf": "true", + "X-Forwarded-For": "127.0.0.1", + "X-Forwarded-Host": "httpbun.org", + "X-Forwarded-Port": "9080", + "X-Forwarded-Proto": "http", + "X-Real-Ip": "127.0.0.1" + }, + "method": "GET", + "origin": "127.0.0.1, 122.231.76.178", + "url": "http://httpbun.org/get" +} +``` + +而潜在的攻击请求将会被拦截并返回 403 错误: + +```bash +curl -H "Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/getid=1%20AND%201=1 -i + +HTTP/1.1 403 Forbidden +Date: Wed, 19 Jul 2023 09:29:06 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-TIME: 2 +X-APISIX-CHAITIN-WAF-ACTION: reject +X-APISIX-CHAITIN-WAF-STATUS: 403 +Server: APISIX/3.3.0 +Set-Cookie: sl-session=UdywdGL+uGS7q8xMfnJlbQ==; Domain=; Path=/; Max-Age=86400 + +{"code": 403, "success":false, "message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "51a268653f2c4189bfa3ec66afbcb26d"} +``` + +## 删除插件 + +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```bash +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbun.org:80": 1 + } + } +}' +``` diff --git 
a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/clickhouse-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/clickhouse-logger.md new file mode 100644 index 0000000..f482f44 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/clickhouse-logger.md @@ -0,0 +1,209 @@ +--- +title: clickhouse-logger +keywords: + - APISIX + - API 网关 + - Plugin + - ClickHouse +description: 本文介绍了 API 网关 Apache APISIX 如何使用 clickhouse-logger 插件将日志数据发送到 ClickHouse 数据库中。 +--- + + + +## 描述 + +`clickhouse-logger` 插件可用于将日志数据推送到 [ClickHouse](https://github.com/ClickHouse/ClickHouse) 数据库中。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------------- | ----------- | -------------------------------------------------------- | +| endpoint_addr | 废弃 | 是 | | | ClickHouse 的 `endpoints`。请使用 `endpoint_addrs` 代替。 | +| endpoint_addrs | array | 是 | | | ClickHouse 的 `endpoints。`。 | +| database | string | 是 | | | 使用的数据库。 | +| logtable | string | 是 | | | 写入的表名。 | +| user | string | 是 | | | ClickHouse 的用户。 | +| password | string | 是 | | | ClickHouse 的密码。 | +| timeout | integer | 否 | 3 | [1,...] 
| 发送请求后保持连接活动的时间。 | +| name | string | 否 | "clickhouse logger" | | 标识 logger 的唯一标识符。如果您使用 Prometheus 监视 APISIX 指标,名称将以 `apisix_batch_process_entries` 导出。 | +| ssl_verify | boolean | 否 | true | [true,false] | 当设置为 `true` 时,验证证书。 | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| include_req_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,包含请求体。**注意**:如果请求体无法完全存放在内存中,由于 NGINX 的限制,APISIX 无法将它记录下来。| +| include_req_body_expr | array | 否 | | | 当 `include_req_body` 属性设置为 `true` 时进行过滤。只有当此处设置的表达式计算结果为 `true` 时,才会记录请求体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | +| include_resp_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,包含响应体。 | +| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 属性设置为 `true` 时进行过滤。只有当此处设置的表达式计算结果为 `true` 时才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。| + +注意:schema 中还定义了 `encrypt_fields = {"password"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +该插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认情况下批处理器每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +### 默认日志格式示例 + +```json +{ + "response": { + "status": 200, + "size": 118, + "headers": { + "content-type": "text/plain", + "connection": "close", + "server": "APISIX/3.7.0", + "content-length": "12" + } + }, + "client_ip": "127.0.0.1", + "upstream_latency": 3, + "apisix_latency": 98.999998092651, + "upstream": "127.0.0.1:1982", + "latency": 101.99999809265, + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "route_id": "1", + "start_time": 1704507612177, + "service_id": "", + "request": { + "method": "POST", + "querystring": { + "foo": "unknown" + }, + "headers": { + "host": "localhost", + "connection": "close", + "content-length": "18" + }, + "size": 110, + "uri": "/hello?foo=unknown", + "url": 
"http://localhost:1984/hello?foo=unknown" + } +} +``` + +## 配置插件元数据 + +`clickhouse-logger` 也支持自定义日志格式,与 [http-logger](./http-logger.md) 插件类似。 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX](../apisix-variable.md) 或 [NGINX](http://nginx.org/en/docs/varindex.html) 变量。该配置全局生效。如果你指定了 `log_format`,该配置就会对所有绑定 `clickhouse-logger` 的路由或服务生效。| + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/clickhouse-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +您可以使用 Clickhouse docker 镜像来创建一个容器,如下所示: + +```shell +docker run -d -p 8123:8123 -p 9000:9000 -p 9009:9009 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server +``` + +然后在您的 ClickHouse 数据库中创建一个表来存储日志。 + +```shell +curl -X POST 'http://localhost:8123/' \ +--data-binary 'CREATE TABLE default.test (host String, client_ip String, route_id String, service_id String, `@timestamp` String, PRIMARY KEY(`@timestamp`)) ENGINE = MergeTree()' --user default: +``` + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "", + "database": "default", + "logtable": "test", + "endpoint_addrs": ["http://127.0.0.1:8123"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +:::note 注意 + +如果配置多个 `endpoints`,日志将会随机写入到各个 `endpoints`。 + +::: + +## 测试插件 + +现在你可以向 APISIX 发起请求: + +```shell 
+curl -i http://127.0.0.1:9080/hello +``` + +现在,如果您检查表中的行,您将获得以下输出: + +```shell +curl 'http://localhost:8123/?query=select%20*%20from%20default.test' +127.0.0.1 127.0.0.1 1 2023-05-08T19:15:53+05:30 +``` + +## 删除插件 + +当你需要删除该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/client-control.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/client-control.md new file mode 100644 index 0000000..529c73d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/client-control.md @@ -0,0 +1,116 @@ +--- +title: client-control +keywords: + - APISIX + - API 网关 + - Client Control +description: 本文介绍了 Apache APISIX proxy-control 插件的相关操作,你可以使用此插件动态地控制 NGINX 处理客户端的请求的行为。 +--- + + + +## 描述 + +`client-control` 插件能够通过设置客户端请求体大小的上限来动态地控制 NGINX 处理客户端的请求。 + +:::info 重要 + +此插件需要 APISIX 在 [APISIX-Runtime](../FAQ.md#如何构建-apisix-Runtime-环境) 环境上运行。更多信息请参考 [apisix-build-tools](https://github.com/api7/apisix-build-tools)。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 有效值 | 描述 | +| --------- | ------------- | ----------- | ------------------------------------------------------------------------ |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| max_body_size | integer | 否 | [0,...] 
| 设置客户端请求体的最大上限,动态调整 [`client_max_body_size`](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) 的大小,单位为字节。当设置 `max_body_size` 为 0 时,将不会对客户端请求体大小进行检查。 | + +## 启用插件 + +以下示例展示了如何在指定路由上启用 `client-control` 插件,并设置 `max_body_size`: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "client-control": { + "max_body_size" : 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +启用插件后,使用 `curl` 命令请求该路由: + +```shell +curl -i http://127.0.0.1:9080/index.html -d '123' +``` + +因为在配置插件时设置了 `max_body_size` 为 `1`,所以返回的 HTTP 响应头中如果带有 `413` 状态码,则表示插件生效: + +```shell +HTTP/1.1 413 Request Entity Too Large +... + +413 Request Entity Too Large + +

+<html>
+<head><title>413 Request Entity Too Large</title></head>
+<body>
+<center><h1>413 Request Entity Too Large</h1></center>
+<hr><center>openresty</center>
+</body>
+</html>
+ + +``` + +## 删除插件 + +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/consumer-restriction.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/consumer-restriction.md new file mode 100644 index 0000000..bfca572 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/consumer-restriction.md @@ -0,0 +1,353 @@ +--- +title: consumer-restriction +keywords: + - Apache APISIX + - API 网关 + - Consumer restriction +description: Consumer Restriction 插件允许用户根据 Route、Service、Consumer 或 Consumer Group 来设置相应的访问限制。 +--- + + + +## 描述 + +`consumer-restriction` 插件允许用户根据 Route、Service、Consumer 或 Consumer Group 来设置相应的访问限制。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| -------------------------- | ------------- | ------ | ------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| type | string | 否 | consumer_name | ["consumer_name", "consumer_group_id", "service_id", "route_id"] | 支持设置访问限制的对象类型。 | +| whitelist | array[string] | 是 | | | 加入白名单的对象,优先级高于`allowed_by_methods`。 | +| blacklist | array[string] | 是 | | | 加入黑名单的对象,优先级高于`whitelist`。 | +| rejected_code | integer | 否 | 403 | [200,...] 
| 当请求被拒绝时,返回的 HTTP 状态码。 | +| rejected_msg | string | 否 | | | 当请求被拒绝时,返回的错误信息。 | +| allowed_by_methods | array[object] | 否 | | | 一组为 Consumer 设置允许的配置,包括用户名和允许的 HTTP 方法列表。 | +| allowed_by_methods.user | string | 否 | | | 为 Consumer 设置的用户名。 | +| allowed_by_methods.methods | array[string] | 否 | | ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS", "CONNECT", "TRACE", "PURGE"] | 为 Consumer 设置的允许的 HTTP 方法列表。 | + +:::note + +不同的 `type` 属性值分别代表以下含义: + +- `consumer_name`:把 Consumer 的 `username` 列入白名单或黑名单来限制 Consumer 对 Route 或 Service 的访问。 +- `consumer_group_id`: 把 Consumer Group 的 `id` 列入白名单或黑名单来限制 Consumer 对 Route 或 Service 的访问。 +- `service_id`:把 Service 的 `id` 列入白名单或黑名单来限制 Consumer 对 Service 的访问,需要结合授权插件一起使用。 +- `route_id`:把 Route 的 `id` 列入白名单或黑名单来限制 Consumer 对 Route 的访问。 + +::: + +## 启用并测试插件 + +### 通过 `consumer_name` 限制访问 + +首先,创建两个 Consumer,分别为 `jack1` 和 `jack2`: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "username": "jack1", + "plugins": { + "basic-auth": { + "username":"jack2019", + "password": "123456" + } + } +}' + +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "username": "jack2", + "plugins": { + "basic-auth": { + "username":"jack2020", + "password": "123456" + } + } +}' +``` + +然后,在指定路由上启用并配置 `consumer-restriction` 插件,并通过将 `consumer_name` 加入 `whitelist` 来限制不同 Consumer 的访问: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "whitelist": [ + "jack1" + ] + } + } +}' +``` + +**测试插件** + +`jack1` 发出访问请求,返回 `200` HTTP 状态码,代表访问成功: + +```shell +curl -u 
jack2019:123456 http://127.0.0.1:9080/index.html -i +``` + +```shell +HTTP/1.1 200 OK +``` + +`jack2` 发出访问请求,返回 `403` HTTP 状态码,代表访问被限制,插件生效: + +```shell +curl -u jack2020:123456 http://127.0.0.1:9080/index.html -i +``` + +```shell +HTTP/1.1 403 Forbidden +... +{"message":"The consumer_name is forbidden."} +``` + +### 通过 `allowed_by_methods` 限制访问 + +首先,创建两个 Consumer,分别为 `jack1` 和 `jack2`,创建方法请参考[通过 `consumer_name` 限制访问](#通过-consumername-限制访问)。 + +然后,在指定路由上启用并配置 `consumer-restriction` 插件,并且仅允许 `jack1` 使用 `POST` 方法进行访问: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "allowed_by_methods":[{ + "user": "jack1", + "methods": ["POST"] + }] + } + } +}' +``` + +**测试插件** + +`jack1` 发出访问请求,返回 `403` HTTP 状态码,代表访问被限制: + +```shell +curl -u jack2019:123456 http://127.0.0.1:9080/index.html +``` + +```shell +HTTP/1.1 403 Forbidden +... 
+{"message":"The consumer_name is forbidden."} +``` + +现在更新插件配置,增加 `jack1` 的 `GET` 访问能力: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "allowed_by_methods":[{ + "user": "jack1", + "methods": ["POST","GET"] + }] + } + } +}' +``` + +`jack1` 再次发出访问请求,返回 `200` HTTP 状态码,代表访问成功: + +```shell +curl -u jack2019:123456 http://127.0.0.1:9080/index.html +``` + +```shell +HTTP/1.1 200 OK +``` + +### 通过 `service_id` 限制访问 + +使用 `service_id` 的方式需要与授权插件一起配合使用,这里以 [`key-auth`](./key-auth.md) 授权插件为例。 + +首先,创建两个 Service: + +```shell +curl http://127.0.0.1:9180/apisix/admin/services/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 001" +}' + +curl http://127.0.0.1:9180/apisix/admin/services/2 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 002" +}' +``` + +在指定 Consumer 上配置 `key-auth` 和 `consumer-restriction` 插件,并通过将 `service_id` 加入 `whitelist` 来限制 Consumer 对 Service 的访问: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "new_consumer", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "consumer-restriction": { + "type": "service_id", + "whitelist": [ + "1" + ], + "rejected_code": 403 + } + } +}' +``` + +**测试插件** + +在指定路由上启用并配置 `key-auth` 插件,并绑定 `service_id` 为 `1`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "service_id": 1, + "plugins": { + "key-auth": { + } + } +}' +``` + +对 Service 发出访问请求,返回 `403` HTTP 
状态码,说明在白名单列中的 `service_id` 允许访问,插件生效: + +```shell +curl http://127.0.0.1:9080/index.html -H 'apikey: auth-jack' -i +``` + +```shell +HTTP/1.1 200 OK +``` + +更新配置 `key-auth` 插件,并绑定 `service_id` 为 `2`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "service_id": 2, + "plugins": { + "key-auth": { + } + } +}' +``` + +再次对 Service 发出访问请求,返回 `403` HTTP 状态码,说明不在白名单列表的 `service_id` 被拒绝访问,插件生效: + +```shell +curl http://127.0.0.1:9080/index.html -H 'apikey: auth-jack' -i +``` + +```shell +HTTP/1.1 403 Forbidden +... +{"message":"The service_id is forbidden."} +``` + +## 删除插件 + +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {} + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/cors.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/cors.md new file mode 100644 index 0000000..9367c27 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/cors.md @@ -0,0 +1,127 @@ +--- +title: cors +keywords: + - Apache APISIX + - API 网关 + - CORS +description: 本文介绍了 Apache APISIX cors 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`cors` 插件可以让你轻松地为服务端启用 [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS)(Cross-Origin Resource Sharing,跨域资源共享)的返回头。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ---------------- | ------- | ------ | ------ | ------------------------------------------------------------ | +| allow_origins | string | 否 | "*" | 允许跨域访问的 Origin,格式为 `scheme://host:port`,示例如 `https://somedomain.com:8081`。如果你有多个 Origin,请使用 `,` 分隔。当 `allow_credential` 为 `false` 时,可以使用 `*` 来表示允许所有 Origin 通过。你也可以在启用了 
`allow_credential` 后使用 `**` 强制允许所有 Origin 均通过,但请注意这样存在安全隐患。 | +| allow_methods | string | 否 | "*" | 允许跨域访问的 Method,比如:`GET`,`POST` 等。如果你有多个 Method,请使用 `,` 分割。当 `allow_credential` 为 `false` 时,可以使用 `*` 来表示允许所有 Method 通过。你也可以在启用了 `allow_credential` 后使用 `**` 强制允许所有 Method 都通过,但请注意这样存在安全隐患。 | +| allow_headers | string | 否 | "*" | 允许跨域访问时请求方携带哪些非 `CORS 规范` 以外的 Header。如果你有多个 Header,请使用 `,` 分割。当 `allow_credential` 为 `false` 时,可以使用 `*` 来表示允许所有 Header 通过。你也可以在启用了 `allow_credential` 后使用 `**` 强制允许所有 Header 都通过,但请注意这样存在安全隐患。 | +| expose_headers | string | 否 | | 允许跨域访问时响应方携带哪些非 CORS 规范 以外的 Header。如果你有多个 Header,请使用 , 分割。当 allow_credential 为 false 时,可以使用 * 来表示允许任意 Header。如果不设置,插件不会修改 `Access-Control-Expose-Headers` 头,详情请参考 [Access-Control-Expose-Headers - MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers)。 | +| max_age | integer | 否 | 5 | 浏览器缓存 CORS 结果的最大时间,单位为秒。在这个时间范围内,浏览器会复用上一次的检查结果,`-1` 表示不缓存。请注意各个浏览器允许的最大时间不同,详情请参考 [Access-Control-Max-Age - MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age#directives)。 | +| allow_credential | boolean | 否 | false | 是否允许跨域访问的请求方携带凭据(如 Cookie 等)。根据 CORS 规范,如果设置该选项为 `true`,那么将不能在其他属性中使用 `*`。 | +| allow_origins_by_regex | array | 否 | nil | 使用正则表达式数组来匹配允许跨域访问的 Origin,如 `[".*\.test.com$"]` 可以匹配任何 `test.com` 的子域名。如果 `allow_origins_by_regex` 属性已经指定,则会忽略 `allow_origins` 属性。 | +| allow_origins_by_metadata | array | 否 | nil | 通过引用插件元数据的 `allow_origins` 配置允许跨域访问的 Origin。比如当插件元数据为 `"allow_origins": {"EXAMPLE": "https://example.com"}` 时,配置 `["EXAMPLE"]` 将允许 Origin `https://example.com` 的访问。 | + +:::info IMPORTANT + +1. `allow_credential` 是一个很敏感的选项,请谨慎开启。开启之后,其他参数默认的 `*` 将失效,你必须显式指定它们的值。 +2. 
在使用 `**` 时,需要清楚该参数引入的一些安全隐患,比如 CSRF,并确保这样的安全等级符合自己预期。 + +::: + +## 元数据 + +| 名称 | 类型 | 必选项 | 描述 | +| ----------- | ------ | ------ | ------------------ | +| allow_origins | object | 否 | 定义允许跨域访问的 Origin;它的键为 `allow_origins_by_metadata` 使用的引用键,值则为允许跨域访问的 Origin,其语义与属性中的 `allow_origins` 相同。 | + +## 启用插件 + +你可以在路由或服务上启用 `cors` 插件。 + +你可以通过如下命令在指定路由上启用 `cors` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "cors": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,可以使用如下命令测试插件是否启用成功: + +```shell +curl http://127.0.0.1:9080/hello -v +``` + +如果返回结果中出现 CORS 相关的 header,则代表插件生效: + +```shell +... +< Server: APISIX web server +< Access-Control-Allow-Origin: * +< Access-Control-Allow-Methods: * +< Access-Control-Allow-Headers: * +< Access-Control-Max-Age: 5 +... 
+``` + +## 删除插件 + +当你需要禁用 `cors` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/csrf.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/csrf.md new file mode 100644 index 0000000..49eb4bb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/csrf.md @@ -0,0 +1,155 @@ +--- +title: csrf +keywords: + - Apache APISIX + - API 网关 + - 跨站请求伪造攻击 + - Cross-site request forgery + - csrf +description: CSRF 插件基于 Double Submit Cookie 的方式,帮助用户阻止跨站请求伪造攻击。 + +--- + + + +## 描述 + +`csrf` 插件基于 [`Double Submit Cookie`](https://en.wikipedia.org/wiki/Cross-site_request_forgery#Double_Submit_Cookie) 的方式,保护用户的 API 免于 CSRF 攻击。 + +在此插件运行时,`GET`、`HEAD` 和 `OPTIONS` 会被定义为 `safe-methods`,其他的请求方法则定义为 `unsafe-methods`。因此 `GET`、`HEAD` 和 `OPTIONS` 方法的调用不会被检查拦截。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ----------- | ------- | ----- |---------------------| +| name | string | 否 | `apisix-csrf-token` | | 生成的 Cookie 中的 Token 名称,需要使用此名称在请求头携带 Cookie 中的内容。 | +| expires | number | 否 | `7200` | | CSRF Cookie 的过期时间,单位为秒。当设置为 `0` 时,会忽略 CSRF Cookie 过期时间检查。| +| key | string | 是 | | | 加密 Token 的密钥。 | + +注意:schema 中还定义了 `encrypt_fields = {"key"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +## 启用插件 + +以下示例展示了如何在指定路由上启用并配置 `csrf` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "csrf": { + "key": "edd1c9f034335f136f87ad84b625c8f1" + } + }, + 
"upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:9001": 1 + } + } +}' +``` + +当你使用 `GET` 之外的方法访问被保护的路由时,请求会被拦截并返回 `401` HTTP 状态码。 + +使用 `GET` 请求 `/hello` 时,在响应中会有一个携带了加密 Token 的 Cookie。Token 字段名称为插件配置中的 `name` 值,默认为 `apisix-csrf-token`。 + +:::note + +每一个请求都会返回一个新的 Cookie。 + +::: + +在后续对该路由进行的 `unsafe-methods` 请求中,需要从 Cookie 中读取加密的 Token,并在请求头中携带该 Token。请求头字段的名称为插件属性中的 `name`。 + +## 测试插件 + +启用插件后,使用 `curl` 命令尝试直接对该路由发起 `POST` 请求,会返回 `Unauthorized` 字样的报错提示: + +```shell +curl -i http://127.0.0.1:9080/hello -X POST +``` + +```shell +HTTP/1.1 401 Unauthorized +... +{"error_msg":"no csrf token in headers"} +``` + +当发起 `GET` 请求时,返回结果中会有携带 Token 的 Cookie: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... +Set-Cookie: apisix-csrf-token=eyJyYW5kb20iOjAuNjg4OTcyMzA4ODM1NDMsImV4cGlyZXMiOjcyMDAsInNpZ24iOiJcL09uZEF4WUZDZGYwSnBiNDlKREtnbzVoYkJjbzhkS0JRZXVDQm44MG9ldz0ifQ==;path=/;Expires=Mon, 13-Dec-21 09:33:55 GMT +``` + +在请求之前,用户需要从 Cookie 中读取 Token,并在后续的 `unsafe-methods` 请求的请求头中携带。 + +例如,你可以在客户端使用 [js-cookie](https://github.com/js-cookie/js-cookie) 读取 Cookie,使用 [axios](https://github.com/axios/axios) 发送请求: + +```js +const token = Cookie.get('apisix-csrf-token'); + +const instance = axios.create({ + headers: {'apisix-csrf-token': token} +}); +``` + +使用 `curl` 命令发送请求,确保请求中携带了 Cookie 信息,如果返回 `200` HTTP 状态码则表示请求成功: + +```shell +curl -i http://127.0.0.1:9080/hello -X POST -H 'apisix-csrf-token: eyJyYW5kb20iOjAuNjg4OTcyMzA4ODM1NDMsImV4cGlyZXMiOjcyMDAsInNpZ24iOiJcL09uZEF4WUZDZGYwSnBiNDlKREtnbzVoYkJjbzhkS0JRZXVDQm44MG9ldz0ifQ==' -b 'apisix-csrf-token=eyJyYW5kb20iOjAuNjg4OTcyMzA4ODM1NDMsImV4cGlyZXMiOjcyMDAsInNpZ24iOiJcL09uZEF4WUZDZGYwSnBiNDlKREtnbzVoYkJjbzhkS0JRZXVDQm44MG9ldz0ifQ==' +``` + +```shell +HTTP/1.1 200 OK +``` + +## 删除插件 + +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + 
"type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/datadog.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/datadog.md new file mode 100644 index 0000000..7bb50d2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/datadog.md @@ -0,0 +1,210 @@ +--- +title: datadog +--- + + + +## 简介 + +`datadog` 是 Apache APISIX 内置的监控插件,可与 [Datadog](https://www.datadoghq.com/)(云应用最常用的监控和可观测性平台之一)无缝集成。`datadog` 插件支持对每个请求和响应周期进行多种指标参数的获取,这些指标参数基本反映了系统的行为和健康状况。 + +`datadog` 插件通过 UDP 协议将其自定义指标推送给 DogStatsD 服务器,该服务器通过 UDP 连接与 Datadog Agent 捆绑在一起(关于如何安装 Datadog Agent,请参考[Agent](https://docs.datadoghq.com/agent/) )。DogStatsD 基本上是 StatsD 协议的实现,它为 Apache APISIX Agent 收集自定义指标,并将其聚合成单个数据点,发送到配置的 Datadog 服务器。更多关于 DogStatsD 的信息,请参考 [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/?tab=hostagent) 。 + +`datadog` 插件具有将多个指标参数组成一个批处理统一推送给外部 Datadog Agent 的能力,并且可以重复使用同一个数据包套接字。 + +此功能可以有效解决日志数据发送不及时的问题。在创建批处理器之后,如果对 `inactive_timeout` 参数进行配置,那么批处理器会在配置好的时间内自动发送日志数据。如果不进行配置,时间默认为 5s。 + +关于 Apache APISIX 的批处理程序的更多信息,请参考 [Batch-Processor](../batch-processor.md#配置) + +## APISIX-Datadog plugin 工作原理 + +![APISIX-Datadog 插件架构图](https://static.apiseven.com/202108/1636685752757-d02d8305-2a68-4b3e-b2cc-9e5410c8bf11.png) + +APISIX-Datadog 插件将其自定义指标推送到 DogStatsD server。而 DogStatsD server 通过 UDP 连接与 Datadog agent 捆绑在一起。DogStatsD 是 StatsD 协议的一个实现。它为 Apache APISIX agent 收集自定义指标,将其聚合成一个数据点,并将其发送到配置的 Datadog server。要了解更多关于 DogStatsD 的信息,请访问 DogStatsD 文档。 + +当你启用 APISIX-Datadog 插件时,Apache APISIX agent 会在每个请求响应周期向 DogStatsD server 输出以下指标: + +| 参数名称 | StatsD 类型 | 描述 | +| ---------------- | ----------- | ---------------------------------------------------------- | +| Request Counter | Counter | 收到的请求数量。 | +| Request Latency | Histogram | 处理该请求所需的时间,以毫秒为单位。 | +| Upstream latency | Histogram | 上游 server agent 请求到收到响应所需的时间,以毫秒为单位。 | +| APISIX Latency | Histogram | APISIX 
agent 处理该请求的时间,以毫秒为单位。 | +| Ingress Size | Timer | 请求体大小,以字节为单位。 | +| Egress Size | Timer | 响应体大小,以字节为单位。 | + +这些指标将被发送到 DogStatsD agent,并带有以下标签。如果任何特定的标签没有合适的值,该标签将被直接省略。 + +| 参数名称 | 描述 | +| --------------- | ------------------------------------------------------------ | +| route_name | 路由的名称,如果不存在,将显示路由 ID。 | +| service_id | 如果一个路由是用服务的抽象概念创建的,那么特定的服务 ID 将被使用。 | +| consumer | 如果路由有一个链接的消费者,消费者的用户名将被添加为一个标签。 | +| balancer_ip | 处理了当前请求的上游负载均衡器的 IP。 | +| response_status | HTTP 响应状态代码。 | +| scheme | 已用于提出请求的协议,如 HTTP、gRPC、gRPCs 等。 | + +APISIX-Datadog 插件维护了一个带有 timer 的 buffer。当 timer 失效时,APISIX-Datadog 插件会将 buffer 的指标作为一个批量处理程序传送给本地运行的 DogStatsD server。这种方法通过重复使用相同的 UDP 套接字,对资源的占用较少,而且由于可以配置 timer,所以不会一直让网络过载。 + +## 如何使用插件 + +### 前提:Datadog Agent + +1. 首先你必须在系统中安装一个 Datadog agent。它可以是一个 docker 容器,一个 pod 或是一个二进制的包管理器。你只需要确保 Apache APISIX agent 可以到达 Datadog agent 的 8125 端口。 +2. 如果你从没使用过 Datadog + 1. 首先访问 [www.datadoghq.com](http://www.datadoghq.com/) ,创建一个账户。 + 2. 然后按照下图标注的步骤生成 API 密钥。 ![Generate an API Key](https://static.apiseven.com/202108/1636685007445-05f134fd-e80a-4173-b1d7-f0a118087998.png) +3. 
APISIX-Datadog 插件只需要依赖 `datadog/agent` 的 dogstatsd 组件即可实现,因为该插件按照 statsd 协议通过标准的 UDP 套接字向 DogStatsD server 异步发送参数。我们推荐使用独立的 `datadog/dogstatsd` 镜像,而不是使用完整的`datadog/agent` ,因为 `datadog/dogstatsd` 的组件大小只有大约 11 MB,更加轻量化。而完整的 `datadog/agent` 镜像的大小为 2.8 GB。 + +运行以下命令,将它作为一个容器来运行: + +```shell +# pull the latest image +docker pull datadog/dogstatsd:latest +# run a detached container +docker run -d --name dogstatsd-agent -e DD_API_KEY= -p 8125:8125/udp datadog/dogstatsd +``` + +如果你在生产环境中使用 Kubernetes,你可以将 `dogstatsd` 作为一个 `Daemonset` 或 `Multi-Container Pod` 与 Apache APISIX agent 一起部署。 + +### 启用插件 + +本小节介绍了如何在指定路由上启用 `datadog` 插件。进行以下操作之前请确认您的 Datadog Agent 已经启动并正常运行。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "datadog": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +现在,任何对 uri `/hello` 的请求都会生成上述指标,并推送到 Datadog Agent 的 DogStatsD 服务器。 + +### 删除插件 + +删除插件配置中相应的 JSON 配置以禁用 `datadog`。 +APISIX 插件是支持热加载的,所以不用重新启动 APISIX,配置就能生效。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +### 补充:自定义配置 + +在默认配置中,`datadog` 插件希望 Dogstatsd 服务在 `127.0.0.1:8125` 可用。如果你想更新配置,请更新插件的元数据。如果想要了解更多关于 `datadog` 插件元数据的字段,请参阅[元数据](#元数据)。 + +向 `/apisix/admin/plugin_metadata/datadog` 发起请求,更改其元数据。操作示例如下: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/datadog -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "host": "172.168.45.29", + "port": 8126, + "constant_tags": [ + "source:apisix", + "service:custom" + ], + "namespace": "apisix" +}' +``` + +上述命令将会更新元数据,后续各指标将通过 UDP 
StatsD 推送到 `172.168.45.29:8126` 上对应的服务,并且配置将被热加载,不需要重新启动 APISIX 实例,就可以使配置生效。 + +如果你想把 `datadog` 插件的元数据 schema 恢复到默认值,只需向同一个服务地址再发出一个 Body 为空的 PUT 请求。示例如下: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/datadog \ +-H "X-API-KEY: $admin_key" -X PUT -d '{}' +``` + +## 配置属性 + +### 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ----------- | ------ | ----------- | ------- | ----- | ------------------------------------------------------------ | +| prefer_name | boolean | optional | true | true/false | 如果设置为 `false`,将使用路由/服务的 id 值作为插件的 `route_name`,而不是带有参数的标签名称。 | + +该插件支持使用批处理程序来聚集和处理条目(日志/数据)的批次。这就避免了插件频繁地提交数据,默认情况下,批处理程序每 `5` 秒或当队列中的数据达到 `1000` 时提交数据。有关信息或自定义批处理程序的参数设置,请参阅[批处理程序](../batch-processor.md#configuration) 配置部分。 + +### 元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ----------- | ------ | ----------- | ------- | ---------------------------------------------------------------------- | +| host | string | optional | "127.0.0.1" | DogStatsD 服务器的主机地址 | +| port | integer | optional | 8125 | DogStatsD 服务器的主机端口 | +| namespace | string | optional | "apisix" | 由 APISIX 代理发送的所有自定义参数的前缀。对寻找指标图的实体很有帮助,例如:apisix.request.counter。 | +| constant_tags | array | optional | [ "source:apisix" ] | 静态标签嵌入到生成的指标中。这对某些信号的度量进行分组很有用。 | + +要了解更多关于如何有效地编写标签,请访问[这里](https://docs.datadoghq.com/getting_started/tagging/#defining-tags) + +### 输出指标 + +启用 datadog 插件之后,APISIX 就会按照下面的指标格式,将数据整理成数据包最终发送到 DogStatsD server。 + +| Metric Name | StatsD Type | Description | +| ----------- | ----------- | ------- | +| Request Counter | Counter | 收到的请求数量。 | +| Request Latency | Histogram | 处理该请求所需的时间(以毫秒为单位)。 | +| Upstream latency | Histogram | 代理请求到上游服务器直到收到响应所需的时间(以毫秒为单位)。 | +| APISIX Latency | Histogram | APISIX 代理处理该请求的时间(以毫秒为单位)。| +| Ingress Size | Timer | 以字节为单位的请求体大小。 | +| Egress Size | Timer | 以字节为单位的响应体大小。 | + +这些指标会带有以下标签,并首先被发送到本地 DogStatsD Agent。 + +> 如果一个标签没有合适的值,该标签将被直接省略。 + +- **route_name**:在路由模式定义中指定的名称,如果不存在或插件属性 `prefer_name` 被设置为 `false`,它将默认使用路由/服务的 id 值。 +- 
**service_name**:如果一个路由是用服务的抽象概念创建的,特定的服务 name/id(基于插件的 `prefer_name` 属性)将被使用。 +- **consumer**:如果路由有一个正在链接中的消费者,那么消费者的用户名将被添加为一个标签。 +- **balancer_ip**:处理当前请求的上游负载均衡器的 IP。 +- **response_status**:HTTP 响应状态代码。 +- **scheme**:已用于提出请求的协议,如 HTTP、gRPC、gRPCs 等。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/dubbo-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/dubbo-proxy.md new file mode 100644 index 0000000..5d748e7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/dubbo-proxy.md @@ -0,0 +1,156 @@ +--- +title: dubbo-proxy +--- + + + +## 描述 + +`dubbo-proxy` 插件允许将 `HTTP` 请求代理到 [**dubbo**](http://dubbo.apache.org)。 + +## 要求 + +如果你正在使用 `OpenResty`, 你需要编译它来支持 `dubbo`, 参考 [APISIX-Runtime](../FAQ.md#如何构建-apisix-runtime-环境)。 + +## 运行时属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------ | ------ | ----------- | -------- | ------------ | -------------------------------------------------------------------- | +| service_name | string | 必选 | | | dubbo 服务名字 | +| service_version | string | 必选 | | | dubbo 服务版本 | +| method | string | 可选 | uri 路径 | | dubbo 服务方法 | + +## 静态属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------ | ------ | ----------- | -------- | ------------ | -------------------------------------------------------------------- | +| upstream_multiplex_count | number | 必选 | 32 | >= 1 | 上游连接中最大的多路复用请求数 | + +## 如何启用 + +首先,在 `config.yaml` 中启用 `dubbo-proxy` 插件: + +``` +# Add this in config.yaml +plugins: + - ... 
# plugin you need + - dubbo-proxy +``` + +然后重载 `APISIX`。 + +这里有个例子,在指定的路由中启用 `dubbo-proxy` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "nodes": { + "127.0.0.1:20880": 1 + }, + "type": "roundrobin" +}' + +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uris": [ + "/hello" + ], + "plugins": { + "dubbo-proxy": { + "service_name": "org.apache.dubbo.sample.tengine.DemoService", + "service_version": "0.0.0", + "method": "tengineDubbo" + } + }, + "upstream_id": 1 +}' +``` + +## 测试插件 + +你可以在 `Tengine` 提供的 [快速开始](https://github.com/alibaba/tengine/tree/master/modules/mod_dubbo#quick-start) 例子中使用上述配置进行测试。 + +将会有同样的结果。 + +从上游 `dubbo` 服务返回的数据一定是 `Map` 类型。 + +如果返回的数据如下 + +```json +{ + "status": "200", + "header1": "value1", + "header2": "value2", + "body": "blahblah" +} +``` + +则对应的 `HTTP` 响应如下 + +```http +HTTP/1.1 200 OK # "status" will be the status code +... +header1: value1 +header2: value2 +... + +blahblah # "body" will be the body +``` + +## 删除插件 + +当你想在某个路由或服务中禁用 `dubbo-proxy` 插件,非常简单,你可以直接删除插件配置中的 `json` 配置,不需要重启服务就能立即生效: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uris": [ + "/hello" + ], + "plugins": { + }, + "upstream_id": 1 +}' +``` + +现在 `dubbo-proxy` 插件就已经被禁用了。此方法同样适用于其他插件。 + +如果你想彻底禁用 `dubbo-proxy` 插件, +你需要在 `config.yaml` 中注释掉以下内容: + +```yaml +plugins: + - ... 
# plugin you need + #- dubbo-proxy +``` + +然后重新加载 `APISIX`。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/echo.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/echo.md new file mode 100644 index 0000000..e3920bb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/echo.md @@ -0,0 +1,133 @@ +--- +title: echo +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Echo +description: 本文介绍了关于 Apache APISIX `echo` 插件的基本信息及使用方法。 +--- + + + + +## 描述 + +`echo` 插件可以帮助用户尽可能地全面了解如何开发 APISIX 插件。 + +该插件展示了如何在常见的 `phase` 中实现相应的功能,常见的 `phase` 包括:init, rewrite, access, balancer, header filter, body filter 以及 log。 + +:::caution WARNING + +`echo` 插件只能用作示例,并不能处理一些特别的场景。**请勿将该插件用在生产环境中!** + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 描述 | +| ----------- | ------ | ------ | ----------------------------------------------------------------------------------------------- | +| before_body | string | 否 | 在 `body` 属性之前添加的内容,如果 `body` 属性没有指定,就会将其添加在上游 `response body` 之前。 | +| body | string | 否 | 返回给客户端的响应内容,它将覆盖上游返回的响应 `body`。 | +| after_body | string | 否 | 在 `body` 属性之后添加的内容,如果 body 属性没有指定将在上游响应 `body` 之后添加。 | +| headers | object | 否 | 返回值的 headers。 | + +:::note + +参数 `before_body`、`body` 和 `after_body` 至少要配置一个。 + +::: + +## 启用插件 + +以下示例展示了如何在指定路由中启用 `echo` 插件。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "echo": { + "before_body": "before the body modification " + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +## 测试插件 + 
+通过上述命令启用插件后,你可以使用如下命令测试插件是否启用成功: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... +before the body modification hello world +``` + +## 删除插件 + +当你需要禁用 `echo` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/elasticsearch-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/elasticsearch-logger.md new file mode 100644 index 0000000..362b579 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/elasticsearch-logger.md @@ -0,0 +1,446 @@ +--- +title: elasticsearch-logger +keywords: + - APISIX + - API 网关 + - 插件 + - Elasticsearch-logger + - 日志 +description: elasticsearch-logger Plugin 将请求和响应日志批量推送到 Elasticsearch,并支持日志格式的自定义。 +--- + + + + + + + +## 描述 + +`elasticsearch-logger` 插件将请求和响应日志批量推送到 [Elasticsearch](https://www.elastic.co),并支持自定义日志格式。启用后,插件会将请求上下文信息序列化为 [Elasticsearch Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 并将其添加到队列中,然后再推送到 Elasticsearch。有关更多详细信息,请参阅 [批处理器](../batch-processor.md)。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ------------- | ------- | -------- | -------------------- | ------------------------------------------------------------ | +| endup_addrs | array[string] | 是 | | Elasticsearch API 端点地址。如果配置了多个端点,则会随机写入。 | +| field | object | 是 | | Elasticsearch `field` 配置。 | +| field.index | string | 是 | | Elasticsearch [_index 字段](https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-index-field.html#mapping-index-field)。 | +| log_format | object | 否 | | JSON 格式的键值对中的自定义日志格式。值中支持 [APISIX](../apisix-variable.md) 或 [NGINX 变量](http://nginx.org/en/docs/varindex.html)。 | +| auth | 
array | 否 | | Elasticsearch [身份验证](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) 配置。 | +| auth.username | string | 是 | | Elasticsearch [身份验证](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) 用户名​​。 | +| auth.password | string | 是 | | Elasticsearch [身份验证](https://www.elastic.co/guide/en/elasticsearch/reference/current/setting-up-authentication.html) 密码。 | +| ssl_verify | boolean | 否 | true | 如果为 true,则执行 SSL 验证。 | +| timeout | integer | 否 | 10 | Elasticsearch 发送数据超时(秒)。 | +| include_req_body | boolean | 否 | false |如果为 true,则将请求主体包含在日志中。请注意,如果请求主体太大而无法保存在内存中,则由于 NGINX 的限制而无法记录。| +| include_req_body_expr | array[array] | 否 | | 一个或多个条件的数组,形式为 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。在 `include_req_body` 为 true 时使用。仅当此处配置的表达式计算结果为 true 时,才会记录请求主体。| +| include_resp_body | boolean | 否 | false | 如果为 true,则将响应主体包含在日志中。| +| include_resp_body_expr | array[array] | 否 | | 一个或多个条件的数组,形式为 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。在 `include_resp_body` 为 true 时使用。仅当此处配置的表达式计算结果为 true 时,才会记录响应主体。| + +注意:schema 中还定义了 `encrypt_fields = {"auth.password"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +本插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。 + +## Plugin Metadata + +| Name | Type | Required | Default | Description | +|------|------|----------|---------|-------------| +| log_format | object | 否 | |自定义日志格式为 JSON 格式的键值对。值中支持 [APISIX 变量](../apisix-variable.md) 和 [NGINX 变量](http://nginx.org/en/docs/varindex.html)。 | + +## 示例 + +以下示例演示了如何为不同场景配置 `elasticsearch-logger` 插件。 + +要遵循示例,请在 Docker 中启动 Elasticsearch 实例: + +```shell +docker run -d \ + --name elasticsearch \ + --network apisix-quickstart-net \ + -v elasticsearch_vol:/usr/share/elasticsearch/data/ \ + -p 9200:9200 \ + -p 9300:9300 \ + -e ES_JAVA_OPTS="-Xms512m 
-Xmx512m" \ + -e discovery.type=single-node \ + -e xpack.security.enabled=false \ + docker.elastic.co/elasticsearch/elasticsearch:7.17.1 +``` + +在 Docker 中启动 Kibana 实例,以可视化 Elasticsearch 中的索引数据: + +```shell +docker run -d \ + --name kibana \ + --network apisix-quickstart-net \ + -p 5601:5601 \ + -e ELASTICSEARCH_HOSTS="http://elasticsearch:9200" \ + docker.elastic.co/kibana/kibana:7.17.1 +``` + +如果成功,您应该在 [localhost:5601](http://localhost:5601) 上看到 Kibana 仪表板。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 以默认日志格式记录 + +以下示例演示如何在路由上启用 `elasticsearch-logger` 插件,该插件记录客户端对路由的请求和响应,并将日志推送到 Elasticsearch。 + +使用 `elasticsearch-logger` 创建路由,将 `index` 字段配置为 `gateway`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "elasticsearch-logger-route", + "uri": "/anything", + "plugins": { + "elasticsearch-logger": { + "endpoint_addrs": ["http://elasticsearch:9200"], + "field": { + "index": "gateway" + } + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +向路由发送请求以生成日志条目: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +导航到 [localhost:5601](http://localhost:5601) 上的 Kibana 仪表板,并在 __Discover__ 选项卡下创建一个新的索引模式 `gateway` 以从 Elasticsearch 获取数据。配置完成后,导航回 __Discover__ 选项卡,您应该会看到生成的日志,类似于以下内容: + +```json +{ + "_index": "gateway", + "_id": "CE-JL5QBOkdYRG7kEjTJ", + "_version": 1, + "_score": 1, + "_source": { + "request": { + "headers": { + "host": "127.0.0.1:9080", + "accept": "*/*", + "user-agent": "curl/8.6.0" + }, + "size": 85, + "querystring": {}, + "method": "GET", + "url": "http://127.0.0.1:9080/anything", + "uri": "/anything" + }, + "response": { + "headers": { + "content-type": "application/json", + "access-control-allow-credentials": "true", + "server": "APISIX/3.11.0", + "content-length": 
"390", + "access-control-allow-origin": "*", + "connection": "close", + "date": "Mon, 13 Jan 2025 10:18:14 GMT" + }, + "status": 200, + "size": 618 + }, + "route_id": "elasticsearch-logger-route", + "latency": 585.00003814697, + "apisix_latency": 18.000038146973, + "upstream_latency": 567, + "upstream": "50.19.58.113:80", + "server": { + "hostname": "0b9a772e68f8", + "version": "3.11.0" + }, + "service_id": "", + "client_ip": "192.168.65.1" + }, + "fields": { + ... + } +} +``` + +### 使用 Plugin Metadata 记录请求和响应标头 + +以下示例演示了如何使用 [Plugin Metadata](../terminology/plugin-metadata.md) 和 [NGINX 变量](http://nginx.org/en/docs/varindex.html) 自定义日志格式,以记录请求和响应中的特定标头。 + +在 APISIX 中,[Plugin Metadata](../terminology/plugin-metadata.md) 用于配置同一插件的所有插件实例的通用元数据字段。当插件在多个资源中启用并需要对其元数据字段进行通用更新时,它很有用。 + +首先,使用 `elasticsearch-logger` 创建路由,如下所示: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "elasticsearch-logger-route", + "uri": "/anything", + "plugins": { + "elasticsearch-logger": { + "endpoint_addrs": ["http://elasticsearch:9200"], + "field": { + "index": "gateway" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +接下来,配置 `elasticsearch-logger` 的 Plugin Metadata: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr", + "env": "$http_env", + "resp_content_type": "$sent_http_Content_Type" + } + }' +``` + +使用 `env` 标头向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H "env: dev" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +导航到 [localhost:5601](http://localhost:5601) 上的 Kibana 仪表板,并在 __Discover__ 选项卡下创建一个新的索引模式 `gateway` 以从 Elasticsearch 获取数据(如果您尚未这样做)。配置完成后,导航回 __Discover__ 选项卡,您应该会看到生成的日志,类似于以下内容: + +```json +{ + "_index": "gateway", + "_id": "Ck-WL5QBOkdYRG7kODS0", + 
"_version": 1, + "_score": 1, + "_source": { + "client_ip": "192.168.65.1", + "route_id": "elasticsearch-logger-route", + "@timestamp": "2025-01-06T10:32:36+00:00", + "host": "127.0.0.1", + "resp_content_type": "application/json" + }, + "fields": { + ... + } +} +``` + +### 有条件地记录请求主体 + +以下示例演示了如何有条件地记录请求主体。 + +使用 `elasticsearch-logger` 创建路由,仅在 URL 查询字符串 `log_body` 为 `true` 时记录请求主体: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "plugins": { + "elasticsearch-logger": { + "endpoint_addrs": ["http://elasticsearch:9200"], + "field": { + "index": "gateway" + }, + "include_req_body": true, + "include_req_body_expr": [["arg_log_body", "==", "yes"]] + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + }, + "uri": "/anything", + "id": "elasticsearch-logger-route" +}' +``` + +使用满足以下条件的 URL 查询字符串向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything?log_body=yes" -X POST -d '{"env": "dev"}' +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +导航到 [localhost:5601](http://localhost:5601) 上的 Kibana 仪表板,并在 __Discover__ 选项卡下创建一个新的索引模式 `gateway` 以从 Elasticsearch 获取数据(如果您尚未这样做)。配置完成后,导航回 __Discover__ 选项卡,您应该会看到生成的日志,类似于以下内容: + +```json +{ + "_index": "gateway", + "_id": "Dk-cL5QBOkdYRG7k7DSW", + "_version": 1, + "_score": 1, + "_source": { + "request": { + "headers": { + "user-agent": "curl/8.6.0", + "accept": "*/*", + "content-length": "14", + "host": "127.0.0.1:9080", + "content-type": "application/x-www-form-urlencoded" + }, + "size": 182, + "querystring": { + "log_body": "yes" + }, + "body": "{\"env\": \"dev\"}", + "method": "POST", + "url": "http://127.0.0.1:9080/anything?log_body=yes", + "uri": "/anything?log_body=yes" + }, + "start_time": 1735965595203, + "response": { + "headers": { + "content-type": "application/json", + "server": "APISIX/3.11.0", + "access-control-allow-credentials": "true", + "content-length": "548", + "access-control-allow-origin": "*", + "connection": 
"close", + "date": "Mon, 13 Jan 2025 11:02:32 GMT" + }, + "status": 200, + "size": 776 + }, + "route_id": "elasticsearch-logger-route", + "latency": 703.9999961853, + "apisix_latency": 34.999996185303, + "upstream_latency": 669, + "upstream": "34.197.122.172:80", + "server": { + "hostname": "0b9a772e68f8", + "version": "3.11.0" + }, + "service_id": "", + "client_ip": "192.168.65.1" + }, + "fields": { + ... + } +} +``` + +向路由发送一个没有任何 URL 查询字符串的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST -d '{"env": "dev"}' +``` + +导航到 Kibana 仪表板 __Discover__ 选项卡,您应该看到生成的日志,但没有请求正文: + +```json +{ + "_index": "gateway", + "_id": "EU-eL5QBOkdYRG7kUDST", + "_version": 1, + "_score": 1, + "_source": { + "request": { + "headers": { + "content-type": "application/x-www-form-urlencoded", + "accept": "*/*", + "content-length": "14", + "host": "127.0.0.1:9080", + "user-agent": "curl/8.6.0" + }, + "size": 169, + "querystring": {}, + "method": "POST", + "url": "http://127.0.0.1:9080/anything", + "uri": "/anything" + }, + "start_time": 1735965686363, + "response": { + "headers": { + "content-type": "application/json", + "access-control-allow-credentials": "true", + "server": "APISIX/3.11.0", + "content-length": "510", + "access-control-allow-origin": "*", + "connection": "close", + "date": "Mon, 13 Jan 2025 11:15:54 GMT" + }, + "status": 200, + "size": 738 + }, + "route_id": "elasticsearch-logger-route", + "latency": 680.99999427795, + "apisix_latency": 4.9999942779541, + "upstream_latency": 676, + "upstream": "34.197.122.172:80", + "server": { + "hostname": "0b9a772e68f8", + "version": "3.11.0" + }, + "service_id": "", + "client_ip": "192.168.65.1" + }, + "fields": { + ... 
+ } +} +``` + +:::info + +如果您除了将 `include_req_body` 或 `include_resp_body` 设置为 `true` 之外还自定义了 `log_format`,则插件不会在日志中包含正文。 + +作为一种解决方法,您可以在日志格式中使用 NGINX 变量 `$request_body`,例如: + +```json +{ + "elasticsearch-logger": { + ..., + "log_format": {"body": "$request_body"} + } +} +``` + +::: diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/error-log-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/error-log-logger.md new file mode 100644 index 0000000..d9d5590 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/error-log-logger.md @@ -0,0 +1,192 @@ +--- +title: error-log-logger +keywords: + - APISIX + - API 网关 + - 错误日志 + - Plugin +description: API 网关 Apache APISIX error-log-logger 插件用于将 APISIX 的错误日志推送到 TCP、Apache SkyWalking、Apache Kafka 或 ClickHouse 服务器。 +--- + + + +## 描述 + +`error-log-logger` 插件用于将 APISIX 的错误日志 (`error.log`) 推送到 TCP、Apache SkyWalking、Apache Kafka 或 ClickHouse 服务器,你还可以设置错误日志级别以将日志发送到服务器。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| -------------------------------- | ------- | ------ | ------------------------------ | ------------- | -------------------------------------------------------------------------------- | +| tcp.host | string | 是 | | | TCP 服务的 IP 地址或主机名。 | +| tcp.port | integer | 是 | | [0,...] 
| 目标端口。 | +| tcp.tls | boolean | 否 | false | [false, true] | 当设置为 `true` 时执行 SSL 验证。 | +| tcp.tls_server_name | string | 否 | | | TLS 服务名称标记。 | +| skywalking.endpoint_addr | string | 否 | http://127.0.0.1:12900/v3/logs | | SkyWalking 的 HTTP endpoint 地址,例如:http://127.0.0.1:12800。 | +| skywalking.service_name | string | 否 | APISIX | | SkyWalking 上报的 service 名称。 | +| skywalking.service_instance_name | String | 否 | APISIX Instance Name | | SkyWalking 上报的 service 实例名,如果希望直接获取本机主机名请设置为 `$hostname`。 | +| clickhouse.endpoint_addr | String | 否 | http://127.0.0.1:8213 | | ClickHouse 的 HTTP endpoint 地址,例如 `http://127.0.0.1:8213`。 | +| clickhouse.user | String | 否 | default | | ClickHouse 的用户名。 | +| clickhouse.password | String | 否 | | | ClickHouse 的密码。 | +| clickhouse.database | String | 否 | | | ClickHouse 的用于接收日志的数据库。 | +| clickhouse.logtable | String | 否 | | | ClickHouse 的用于接收日志的表。 | +| kafka.brokers | array | 是 | | | 需要推送的 Kafka broker 列表。 | +| kafka.brokers.host | string | 是 | | | Kafka broker 的节点 host 配置,例如 `192.168.1.1`| +| kafka.brokers.port | string | 是 | | | Kafka broker 的节点端口配置 | +| kafka.brokers.sasl_config | object | 否 | | | Kafka broker 中的 sasl_config | +| kafka.brokers.sasl_config.mechanism | string | 否 | "PLAIN" | ["PLAIN"] | Kafka broker 中的 sasl 认证机制 | +| kafka.brokers.sasl_config.user | string | 是 | | | Kafka broker 中 sasl 配置中的 user,如果 sasl_config 存在,则必须填写 | +| kafka.brokers.sasl_config.password | string | 是 | | | Kafka broker 中 sasl 配置中的 password,如果 sasl_config 存在,则必须填写 | +| kafka.kafka_topic | string | 是 | | | 需要推送的 Kafka topic。| +| kafka.producer_type | string | 否 | async | ["async", "sync"] | 生产者发送消息的模式。| +| kafka.required_acks | integer | 否 | 1 | [0, 1, -1] | 生产者在确认一个请求发送完成之前需要收到的反馈信息的数量。该参数是为了保证发送请求的可靠性。该属性的配置与 Kafka `acks` 属性相同,具体配置请参考 [Apache Kafka 文档](https://kafka.apache.org/documentation/#producerconfigs_acks)。 | +| kafka.key | string | 否 | | | 用于消息分区而分配的密钥。 | +| kafka.cluster_name | integer | 否 | 1 | [0,...] 
| Kafka 集群的名称,当有两个及以上 Kafka 集群时使用。只有当 `producer_type` 设为 `async` 模式时才可以使用该属性。| +| kafka.meta_refresh_interval | integer | 否 | 30 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的 `refresh_interval` 参数,用于指定自动刷新 metadata 的间隔时长,单位为秒。 | +| timeout | integer | 否 | 3 | [1,...] | 连接和发送数据超时间,以秒为单位。 | +| keepalive | integer | 否 | 30 | [1,...] | 复用连接时,连接保持的时间,以秒为单位。 | +| level | string | 否 | WARN | | 进行错误日志筛选的级别,默认为 `WARN`,取值 ["STDERR", "EMERG", "ALERT", "CRIT", "ERR", "ERROR", "WARN", "NOTICE", "INFO", "DEBUG"],其中 `ERR` 与 `ERROR` 级别一致。 | + +注意:schema 中还定义了 `encrypt_fields = {"clickhouse.password"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。 + +### 默认日志格式示例 + +```text +["2024/01/06 16:04:30 [warn] 11786#9692271: *1 [lua] plugin.lua:205: load(): new plugins: {"error-log-logger":true}, context: init_worker_by_lua*","\n","2024/01/06 16:04:30 [warn] 11786#9692271: *1 [lua] plugin.lua:255: load_stream(): new plugins: {"limit-conn":true,"ip-restriction":true,"syslog":true,"mqtt-proxy":true}, context: init_worker_by_lua*","\n"] +``` + +## 启用插件 + +该插件默认为禁用状态,你可以在 `./conf/config.yaml` 中启用 `error-log-logger` 插件。你可以参考如下示例启用插件: + +```yaml title="./conf/config.yaml" +plugins: # plugin list + ...... 
+ - request-id + - hmac-auth + - api-breaker + - error-log-logger # enable plugin `error-log-logger +``` + +完成插件配置后,你需要重新加载 APISIX,插件才会生效。 + +:::note 注意 + +该插件属于 APISIX 全局性插件,不需要在任何路由或服务中绑定。 + +::: + +### 配置 TCP 服务器地址 + +你可以通过配置插件元数据来设置 TCP 服务器地址,如下所示: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "tcp": { + "host": "127.0.0.1", + "port": 1999 + }, + "inactive_timeout": 1 +}' +``` + +### 配置 SkyWalking OAP 服务器地址 + +通过以下配置插件元数据设置 SkyWalking OAP 服务器地址,如下所示: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "skywalking": { + "endpoint_addr": "http://127.0.0.1:12800/v3/logs" + }, + "inactive_timeout": 1 +}' +``` + +### 配置 ClickHouse 数据库 + +该插件支持将错误日志作为字符串发送到 ClickHouse 服务器中对应表的 `data` 字段。 + +你可以按照如下方式进行配置: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "clickhouse": { + "user": "default", + "password": "a", + "database": "error_log", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:8123" + } +}' +``` + +### 配置 Kafka + +该插件支持将错误日志发送到 Kafka,你可以按照如下方式进行配置: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "kafka":{ + "brokers":[ + { + "host":"127.0.0.1", + "port":9092 + } + ], + "kafka_topic":"test2" + }, + "level":"ERROR", + "inactive_timeout":1 +}' +``` + +## 删除插件 + +当你不再需要该插件时,只需要在 `./conf/config.yaml` 中删除或注释该插件即可。 + +```yaml +plugins: # plugin list + ... ... 
+ - request-id + - hmac-auth + - api-breaker + #- error-log-logger # enable plugin `error-log-logger +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ext-plugin-post-req.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ext-plugin-post-req.md new file mode 100644 index 0000000..6efcf78 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ext-plugin-post-req.md @@ -0,0 +1,33 @@ +--- +title: ext-plugin-post-req +keywords: + - Apache APISIX + - Plugin + - ext-plugin-post-req +description: 本文介绍了关于 Apache APISIX `ext-plugin-post-req` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`ext-plugin-post-req` 插件的功能与 `ext-plugin-pre-req` 插件的不同之处在于:`ext-plugin-post-req` 插件是在内置 Lua 插件执行之后且在请求到达上游之前工作。 + +你可以参考 [ext-plugin-pre-req](./ext-plugin-pre-req.md) 文档,学习如何配置和使用 `ext-plugin-post-req` 插件。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ext-plugin-post-resp.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ext-plugin-post-resp.md new file mode 100644 index 0000000..e4cacbe --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ext-plugin-post-resp.md @@ -0,0 +1,119 @@ +--- +title: ext-plugin-post-resp +keywords: + - Apache APISIX + - API 网关 + - Plugin + - ext-plugin-post-resp +description: 本文介绍了关于 Apache APISIX `ext-plugin-post-resp` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`ext-plugin-post-resp` 插件用于在执行内置 Lua 插件之前和在 Plugin Runner 内运行特定的 External Plugin。 + +`ext-plugin-post-resp` 插件将在请求获取到上游的响应之后执行。 + +启用本插件之后,APISIX 将使用 [lua-resty-http](https://github.com/api7/lua-resty-http) 库向上游发起请求,这会导致: + +- [proxy-control](./proxy-control.md) 插件不可用 +- [proxy-mirror](./proxy-mirror.md) 插件不可用 +- [proxy-cache](./proxy-cache.md) 插件不可用 +- [APISIX 与上游间的双向认证](../mtls.md#apisix-与上游间的双向认证) 功能尚不可用 + +如果你想了解更多关于 External Plugin 的信息,请参考 [External Plugin](../external-plugin.md) 。 + +:::note + +External Plugin 执行的结果会影响当前请求的响应。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | 
+| ----------------- | ------ | ------ | ------- | --------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| conf | array | 否 | | [{"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}] | 在 Plugin Runner 内执行的插件列表的配置。 | +| allow_degradation | boolean| 否 | false | [false, true] | 当 Plugin Runner 临时不可用时是否允许请求继续,当值设置为 `true` 时则自动允许请求继续。 | + +## 启用插件 + +以下示例展示了如何在指定路由中启用 `ext-plugin-post-resp` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "ext-plugin-post-resp": { + "conf" : [ + {"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"} + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,可以使用如下命令测试插件是否启用成功: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +在返回结果中可以看到刚刚配置的 Plugin Runner 已经被触发,同时 `ext-plugin-A` 插件也已经被执行。 + +## 删除插件 + +当你需要禁用 `ext-plugin-post-resp` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ext-plugin-pre-req.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ext-plugin-pre-req.md new file mode 100644 index 0000000..4944501 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ext-plugin-pre-req.md @@ -0,0 +1,110 @@ +--- +title: ext-plugin-pre-req +keywords: + - Apache APISIX + - API 网关 + - Plugin + - ext-plugin-pre-req +description: 本文介绍了关于 Apache 
APISIX `ext-plugin-pre-req` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`ext-plugin-pre-req` 插件用于在执行内置 Lua 插件之前和在 Plugin Runner 内运行特定的 External Plugin。 + +如果你想了解更多关于 External Plugin 的信息,请参考 [External Plugin](../external-plugin.md) 。 + +:::note + +External Plugin 执行的结果会影响当前请求的行为。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ----------------- | ------ | ------ | ------- | --------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| conf | array | 否 | | [{"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"}] | 在 Plugin Runner 内执行的插件列表的配置。 | +| allow_degradation | boolean| 否 | false | [false, true] | 当 Plugin Runner 临时不可用时是否允许请求继续,当值设置为 true 时则自动允许请求继续。 | + +## 启用插件 + +以下示例展示了如何在指定路由中启用 `ext-plugin-pre-req` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "ext-plugin-pre-req": { + "conf" : [ + {"name": "ext-plugin-A", "value": "{\"enable\":\"feature\"}"} + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,可以使用如下命令测试插件是否启用成功: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +在返回结果中可以看到刚刚配置的 Plugin Runner 已经被触发,同时 `ext-plugin-A` 插件也已经被执行。 + +## 删除插件 + +当你需要禁用 `ext-plugin-pre-req` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/fault-injection.md 
b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/fault-injection.md new file mode 100644 index 0000000..c0ae512 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/fault-injection.md @@ -0,0 +1,299 @@ +--- +title: fault-injection +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Fault Injection + - fault-injection +description: 本文介绍了关于 Apache APISIX `fault-injection` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`fault-injection` 插件是故障注入插件,该插件可以和其他插件一起使用,并在其他插件执行前被执行。 + +## 属性 + +| 名称 | 类型 | 必选项 | 有效值 | 描述 | +| ----------------- | ------- | ---- | ---------- | -------------------------- | +| abort.http_status | integer | 是 | [200, ...] | 返回给客户端的 HTTP 状态码 | +| abort.body | string | 否 | | 返回给客户端的响应数据。支持使用 NGINX 变量,如 `client addr: $remote_addr\n`| +| abort.headers | object | 否 | | 返回给客户端的响应头,可以包含 NGINX 变量,如 `$remote_addr` | +| abort.percentage | integer | 否 | [0, 100] | 将被中断的请求占比 | +| abort.vars | array[] | 否 | | 执行故障注入的规则,当规则匹配通过后才会执行故障注入。`vars` 是一个表达式的列表,来自 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list)。 | +| delay.duration | number | 是 | | 延迟时间,可以指定小数 | +| delay.percentage | integer | 否 | [0, 100] | 将被延迟的请求占比 | +| delay.vars | array[] | 否 | | 执行请求延迟的规则,当规则匹配通过后才会延迟请求。`vars` 是一个表达式列表,来自 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list)。 | + +:::info IMPORTANT + +`abort` 属性将直接返回给客户端指定的响应码并且终止其他插件的执行。 + +`delay` 属性将延迟某个请求,并且还会执行配置的其他插件。 + +`abort` 和 `delay` 属性至少要配置一个。 + +::: + +:::tip + +`vars` 是由 [`lua-resty-expr`](https://github.com/api7/lua-resty-expr) 的表达式组成的列表,它可以灵活的实现规则之间的 AND/OR 关系,示例如下: + +```json +[ + [ + [ "arg_name","==","jack" ], + [ "arg_age","==",18 ] + ], + [ + [ "arg_name2","==","allen" ] + ] +] +``` + +以上示例表示前两个表达式之间的关系是 AND,而前两个和第三个表达式之间的关系是 OR。 + +::: + +## 启用插件 + +你可以在指定路由启用 `fault-injection` 插件,并指定 `abort` 属性。如下所示: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 
's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "Fault Injection!" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +同样,我们也可以指定 `delay` 属性。如下所示: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "delay": { + "duration": 3 + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +还可以同时为指定路由启用 `fault-injection` 插件,并指定 `abort` 属性和 `delay` 属性的 `vars` 规则。如下所示: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 403, + "body": "Fault Injection!\n", + "vars": [ + [ + [ "arg_name","==","jack" ] + ] + ] + }, + "delay": { + "duration": 2, + "vars": [ + [ + [ "http_age","==","18" ] + ] + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +通过上述示例启用插件后,可以向路由发起如下请求: + +```shell +curl http://127.0.0.1:9080/hello -i +``` + +```shell +HTTP/1.1 200 OK +Date: Mon, 13 Jan 2020 13:50:04 GMT +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server + +Fault Injection! 
+``` + +通过如下命令可以向配置 `delay` 属性的路由发起请求: + +```shell +time curl http://127.0.0.1:9080/hello -i +``` + +```shell +HTTP/1.1 200 OK +Content-Type: application/octet-stream +Content-Length: 6 +Connection: keep-alive +Server: APISIX web server +Date: Tue, 14 Jan 2020 14:30:54 GMT +Last-Modified: Sat, 11 Jan 2020 12:46:21 GMT + +hello + +real 0m3.034s +user 0m0.007s +sys 0m0.010s +``` + +### 标准匹配的故障注入 + +你可以在 `fault-injection` 插件中使用 `vars` 规则设置特定规则: + +```Shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 403, + "body": "Fault Injection!\n", + "vars": [ + [ + [ "arg_name","==","jack" ] + ] + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +使用不同的 `name` 参数测试路由: + +```Shell +curl "http://127.0.0.1:9080/hello?name=allen" -i +``` + +没有故障注入的情况下,你可以得到如下结果: + +```shell +HTTP/1.1 200 OK +Content-Type: application/octet-stream +Transfer-Encoding: chunked +Connection: keep-alive +Date: Wed, 20 Jan 2021 07:21:57 GMT +Server: APISIX/2.2 + +hello +``` + +如果我们将 `name` 设置为与配置相匹配的名称,`fault-injection` 插件将被执行: + +```Shell +curl "http://127.0.0.1:9080/hello?name=jack" -i +``` + +```shell +HTTP/1.1 403 Forbidden +Date: Wed, 20 Jan 2021 07:23:37 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/2.2 + +Fault Injection! 
+``` + +## 删除插件 + +当你需要禁用 `fault-injection` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/file-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/file-logger.md new file mode 100644 index 0000000..c5953c2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/file-logger.md @@ -0,0 +1,242 @@ +--- +title: file-logger +keywords: + - APISIX + - API 网关 + - Plugin + - file-logger +description: API 网关 Apache APISIX file-logger 插件可用于将日志数据存储到指定位置。 +--- + + + +## 描述 + +`file-logger` 插件可用于将日志数据存储到指定位置。 + +:::tip 提示 + +`file-logger` 插件特点如下: + +- 可将指定路由的日志发送到指定位置,方便你在本地统计各个路由的请求和响应数据。在使用 [debug mode](../../../en/latest/debug-mode.md) 时,你可以很轻松地将出现问题的路由的日志输出到指定文件中,从而更方便地排查问题。 +- 可以获取 [APISIX 变量](../../../en/latest/apisix-variable.md)和 [NGINX 变量](http://nginx.org/en/docs/varindex.html),而 `access.log` 仅能使用 NGINX 变量。 +- 支持热加载,你可以在路由中随时更改其配置并立即生效。而修改 `access.log` 相关配置,则需要重新加载 APISIX。 +- 支持以 JSON 格式保存日志数据。 +- 可以在 `log phase` 阶段修改 `file-logger` 执行的函数来收集你所需要的信息。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 描述 | +| ---------------- | ------- |-----| ------------------------------------------------ | +| path | string | 是 | 自定义输出文件路径。例如:`logs/file.log`。 | +| log_format | object | 否 | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| include_req_body | boolean | 否 | 当设置为 `true` 时,日志中将包含请求体。如果请求体太大而无法在内存中保存,则由于 Nginx 的限制,无法记录请求体。| +| include_req_body_expr | array | 否 | 当 `include_req_body` 属性设置为 `true` 时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录请求体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 | +| include_resp_body | boolean | 
否 | 当设置为 `true` 时,生成的文件包含响应体。 | +| include_resp_body_expr | array | 否 | 当 `include_resp_body` 属性设置为 `true` 时,使用该属性并基于 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 进行过滤。如果存在,则仅在表达式计算结果为 `true` 时记录响应。 | +| match | array[array] | 否 | 当设置了这个选项后,只有匹配规则的日志才会被记录。`match` 是一个表达式列表,具体请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list)。 | + +### 默认日志格式示例 + + ```json + { + "service_id": "", + "apisix_latency": 100.99999809265, + "start_time": 1703907485819, + "latency": 101.99999809265, + "upstream_latency": 1, + "client_ip": "127.0.0.1", + "route_id": "1", + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "request": { + "headers": { + "host": "127.0.0.1:1984", + "content-type": "application/x-www-form-urlencoded", + "user-agent": "lua-resty-http/0.16.1 (Lua) ngx_lua/10025", + "content-length": "12" + }, + "method": "POST", + "size": 194, + "url": "http://127.0.0.1:1984/hello?log_body=no", + "uri": "/hello?log_body=no", + "querystring": { + "log_body": "no" + } + }, + "response": { + "headers": { + "content-type": "text/plain", + "connection": "close", + "content-length": "12", + "server": "APISIX/3.7.0" + }, + "status": 200, + "size": 123 + }, + "upstream": "127.0.0.1:1982" + } + ``` + +## 插件元数据设置 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ | +| log_format | object | 可选 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::note 注意 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `file-logger` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/file-logger \ +-H 
"X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你可以在日志系统中看到如下类似日志: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "file-logger": { + "path": "logs/file.log" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:9001": 1 + } + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... +hello, world +``` + +访问成功后,你可以在对应的 `logs` 目录下找到 `file.log` 文件。 + +## 过滤日志 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "file-logger": { + "path": "logs/file.log", + "match": [ + [ + [ "arg_name","==","jack" ] + ] + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:9001": 1 + } + }, + "uri": "/hello" +}' +``` + +测试: + +```shell +curl -i http://127.0.0.1:9080/hello?name=jack +``` + +在 `logs/file.log` 中可以看到日志记录 + +```shell +curl -i http://127.0.0.1:9080/hello?name=rose +``` + +在 `logs/file.log` 中看不到日志记录 + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:9001": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/forward-auth.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/forward-auth.md new file mode 100644 index 
0000000..a030a6f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/forward-auth.md @@ -0,0 +1,189 @@ +--- +title: forward-auth +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Forward Authentication + - forward-auth +description: 本文介绍了关于 Apache APISIX `forward-auth` 插件的基本信息及使用方法。 +--- + + +## 描述 + +`forward-auth` 插件使用的是经典外部认证。当身份认证失败时,可以实现自定义错误或者重定向到认证页面的场景。 + +`forward-auth` 插件巧妙地将身份认证和授权逻辑移到了一个专门的外部服务中,APISIX 将用户的请求转发给认证服务并阻塞原始请求,然后在认证服务下以非 2xx 状态响应时进行结果替换。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ----------------- | ------------- | ------| ------- | -------------- | -------------------------------------------------------------------------------------------------------------------- | +| uri | string | 是 | | | 设置 `authorization` 服务的地址 (例如:https://localhost:9188)。 | +| ssl_verify | boolean | 否 | true | [true, false] | 当设置为 `true` 时,验证 SSL 证书。 | +| request_method | string | 否 | GET | ["GET","POST"] | 客户端向 `authorization` 服务发送请求的方法。当设置为 POST 时,会将 `request body` 转发至 `authorization` 服务。 | +| request_headers | array[string] | 否 | | | 设置需要由客户端转发到 `authorization` 服务的请求头。如果没有设置,则只发送 APISIX 提供的 headers (例如:X-Forwarded-XXX)。 | +| upstream_headers | array[string] | 否 | | | 认证通过时,设置 `authorization` 服务转发至 `upstream` 的请求头。如果不设置则不转发任何请求头。 | +| client_headers | array[string] | 否 | | | 认证失败时,由 `authorization` 服务向 `client` 发送的响应头。如果不设置则不转发任何响应头。 | +| timeout | integer | 否 | 3000ms | [1, 60000]ms | `authorization` 服务请求超时时间。 | +| keepalive | boolean | 否 | true | [true, false] | HTTP 长连接。 | +| keepalive_timeout | integer | 否 | 60000ms | [1000, ...]ms | 长连接超时时间。 | +| keepalive_pool | integer | 否 | 5 | [1, ...]ms | 长连接池大小。 | +| allow_degradation | boolean | 否 | false | | 当设置为 `true` 时,允许在身份验证服务器不可用时跳过身份验证。 | +| status_on_error | integer | 否 | 403 | [200,...,599] | 设置授权服务出现网络错误时返回给客户端的 HTTP 状态。默认状态为“403”。 | + +## 数据定义 + +APISIX 将生成并发送如下所示的请求头到认证服务: + +| Scheme | HTTP Method | Host | URI | Source IP | +| ----------------- | ------------------ | 
----------------- | --------------- | --------------- | +| X-Forwarded-Proto | X-Forwarded-Method | X-Forwarded-Host | X-Forwarded-Uri | X-Forwarded-For | + +## 使用示例 + +首先,你需要设置一个外部认证服务。以下示例使用的是 Apache APISIX 无服务器插件模拟服务: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/auth' \ + -H "X-API-KEY: $admin_key" \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/auth", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [ + "return function (conf, ctx) + local core = require(\"apisix.core\"); + local authorization = core.request.header(ctx, \"Authorization\"); + if authorization == \"123\" then + core.response.exit(200); + elseif authorization == \"321\" then + core.response.set_header(\"X-User-ID\", \"i-am-user\"); + core.response.exit(200); + else core.response.set_header(\"Location\", \"http://example.com/auth\"); + core.response.exit(403); + end + end" + ] + } + } +}' +``` + +现在你可以在指定 Route 上启用 `forward-auth` 插件: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/1' \ + -H "X-API-KEY: $admin_key" \ + -d '{ + "uri": "/headers", + "plugins": { + "forward-auth": { + "uri": "http://127.0.0.1:9080/auth", + "request_headers": ["Authorization"], + "upstream_headers": ["X-User-ID"], + "client_headers": ["Location"] + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } +}' +``` + +完成上述配置后,可通过以下三种方式进行测试: + +- 在请求头中发送认证的详细信息: + +```shell +curl http://127.0.0.1:9080/headers -H 'Authorization: 123' +``` + +``` +{ + "headers": { + "Authorization": "123", + "Next": "More-headers" + } +} +``` + +- 转发认证服务响应头到 Upstream。 + +```shell +curl http://127.0.0.1:9080/headers -H 'Authorization: 321' +``` + +``` +{ + "headers": { + "Authorization": "321", + "X-User-ID": "i-am-user", + "Next": "More-headers" + } +} 
+``` + +- 当授权失败时,认证服务可以向用户发送自定义响应: + +```shell +curl -i http://127.0.0.1:9080/headers +``` + +```shell +HTTP/1.1 403 Forbidden +Location: http://example.com/auth +``` + +## 删除插件 + +当你需要禁用 `forward-auth` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/gm.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/gm.md new file mode 100644 index 0000000..317b23b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/gm.md @@ -0,0 +1,191 @@ +--- +title: GM +keywords: + - Apache APISIX + - Plugin + - GM +description: 本文介绍了关于 Apache APISIX gm 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`gm` 插件能启用国密相关的功能。目前支持通过该插件动态配置国密双证书。 + +:::note 相关介绍 +国密就是国产化的密码算法。在我们日常开发过程中会接触到各种各样的密码算法,如 RSA、SHA256 等等。为了达到更高的安全等级,许多大公司和国家会制定自己的密码算法。国密就是这样一组由中国国家密码管理局制定的密码算法。在国际形势越发复杂多变的今天,密码算法的国产化替代,在一些领域已经成为了一股势不可挡的潮流。 +::: + +## 启用插件 + +**该插件要求 Apache APISIX 运行在编译了 Tongsuo 的 APISIX-Runtime 上。** + +首先,我们需要安装 Tongsuo(此处我们选择编译出 Tongsuo 的动态链接库): + +``` +# TODO: use a fixed release once they have created one. 
+# See https://github.com/Tongsuo-Project/Tongsuo/issues/318 +git clone https://github.com/api7/tongsuo --depth 1 +pushd tongsuo +./config shared enable-ntls -g --prefix=/usr/local/tongsuo +make -j2 +sudo make install_sw +``` + +其次,我们需要构建 APISIX-Runtime,让它使用 Tongsuo 作为 SSL 库: + +``` +export OR_PREFIX=/usr/local/openresty +export openssl_prefix=/usr/local/tongsuo +export zlib_prefix=$OR_PREFIX/zlib +export pcre_prefix=$OR_PREFIX/pcre + +export cc_opt="-DNGX_LUA_ABORT_AT_PANIC -I${zlib_prefix}/include -I${pcre_prefix}/include -I${openssl_prefix}/include" +export ld_opt="-L${zlib_prefix}/lib -L${pcre_prefix}/lib -L${openssl_prefix}/lib64 -Wl,-rpath,${zlib_prefix}/lib:${pcre_prefix}/lib:${openssl_prefix}/lib64" +./build-apisix-runtime.sh +``` + +该插件默认是禁用状态,你需要将其添加到配置文件(`./conf/config.yaml`)中才可以启用它: + +```yaml +plugins: + - ... + - gm +``` + +由于 APISIX 的默认 cipher 中不包含国密 cipher,所以我们还需要在配置文件(`./conf/config.yaml`)中设置 cipher: + +```yaml +apisix: + ... + ssl: + ... + # 可按实际情况调整。错误的 cipher 会导致“no shared cipher”或“no ciphers available”报错。 + ssl_ciphers: HIGH:!aNULL:!MD5 + +``` + +配置完成后,重新加载 APISIX,此时 APISIX 将会启用国密相关的逻辑。 + +## 测试插件 + +在测试插件之前,我们需要准备好国密双证书。Tongsuo 提供了生成[SM2 双证书](https://www.yuque.com/tsdoc/ts/sulazb)的教程。 + +在下面的例子中,我们将用到如下的证书: + +``` +# 客户端加密证书和密钥 +t/certs/client_enc.crt +t/certs/client_enc.key +# 客户端签名证书和密钥 +t/certs/client_sign.crt +t/certs/client_sign.key +# CA 和中间 CA 打包在一起的文件,用于设置受信任的 CA +t/certs/gm_ca.crt +# 服务端加密证书和密钥 +t/certs/server_enc.crt +t/certs/server_enc.key +# 服务端签名证书和密钥 +t/certs/server_sign.crt +t/certs/server_sign.key +``` + +此外,我们还需要准备 Tongsuo 命令行工具。 + +``` +./config enable-ntls -static +make -j2 +# 生成的命令行工具在 apps 目录下 +mv apps/openssl .. 
+``` + +你也可以采用非静态编译的方式,不过就需要根据具体环境,自己解决动态链接库的路径问题了。 + +以下示例展示了如何在指定域名中启用 `gm` 插件: + +创建对应的 SSL 对象: + +```python +#!/usr/bin/env python +# coding: utf-8 + +import sys +# sudo pip install requests +import requests + +if len(sys.argv) <= 3: + print("bad argument") + sys.exit(1) +with open(sys.argv[1]) as f: + enc_cert = f.read() +with open(sys.argv[2]) as f: + enc_key = f.read() +with open(sys.argv[3]) as f: + sign_cert = f.read() +with open(sys.argv[4]) as f: + sign_key = f.read() +api_key = "edd1c9f034335f136f87ad84b625c8f1" +resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={ + "cert": enc_cert, + "key": enc_key, + "certs": [sign_cert], + "keys": [sign_key], + "gm": True, + "snis": ["localhost"], +}, headers={ + "X-API-KEY": api_key, +}) +print(resp.status_code) +print(resp.text) +``` + +将上面的脚本保存为 `./create_gm_ssl.py`,运行: + +```shell +./create_gm_ssl.py t/certs/server_enc.crt t/certs/server_enc.key t/certs/server_sign.crt t/certs/server_sign.key +``` + +输出结果如下所示: + +``` +200 +{"key":"\/apisix\/ssls\/1","value":{"keys":["Yn... +``` + +完成上述准备后,可以使用如下命令测试插件是否启用成功: + +```shell +./openssl s_client -connect localhost:9443 -servername localhost -cipher ECDHE-SM2-WITH-SM4-SM3 -enable_ntls -ntls -verifyCAfile t/certs/gm_ca.crt -sign_cert t/certs/client_sign.crt -sign_key t/certs/client_sign.key -enc_cert t/certs/client_enc.crt -enc_key t/certs/client_enc.key +``` + +这里 `./openssl` 是前面得到的 Tongsuo 命令行工具。9443 是 APISIX 默认的 HTTPS 端口。 + +如果一切正常,可以看到连接建立了起来,并输出如下信息: + +``` +... +New, NTLSv1.1, Cipher is ECDHE-SM2-SM4-CBC-SM3 +... 
+``` + +## 删除插件 + +如果不再使用此插件,可将 `gm` 插件从 `./conf/config.yaml` 配置文件中移除,然后重启 APISIX 或者通过插件热加载的接口触发插件的卸载。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/google-cloud-logging.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/google-cloud-logging.md new file mode 100644 index 0000000..d485bee --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/google-cloud-logging.md @@ -0,0 +1,231 @@ +--- +title: google-cloud-logging +keywords: + - APISIX + - API 网关 + - 插件 + - Google Cloud logging + - 日志 +description: API 网关 Apache APISIX 的 google-cloud-logging 插件可用于将请求日志转发到 Google Cloud Logging Service 中进行分析和存储。 +--- + + + +## 描述 + +`google-cloud-logging` 插件可用于将请求日志发送到 [Google Cloud Logging Service](https://cloud.google.com/logging/) 进行分析和存储。 + +## 属性 + +| 名称 | 必选项 | 默认值 | 描述 | +| ----------------------- | -------- | ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | +| auth_config | 是 | | `auth_config` 和 `auth_file` 必须配置一个。 | +| auth_config.client_email | 是 | | 谷歌服务帐号的 email 参数。 | +| auth_config.private_key | 是 | | 谷歌服务帐号的私钥参数。 | +| auth_config.project_id | 是 | | 谷歌服务帐号的项目 ID。 | +| auth_config.token_uri | 是 | https://oauth2.googleapis.com/token | 请求谷歌服务帐户的令牌的 URI。 | +| auth_config.entries_uri | 否 | https://logging.googleapis.com/v2/entries:write | 谷歌日志服务写入日志条目的 API。 | +| auth_config.scope | 否 | | 谷歌服务账号的访问范围,可参考 [OAuth 2.0 Scopes for Google APIs](https://developers.google.com/identity/protocols/oauth2/scopes#logging)。可选项:"https://www.googleapis.com/auth/logging.read"、"https://www.googleapis.com/auth/logging.write"、"https://www.googleapis.com/auth/logging.admin"、"https://www.googleapis.com/auth/cloud-platform"。| +| auth_config.scopes | 废弃 | | 谷歌服务账号的访问范围,推荐使用 `auth_config.scope` | +| auth_file | 是 | | `auth_config` 和 `auth_file` 必须配置一个。 | +| ssl_verify | 否 | true | 当设置为 `true` 时,启用 
`SSL` 验证。 | +| resource | 否 | {"type": "global"} | 谷歌监控资源,请参考 [MonitoredResource](https://cloud.google.com/logging/docs/reference/v2/rest/v2/MonitoredResource)。 | +| log_id | 否 | apisix.apache.org%2Flogs | 谷歌日志 ID,请参考 [LogEntry](https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry)。 | +| log_format | 否 | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +注意:schema 中还定义了 `encrypt_fields = {"auth_config.private_key"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +该插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免该插件频繁地提交数据。默认情况下每 `5` 秒钟或队列中的数据达到 `1000` 条时,批处理器会自动提交数据,如需了解更多信息或自定义配置,请参考 [Batch Processor](../batch-processor.md#配置)。 + +### 默认日志格式示例 + +```json +{ + "insertId": "0013a6afc9c281ce2e7f413c01892bdc", + "labels": { + "source": "apache-apisix-google-cloud-logging" + }, + "logName": "projects/apisix/logs/apisix.apache.org%2Flogs", + "httpRequest": { + "requestMethod": "GET", + "requestUrl": "http://localhost:1984/hello", + "requestSize": 59, + "responseSize": 118, + "status": 200, + "remoteIp": "127.0.0.1", + "serverIp": "127.0.0.1:1980", + "latency": "0.103s" + }, + "resource": { + "type": "global" + }, + "jsonPayload": { + "service_id": "", + "route_id": "1" + }, + "timestamp": "2024-01-06T03:34:45.065Z" +} +``` + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头。则表明获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::info 注意 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `google-cloud-logging` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') 
+``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/google-cloud-logging \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你将在日志系统中看到如下类似日志: + +```json +{"partialSuccess":false,"entries":[{"jsonPayload":{"client_ip":"127.0.0.1","host":"localhost","@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1"},"resource":{"type":"global"},"insertId":"942e81f60b9157f0d46bc9f5a8f0cc40","logName":"projects/apisix/logs/apisix.apache.org%2Flogs","timestamp":"2023-01-09T14:47:25+08:00","labels":{"source":"apache-apisix-google-cloud-logging"}}]} +``` + +## 启用插件 + +以下示例展示了如何在指定路由上启用该插件: + +**完整配置** + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "google-cloud-logging": { + "auth_config":{ + "project_id":"apisix", + "client_email":"your service account email@apisix.iam.gserviceaccount.com", + "private_key":"-----BEGIN RSA PRIVATE KEY-----your private key-----END RSA PRIVATE KEY-----", + "token_uri":"https://oauth2.googleapis.com/token", + "scope":[ + "https://www.googleapis.com/auth/logging.admin" + ], + "entries_uri":"https://logging.googleapis.com/v2/entries:write" + }, + "resource":{ + "type":"global" + }, + "log_id":"apisix.apache.org%2Flogs", + "inactive_timeout":10, + "max_retry_count":0, + "buffer_duration":60, + "retry_delay":1, + "batch_max_size":1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +**最小化配置** + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "google-cloud-logging": { + "auth_config":{ + "project_id":"apisix", + "client_email":"your service account email@apisix.iam.gserviceaccount.com", + "private_key":"-----BEGIN RSA PRIVATE 
KEY-----" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... +hello, world +``` + +访问成功后,你可以登录 [Google Cloud Logging Service](https://console.cloud.google.com/logs/viewer) 查看相关日志。 + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/grpc-transcode.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/grpc-transcode.md new file mode 100644 index 0000000..10052d7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/grpc-transcode.md @@ -0,0 +1,396 @@ +--- +title: grpc-transcode +keywords: + - Apache APISIX + - API 网关 + - Plugin + - gRPC Web + - grpc-web +description: 本文介绍了关于 Apache APISIX `grpc-transcode` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +使用 `grpc-transcode` 插件可以在 HTTP 和 gRPC 请求之间进行转换。 + +APISIX 接收 HTTP 请求后,首先对请求进行转码,并将转码后的请求转发到 gRPC 服务,获取响应并以 HTTP 格式将其返回给客户端。 + + + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| --------- | ------------------------------------------------- | ----- | ------ | ------------------------------ | +| proto_id | string/integer | 是 | | `.proto` 内容的 id。 | +| service | string | 是 | | gRPC 服务名。 | +| method | string | 是 | | gRPC 服务中要调用的方法名。 | +| deadline | number | 否 | 0 | gRPC 服务的 deadline,单位为:ms。 | +| pb_option | array[string([pb_option_def](#pb_option-的选项))] | 否 | | proto 编码过程中的转换选项。 | +| show_status_in_body | boolean | 否 | false | 是否在返回体中展示解析过的 `grpc-status-details-bin` | +| status_detail_type | string | 否 | | `grpc-status-details-bin` 中 
[details](https://github.com/googleapis/googleapis/blob/b7cb84f5d42e6dba0fdcc2d8689313f6a8c9d7b9/google/rpc/status.proto#L46) 部分对应的 message type,如果不指定,此部分不进行解码 | + +### pb_option 的选项 + +| 类型 | 有效值 | +|-----------------|-------------------------------------------------------------------------------------------| +| enum as result | `enum_as_name`, `enum_as_value` | +| int64 as result | `int64_as_number`, `int64_as_string`, `int64_as_hexstring` | +| default values | `auto_default_values`, `no_default_values`, `use_default_values`, `use_default_metatable` | +| hooks | `enable_hooks`, `disable_hooks` | + +## 启用插件 + +在启用插件之前,你必须将 `.proto` 或 `.pb` 文件的内容添加到 APISIX。 + +可以使用 `/admin/protos/id` 接口将文件的内容添加到 `content` 字段: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" +}' +``` + +如果你的 `.proto` 文件包含 `import`,或者想要把多个 `.proto` 文件合并成一个 proto,你可以生成一个 `.pb` 文件并在 APISIX 中使用它。 + +假设已经有一个 `.proto` 文件(`proto/helloworld.proto`),它导入了另一个 `proto` 文件: + +```proto +syntax = "proto3"; + +package helloworld; +import "proto/import.proto"; +... 
+``` + +首先,让我们从 `.proto` 文件创建一个 `.pb` 文件。 + +```shell +protoc --include_imports --descriptor_set_out=proto.pb proto/helloworld.proto +``` + +输出的二进制文件 `proto.pb` 将同时包含 `helloworld.proto` 和 `import.proto`。 + +然后将 `proto.pb` 的内容作为 proto 的 `content` 字段提交。 + +由于 proto 的内容是二进制的,我们需要使用以下 shell 命令将其转换成 `base64`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "content" : "'"$(base64 -w0 /path/to/proto.pb)"'" +}' +``` + +返回 `HTTP/1.1 201 Created` 结果如下: + +``` +{"node":{"value":{"create_time":1643879753,"update_time":1643883085,"content":"CmgKEnByb3RvL2ltcG9ydC5wcm90bxIDcGtnIhoKBFVzZXISEgoEbmFtZRgBIAEoCVIEbmFtZSIeCghSZXNwb25zZRISCgRib2R5GAEgASgJUgRib2R5QglaBy4vcHJvdG9iBnByb3RvMwq9AQoPcHJvdG8vc3JjLnByb3RvEgpoZWxsb3dvcmxkGhJwcm90by9pbXBvcnQucHJvdG8iPAoHUmVxdWVzdBIdCgR1c2VyGAEgASgLMgkucGtnLlVzZXJSBHVzZXISEgoEYm9keRgCIAEoCVIEYm9keTI5CgpUZXN0SW1wb3J0EisKA1J1bhITLmhlbGxvd29ybGQuUmVxdWVzdBoNLnBrZy5SZXNwb25zZSIAQglaBy4vcHJvdG9iBnByb3RvMw=="},"key":"\/apisix\/proto\/1"}} +``` + +现在我们可以在指定路由中启用 `grpc-transcode` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } +}' +``` + +:::note + +此处使用的 Upstream 应该是 gRPC 服务。请注意,`scheme` 需要设置为 `grpc`。 + +可以使用 [grpc_server_example](https://github.com/api7/grpc_server_example) 进行测试。 + +::: + +## 测试插件 + +通过上述示例配置插件后,你可以向 APISIX 发出请求以从 gRPC 服务(通过 APISIX)获得响应: + +访问上面配置的 route: + +```shell +curl -i http://127.0.0.1:9080/grpctest?name=world +``` + +返回结果 + +```Shell +HTTP/1.1 200 OK +Date: Fri, 16 Aug 2019 11:55:36 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server +Proxy-Connection: keep-alive + 
+{"message":"Hello world"} +``` + +你也可以配置 `pb_option`,如下所示: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/23 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/zeebe/WorkflowInstanceCreate", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "gateway_protocol.Gateway", + "method": "CreateWorkflowInstance", + "pb_option":["int64_as_string"] + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:26500": 1 + } + } +}' +``` + +可以通过如下命令检查刚刚配置的路由: + +```shell +curl -i "http://127.0.0.1:9080/zeebe/WorkflowInstanceCreate?bpmnProcessId=order-process&version=1&variables=\{\"orderId\":\"7\",\"ordervalue\":99\}" +``` + +```Shell +HTTP/1.1 200 OK +Date: Wed, 13 Nov 2019 03:38:27 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +grpc-encoding: identity +grpc-accept-encoding: gzip +Server: APISIX web server +Trailer: grpc-status +Trailer: grpc-message + +{"workflowKey":"#2251799813685260","workflowInstanceKey":"#2251799813688013","bpmnProcessId":"order-process","version":1} +``` + +## 在返回体中展示 `grpc-status-details-bin` + +如果 gRPC 服务返回了错误,返回头中可能存在 `grpc-status-details-bin` 字段对错误进行描述,你可以解码该字段,并展示在返回体中。 + +上传 proto 文件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc GetErrResp (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + repeated string items = 2; + } + message HelloReply { + string message = 1; + repeated string items = 2; + }" +}' +``` + +启用 `grpc-transcode` 插件,并设置选项 `show_status_in_body` 为 `true`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "GetErrResp", + 
"show_status_in_body": true + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } +}' +``` + +访问上面配置的 route: + +```shell +curl -i http://127.0.0.1:9080/grpctest?name=world +``` + +返回结果 + +```Shell +HTTP/1.1 503 Service Temporarily Unavailable +Date: Wed, 10 Aug 2022 08:59:46 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +grpc-status: 14 +grpc-message: Out of service +grpc-status-details-bin: CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5FcnJvckRldGFpbBIpCAESHFRoZSBzZXJ2ZXIgaXMgb3V0IG9mIHNlcnZpY2UaB3NlcnZpY2U +Server: APISIX web server + +{"error":{"details":[{"type_url":"type.googleapis.com\/helloworld.ErrorDetail","value":"\b\u0001\u0012\u001cThe server is out of service\u001a\u0007service"}],"message":"Out of service","code":14}} +``` + +注意返回体中还存在未解码的字段,如果需要解码该字段,需要在上传的 proto 文件中加上该字段对应的 `message type`。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc GetErrResp (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + repeated string items = 2; + } + message HelloReply { + string message = 1; + repeated string items = 2; + } + message ErrorDetail { + int64 code = 1; + string message = 2; + string type = 3; + }" +}' +``` + +同时配置选项 `status_detail_type` 为 `helloworld.ErrorDetail`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "GetErrResp", + "show_status_in_body": true, + "status_detail_type": "helloworld.ErrorDetail" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } +}' +``` + +此时就能返回完全解码后的结果 + +```Shell 
+HTTP/1.1 503 Service Temporarily Unavailable +Date: Wed, 10 Aug 2022 09:02:46 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +grpc-status: 14 +grpc-message: Out of service +grpc-status-details-bin: CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5FcnJvckRldGFpbBIpCAESHFRoZSBzZXJ2ZXIgaXMgb3V0IG9mIHNlcnZpY2UaB3NlcnZpY2U +Server: APISIX web server + +{"error":{"details":[{"type":"service","message":"The server is out of service","code":1}],"message":"Out of service","code":14}} +``` + +## 删除插件 + +当你需要禁用 `grpc-transcode` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/111 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/grpctest", + "plugins": {}, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/grpc-web.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/grpc-web.md new file mode 100644 index 0000000..c8b901e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/grpc-web.md @@ -0,0 +1,113 @@ +--- +title: grpc-web +keywords: + - Apache APISIX + - API 网关 + - Plugin + - gRPC Web + - grpc-web +description: 本文介绍了关于 Apache APISIX `grpc-web` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`grpc-web` 插件是一个代理插件,可以处理从 JavaScript 客户端到 gRPC Service 的 [gRPC Web](https://github.com/grpc/grpc-web) 请求。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|---------------------| ------- |----|-----------------------------------------|----------------------------------------------------------------| +| cors_allow_headers | string | 否 | "content-type,x-grpc-web,x-user-agent" | 允许跨域访问时请求方携带哪些非 `CORS 规范` 以外的 Header。如果你有多个 Header,请使用 `,` 分割。 | + +## 启用插件 + +你可以通过如下命令在指定路由上启用 `gRPC-web` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq 
'.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri":"/grpc/web/*", + "plugins":{ + "grpc-web":{} + }, + "upstream":{ + "scheme":"grpc", + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + } +}' +``` + +:::info IMPORTANT + +在使用 `gRPC Web` 代理插件时,路由必须使用**前缀匹配**模式(例如:`/*` 或 `/grpc/example/*`),因为 `gRPC Web` 客户端会在 URI 中传递 `proto` 中声明的**包名称**、**服务接口名称**、**方法名称**等信息(例如:`/path/a6.RouteService/Insert`)。 + +因此,在使用**绝对匹配**时将无法命中插件和提取 `proto` 信息。 + +::: + +## 测试插件 + +请参考 [gRPC-Web Client Runtime Library](https://www.npmjs.com/package/grpc-web) 或 [Apache APISIX gRPC Web Test Framework](https://github.com/apache/apisix/tree/master/t/plugin/grpc-web) 了解如何配置你的 Web 客户端。 + +运行 gRPC Web 客户端后,你可以从浏览器或通过 Node.js 向 APISIX 发出请求。 + +:::note + +请求方式仅支持 `POST` 和 `OPTIONS`,详细信息请参考:[CORS support](https://github.com/grpc/grpc-web/blob/master/doc/browser-features.md#cors-support) 。 + +内容类型支持 `application/grpc-web`、`application/grpc-web-text`、`application/grpc-web+proto`、`application/grpc-web-text+proto`,详细信息请参考:[Protocol differences vs gRPC over HTTP2](https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md#protocol-differences-vs-grpc-over-http2) 。 + +::: + +## 删除插件 + +当你需要禁用 `grpc-web` 插件时,可以通过如下命令删除相应的 `JSON` 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri":"/grpc/web/*", + "plugins":{}, + "upstream":{ + "scheme":"grpc", + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/gzip.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/gzip.md new file mode 100644 index 0000000..05a49b6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/gzip.md @@ -0,0 +1,126 @@ +--- +title: gzip +keywords: + - Apache APISIX + - 
API 网关 + - Plugin + - gzip +description: 本文介绍了关于 Apache APISIX `gzip` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`gzip` 插件能动态设置 [NGINX](https://docs.nginx.com/nginx/admin-guide/web-server/compression/) 的压缩行为。 +当启用 `gzip` 插件时,客户端在发起请求时需要在请求头中添加 `Accept-Encoding: gzip`,以表明客户端支持 `gzip` 压缩。APISIX 在接收到请求后,会根据客户端的支持情况和服务器配置动态判断是否对响应内容进行 gzip 压缩。如果判定条件得到满足,APISIX 将在响应头中添加 `Content-Encoding: gzip` 字段,以指示响应内容已经通过 `gzip` 压缩。在客户端接收到响应后,根据响应头中的 `Content-Encoding` 字段使用相应的解压缩算法对响应内容进行解压,从而获取原始的响应内容。 + +:::info IMPORTANT + +该插件要求 Apache APISIX 运行在 [APISIX-Runtime](../FAQ.md#如何构建-apisix-runtime-环境) 上。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------| -------------------- | ------- | -------------- | ------- | -------------------------------------------------------------------------------------------------------------------------------- | +| types | array[string] or "*" | 否 | ["text/html"] | | 动态设置 [`gzip_types`](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_types) 指令,特殊值 `"*"` 匹配任何 MIME 类型。 | +| min_length | integer | 否 | 20 | >= 1 | 动态设置 [`gzip_min_length`](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_min_length) 指令。 | +| comp_level | integer | 否 | 1 | [1, 9] | 动态设置 [`gzip_comp_level`](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level) 指令。 | +| http_version | number | 否 | 1.1 | 1.1, 1.0 | 动态设置 [`gzip_http_version`](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_http_version) 指令。 | +| buffers.number | integer | 否 | 32 | >= 1 | 动态设置 [`gzip_buffers`](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_buffers) 指令 参数 `number`。 | +| buffers.size | integer | 否 | 4096 | >= 1 | 动态设置 [`gzip_buffers`](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_buffers) 指令参数 `size`。单位为字节。 | +| vary | boolean | 否 | false | | 动态设置 [`gzip_vary`](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_vary) 指令。 | + +## 启用插件 + +以下示例展示了如何在指定路由中启用 `gzip` 插件: + +:::note + +您可以这样从 `config.yaml` 
中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "gzip": { + "buffers": { + "number": 8 + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,可以使用如下命令测试插件是否启用成功: + +```shell +curl http://127.0.0.1:9080/index.html -i -H "Accept-Encoding: gzip" +``` + +``` +HTTP/1.1 404 Not Found +Content-Type: text/html; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Date: Wed, 21 Jul 2021 03:52:55 GMT +Server: APISIX/2.7 +Content-Encoding: gzip + +Warning: Binary output can mess up your terminal. Use "--output -" to tell +Warning: curl to output it to your terminal anyway, or consider "--output +Warning: " to save to a file. +``` + +## 删除插件 + +当你需要禁用 `gzip` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/hmac-auth.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/hmac-auth.md new file mode 100644 index 0000000..c446047 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/hmac-auth.md @@ -0,0 +1,760 @@ +--- +title: hmac-auth +keywords: + - Apache APISIX + - API 网关 + - Plugin + - HMAC Authentication + - hmac-auth +description: hmac-auth 插件支持 HMAC 认证,保证请求的完整性,防止传输过程中的修改,增强 API 的安全性。 +--- + + + +## 描述 + +`hmac-auth` 插件支持 HMAC(基于哈希的消息认证码)认证,作为一种确保请求完整性的机制,防止它们在传输过程中被修改。要使用该插件,您需要在 [Consumers](../terminology/consumer.md) 上配置 HMAC 密钥,并在 Routes 或 Services 上启用该插件。 + +当消费者成功通过身份验证后,APISIX 会在将请求代理到上游服务之前向请求添加其他标头,例如 
`X-Consumer-Username`、`X-Credential-Identifier` 和其他消费者自定义标头(如果已配置)。上游服务将能够区分消费者并根据需要实现其他逻辑。如果这些值中的任何一个不可用,则不会添加相应的标头。
+
+启用后,插件会验证请求的 `Authorization` 标头中的 HMAC 签名,并检查传入的请求是否来自受信任的来源。具体来说,当 APISIX 收到 HMAC 签名的请求时,会从 `Authorization` 标头中提取密钥 ID。然后,APISIX 会检索相应的消费者配置,包括密钥。如果密钥 ID 有效且存在,APISIX 将使用请求的 `Date` 标头和密钥生成 HMAC 签名。如果生成的签名与 `Authorization` 标头中提供的签名匹配,则请求通过身份验证并转发到上游服务。
+
+插件实现基于 [draft-cavage-http-signatures](https://www.ietf.org/archive/id/draft-cavage-http-signatures-12.txt)。
+
+## 属性
+
+以下属性可用于 Consumers 或 Credentials 的配置。
+
+| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
+| ---------------- | ------------- | ------ | ------------- | ------------------------------------------| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| access_key | string | 是 | | | 消费者的唯一标识符,用于标识相关配置,例如密钥。 |
+| secret_key | string | 是 | | | 用于生成 HMAC 的密钥。此字段支持使用 [APISIX Secret](../terminology/secret.md) 资源将值保存在 Secret Manager 中。 |
+
+以下属性可用于 Routes 或 Services 的配置。
+
+| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
+| ---------------- | ------------- | ------ | ------------- | ------------------------------------------| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| allowed_algorithms | array[string] | 否 | ["hmac-sha1", "hmac-sha256", "hmac-sha512"] | "hmac-sha1"、"hmac-sha256" 和 "hmac-sha512" 的组合 | 允许的 HMAC 算法列表。 |
+| clock_skew | integer | 否 | 300 | >=1 | 客户端请求的时间戳与 APISIX 服务器当前时间之间允许的最大时间差(以秒为单位)。这有助于解决客户端和服务器之间的时间同步差异,并防止重放攻击。时间戳将根据 Date 头中的时间(必须为 GMT 格式)进行计算。 |
+| signed_headers | array[string] | 否 | | | 客户端请求的 HMAC 签名中应包含的 HMAC 签名头列表。 |
+| validate_request_body | boolean | 否 | false | | 如果为 true,则验证请求正文的完整性,以确保在传输过程中没有被篡改。具体来说,插件会创建一个 SHA-256 的 base64 编码 digest,并将其与 `Digest` 头进行比较。如果 `Digest` 头丢失或 digest 不匹配,验证将失败。 |
+| 
hide_credentials | boolean | 否 | false | | 如果为 true,则不会将授权请求头传递给上游服务。 | +| anonymous_consumer | string | 否 | | | 匿名消费者名称。如果已配置,则允许匿名用户绕过身份验证。 | + +注意:schema 中还定义了 `encrypt_fields = {"secret_key"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +## 示例 + +下面的示例说明了如何在不同场景中使用“hmac-auth”插件。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 在路由上实现 HMAC 身份验证 + +以下示例演示如何在路由上实现 HMAC 身份验证。您还将在 `Consumer-Custom-Id` 标头中将消费者自定义 ID 附加到经过身份验证的请求,该 ID 可用于根据需要实现其他逻辑。 + +创建一个带有自定义 ID 标签的消费者 `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john", + "labels": { + "custom_id": "495aec6a" + } + }' +``` + +为消费者创建 `hmac-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-hmac-auth", + "plugins": { + "hmac-auth": { + "key_id": "john-key", + "secret_key": "john-secret-key" + } + } + }' +``` + +使用 `hmac-auth` 插件的默认配置创建路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "hmac-auth-route", + "uri": "/get", + "methods": ["GET"], + "plugins": { + "hmac-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +生成签名。您可以使用以下 Python 代码片段或其他技术栈: + +```python title="hmac-sig-header-gen.py" +import hmac +import hashlib +import base64 +from datetime import datetime, timezone + +key_id = "john-key" # key id +secret_key = b"john-secret-key" # secret key +request_method = "GET" # HTTP method +request_path = "/get" # Route URI +algorithm= "hmac-sha256" # can use other algorithms in allowed_algorithms + +# get current datetime in GMT +# note: the signature will become invalid after the clock skew (default 300s) +# you can regenerate the signature 
after it becomes invalid, or increase the clock +# skew to prolong the validity within the advised security boundary +gmt_time = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT') + +# construct the signing string (ordered) +# the date and any subsequent custom headers should be lowercased and separated by a +# single space character, i.e. `:` +# https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12#section-2.1.6 +signing_string = ( + f"{key_id}\n" + f"{request_method} {request_path}\n" + f"date: {gmt_time}\n" +) + +# create signature +signature = hmac.new(secret_key, signing_string.encode('utf-8'), hashlib.sha256).digest() +signature_base64 = base64.b64encode(signature).decode('utf-8') + +# construct the request headers +headers = { + "Date": gmt_time, + "Authorization": ( + f'Signature keyId="{key_id}",algorithm="{algorithm}",' + f'headers="@request-target date",' + f'signature="{signature_base64}"' + ) +} + +# print headers +print(headers) +``` + +运行脚本: + +```shell +python3 hmac-sig-header-gen.py +``` + +您应该看到打印的请求标头: + +```text +{'Date': 'Fri, 06 Sep 2024 06:41:29 GMT', 'Authorization': 'Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="wWfKQvPDr0wHQ4IHdluB4IzeNZcj0bGJs2wvoCOT5rM="'} +``` + +使用生成的标头,向路由发送请求: + +```shell +curl -X GET "http://127.0.0.1:9080/get" \ + -H "Date: Fri, 06 Sep 2024 06:41:29 GMT" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="wWfKQvPDr0wHQ4IHdluB4IzeNZcj0bGJs2wvoCOT5rM="' +``` + +您应该会看到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Authorization": "Signature keyId=\"john-key\",algorithm=\"hmac-sha256\",headers=\"@request-target date\",signature=\"wWfKQvPDr0wHQ4IHdluB4IzeNZcj0bGJs2wvoCOT5rM=\"", + "Date": "Fri, 06 Sep 2024 06:41:29 GMT", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": 
"Root=1-66d96513-2e52d4f35c9b6a2772d667ea", + "X-Consumer-Username": "john", + "X-Credential-Identifier": "cred-john-hmac-auth", + "X-Consumer-Custom-Id": "495aec6a", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 34.0.34.160", + "url": "http://127.0.0.1/get" +} +``` + +### Hide Authorization Information From Upstream + +As seen the in the [last example](#implement-hmac-authentication-on-a-route), the `Authorization` header passed to the Upstream includes the signature and all other details. This could potentially introduce security risks. + +The following example demonstrates how to prevent these information from being sent to the Upstream service. + +Update the plugin configuration to set `hide_credentials` to `true`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/hmac-auth-route" -X PATCH \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "plugins": { + "hmac-auth": { + "hide_credentials": true + } + } +}' +``` + +Send a request to the route: + +```shell +curl -X GET "http://127.0.0.1:9080/get" \ + -H "Date: Fri, 06 Sep 2024 06:41:29 GMT" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="wWfKQvPDr0wHQ4IHdluB4IzeNZcj0bGJs2wvoCOT5rM="' +``` + +You should see an `HTTP/1.1 200 OK` response and notice the `Authorization` header is entirely removed: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66d96513-2e52d4f35c9b6a2772d667ea", + "X-Consumer-Username": "john", + "X-Credential-Identifier": "cred-john-hmac-auth", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 34.0.34.160", + "url": "http://127.0.0.1/get" +} +``` + +### Enable Body Validation + +The following example demonstrates how to enable body validation to ensure the integrity of the request body. 
+ +Create a consumer `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +为消费者创建 `hmac-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-hmac-auth", + "plugins": { + "hmac-auth": { + "key_id": "john-key", + "secret_key": "john-secret-key" + } + } + }' +``` + +Create a Route with the `hmac-auth` plugin as such: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "hmac-auth-route", + "uri": "/post", + "methods": ["POST"], + "plugins": { + "hmac-auth": { + "validate_request_body": true + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +生成签名。您可以使用以下 Python 代码片段或其他技术栈: + +```python title="hmac-sig-digest-header-gen.py" +import hmac +import hashlib +import base64 +from datetime import datetime, timezone + +key_id = "john-key" # key id +secret_key = b"john-secret-key" # secret key +request_method = "POST" # HTTP method +request_path = "/post" # Route URI +algorithm= "hmac-sha256" # can use other algorithms in allowed_algorithms +body = '{"name": "world"}' # example request body + +# get current datetime in GMT +# note: the signature will become invalid after the clock skew (default 300s). +# you can regenerate the signature after it becomes invalid, or increase the clock +# skew to prolong the validity within the advised security boundary +gmt_time = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT') + +# construct the signing string (ordered) +# the date and any subsequent custom headers should be lowercased and separated by a +# single space character, i.e. 
`:` +# https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12#section-2.1.6 +signing_string = ( + f"{key_id}\n" + f"{request_method} {request_path}\n" + f"date: {gmt_time}\n" +) + +# create signature +signature = hmac.new(secret_key, signing_string.encode('utf-8'), hashlib.sha256).digest() +signature_base64 = base64.b64encode(signature).decode('utf-8') + +# create the SHA-256 digest of the request body and base64 encode it +body_digest = hashlib.sha256(body.encode('utf-8')).digest() +body_digest_base64 = base64.b64encode(body_digest).decode('utf-8') + +# construct the request headers +headers = { + "Date": gmt_time, + "Digest": f"SHA-256={body_digest_base64}", + "Authorization": ( + f'Signature keyId="{key_id}",algorithm="hmac-sha256",' + f'headers="@request-target date",' + f'signature="{signature_base64}"' + ) +} + +# print headers +print(headers) +``` + +运行脚本: + +```shell +python3 hmac-sig-digest-header-gen.py +``` + +您应该看到打印的请求标头: + +```text +{'Date': 'Fri, 06 Sep 2024 09:16:16 GMT', 'Digest': 'SHA-256=78qzJuLwSpZ8HacsTdFCQJWxzPMOf8bYctRk2ySLpS8=', 'Authorization': 'Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="rjS6NxOBKmzS8CZL05uLiAfE16hXdIpMD/L/HukOTYE="'} +``` + +使用生成的标头,向路由发送请求: + +```shell +curl "http://127.0.0.1:9080/post" -X POST \ + -H "Date: Fri, 06 Sep 2024 09:16:16 GMT" \ + -H "Digest: SHA-256=78qzJuLwSpZ8HacsTdFCQJWxzPMOf8bYctRk2ySLpS8=" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="rjS6NxOBKmzS8CZL05uLiAfE16hXdIpMD/L/HukOTYE="' \ + -d '{"name": "world"}' +``` + +您应该会看到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": { + "{\"name\": \"world\"}": "" + }, + "headers": { + "Accept": "*/*", + "Authorization": "Signature keyId=\"john-key\",algorithm=\"hmac-sha256\",headers=\"@request-target date\",signature=\"rjS6NxOBKmzS8CZL05uLiAfE16hXdIpMD/L/HukOTYE=\"", + "Content-Length": 
"17", + "Content-Type": "application/x-www-form-urlencoded", + "Date": "Fri, 06 Sep 2024 09:16:16 GMT", + "Digest": "SHA-256=78qzJuLwSpZ8HacsTdFCQJWxzPMOf8bYctRk2ySLpS8=", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66d978c3-49f929ad5237da5340bbbeb4", + "X-Consumer-Username": "john", + "X-Credential-Identifier": "cred-john-hmac-auth", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "origin": "192.168.65.1, 34.0.34.160", + "url": "http://127.0.0.1/post" +} +``` + +如果您发送的请求没有摘要或摘要无效: + +```shell +curl "http://127.0.0.1:9080/post" -X POST \ + -H "Date: Fri, 06 Sep 2024 09:16:16 GMT" \ + -H "Digest: SHA-256=78qzJuLwSpZ8HacsTdFCQJWxzPMOf8bYctRk2ySLpS8=" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="rjS6NxOBKmzS8CZL05uLiAfE16hXdIpMD/L/HukOTYE="' \ + -d '{"name": "world"}' +``` + +您应该看到一个 `HTTP/1.1 401 Unauthorized` 响应,其中包含以下消息: + +```text +{"message":"client request can't be validated"} +``` + +### 强制签名标头 + +以下示例演示了如何强制在请求的 HMAC 签名中对某些标头进行签名。 + +创建消费者 `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +为消费者创建 `hmac-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-hmac-auth", + "plugins": { + "hmac-auth": { + "key_id": "john-key", + "secret_key": "john-secret-key" + } + } + }' +``` + +使用 `hmac-auth` 插件创建路由,该插件要求 HMAC 签名中存在三个标头: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "hmac-auth-route", + "uri": "/get", + "methods": ["GET"], + "plugins": { + "hmac-auth": { + "signed_headers": ["date","x-custom-header-a","x-custom-header-b"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +生成签名。您可以使用以下 Python 代码片段或其他技术栈: + 
+```python title="hmac-sig-req-header-gen.py" +import hmac +import hashlib +import base64 +from datetime import datetime, timezone + +key_id = "john-key" # key id +secret_key = b"john-secret-key" # secret key +request_method = "GET" # HTTP method +request_path = "/get" # Route URI +algorithm= "hmac-sha256" # can use other algorithms in allowed_algorithms +custom_header_a = "hello123" # required custom header +custom_header_b = "world456" # required custom header + +# get current datetime in GMT +# note: the signature will become invalid after the clock skew (default 300s) +# you can regenerate the signature after it becomes invalid, or increase the clock +# skew to prolong the validity within the advised security boundary +gmt_time = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT') + +# construct the signing string (ordered) +# the date and any subsequent custom headers should be lowercased and separated by a +# single space character, i.e. `:` +# https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12#section-2.1.6 +signing_string = ( + f"{key_id}\n" + f"{request_method} {request_path}\n" + f"date: {gmt_time}\n" + f"x-custom-header-a: {custom_header_a}\n" + f"x-custom-header-b: {custom_header_b}\n" +) + +# create signature +signature = hmac.new(secret_key, signing_string.encode('utf-8'), hashlib.sha256).digest() +signature_base64 = base64.b64encode(signature).decode('utf-8') + +# construct the request headers +headers = { + "Date": gmt_time, + "Authorization": ( + f'Signature keyId="{key_id}",algorithm="hmac-sha256",' + f'headers="@request-target date x-custom-header-a x-custom-header-b",' + f'signature="{signature_base64}"' + ), + "x-custom-header-a": custom_header_a, + "x-custom-header-b": custom_header_b +} + +# print headers +print(headers) +``` + +运行脚本: + +```shell +python3 hmac-sig-req-header-gen.py +``` + +您应该看到打印的请求标头: + +```text +{'Date': 'Fri, 06 Sep 2024 09:58:49 GMT', 'Authorization': 'Signature 
keyId="john-key",algorithm="hmac-sha256",headers="@request-target date x-custom-header-a x-custom-header-b",signature="MwJR8JOhhRLIyaHlJ3Snbrf5hv0XwdeeRiijvX3A3yE="', 'x-custom-header-a': 'hello123', 'x-custom-header-b': 'world456'} +``` + +使用生成的标头,向路由发送请求: + +```shell +curl -X GET "http://127.0.0.1:9080/get" \ + -H "Date: Fri, 06 Sep 2024 09:58:49 GMT" \ + -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date x-custom-header-a x-custom-header-b",signature="MwJR8JOhhRLIyaHlJ3Snbrf5hv0XwdeeRiijvX3A3yE="' \ + -H "x-custom-header-a: hello123" \ + -H "x-custom-header-b: world456" +``` + +您应该会看到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Authorization": "Signature keyId=\"john-key\",algorithm=\"hmac-sha256\",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"MwJR8JOhhRLIyaHlJ3Snbrf5hv0XwdeeRiijvX3A3yE=\"", + "Date": "Fri, 06 Sep 2024 09:58:49 GMT", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66d98196-64a58db25ece71c077999ecd", + "X-Consumer-Username": "john", + "X-Credential-Identifier": "cred-john-hmac-auth", + "X-Custom-Header-A": "hello123", + "X-Custom-Header-B": "world456", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "192.168.65.1, 103.97.2.206", + "url": "http://127.0.0.1/get" +} +``` + +### 匿名消费者的速率限制 + +以下示例演示了如何为常规消费者和匿名消费者配置不同的速率限制策略,其中匿名消费者不需要进行身份验证,配额较少。 + +创建常规消费者 `john`,并配置 `limit-count` 插件,以允许 30 秒内的配额为 3: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john", + "plugins": { + "limit-count": { + "count": 3, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +为消费者 `john` 创建 `hmac-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-hmac-auth", + "plugins": { + "hmac-auth": { + "key_id": 
"john-key", + "secret_key": "john-secret-key" + } + } + }' +``` + +创建匿名用户 `anonymous`,并配置 `limit-count` 插件,以允许 30 秒内配额为 1: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +创建路由并配置 `hmac-auth` 插件以接受匿名消费者 `anonymous` 绕过身份验证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "hmac-auth-route", + "uri": "/get", + "methods": ["GET"], + "plugins": { + "hmac-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +生成签名。您可以使用以下 Python 代码片段或其他技术栈: + +```python title="hmac-sig-header-gen.py" +import hmac +import hashlib +import base64 +from datetime import datetime, timezone + +key_id = "john-key" # key id +secret_key = b"john-secret-key" # secret key +request_method = "GET" # HTTP method +request_path = "/get" # Route URI +algorithm= "hmac-sha256" # can use other algorithms in allowed_algorithms + +# get current datetime in GMT +# note: the signature will become invalid after the clock skew (default 300s) +# you can regenerate the signature after it becomes invalid, or increase the clock +# skew to prolong the validity within the advised security boundary +gmt_time = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT') + +# construct the signing string (ordered) +# the date and any subsequent custom headers should be lowercased and separated by a +# single space character, i.e. 
`:` +# https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures-12#section-2.1.6 +signing_string = ( + f"{key_id}\n" + f"{request_method} {request_path}\n" + f"date: {gmt_time}\n" +) + +# create signature +signature = hmac.new(secret_key, signing_string.encode('utf-8'), hashlib.sha256).digest() +signature_base64 = base64.b64encode(signature).decode('utf-8') + +# construct the request headers +headers = { + "Date": gmt_time, + "Authorization": ( + f'Signature keyId="{key_id}",algorithm="{algorithm}",' + f'headers="@request-target date",' + f'signature="{signature_base64}"' + ) +} + +# print headers +print(headers) +``` + +运行脚本: + +```shell +python3 hmac-sig-header-gen.py +``` + +您应该看到打印的请求标头: + +```text +{'Date': 'Mon, 21 Oct 2024 17:31:18 GMT', 'Authorization': 'Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="ztFfl9w7LmCrIuPjRC/DWSF4gN6Bt8dBBz4y+u1pzt8="'} +``` + +使用生成的标头发送五个连续的请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H "Date: Mon, 21 Oct 2024 17:31:18 GMT" -H 'Authorization: Signature keyId="john-key",algorithm="hmac-sha256",headers="@request-target date",signature="ztFfl9w7LmCrIuPjRC/DWSF4gN6Bt8dBBz4y+u1pzt8="' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,显示在 5 个请求中,3 个请求成功(状态代码 200),而其他请求被拒绝(状态代码 429)。 + +```text +200: 3, 429: 2 +``` + +发送五个匿名请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,表明只有一个请求成功: + +```text +200: 1, 429: 4 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/http-dubbo.md 
b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/http-dubbo.md new file mode 100755 index 0000000..0fb7e9f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/http-dubbo.md @@ -0,0 +1,124 @@ +--- +title: http-dubbo +keywords: + - Apache APISIX + - API 网关 + - Plugin + - http-dubbo + - http to dubbo +description: 本文介绍了关于 Apache APISIX `http-dubbo` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`http-dubbo` 插件可以将 http 请求 encode 为 dubbo 协议转发给上游服务(注意:在 dubbo2.x 时上游服务的序列化类型必须是 fastjson) + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------------------ | ------- |-----| ------ | ----------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| service_name | string | 是 | | | dubbo 服务名 | +| service_version | string | 否 | 0.0.0 | | dubbo 服务版本 默认 0.0.0 | +| method | string | 是 | | | dubbo 服务方法名 | +| params_type_desc | string | 否 | | | dubbo 服务方法签名描述,入参如果是 void 可不填写 | +| serialization_header_key | string | 否 | | | 插件会读取该请求头判断 body 是否已经按照 dubbo 协议序列化完毕。如果该请求头的值为 true 则插件不会更改 body 内容,直接把他当作 dubbo 请求参数。如果为 false 则要求开发者按照 dubbo 泛化调用的格式传递参数,由插件进行序列化。注意:由于 lua 和 java 的插件序列化精度不同,可能会导致参数精度不同。 | +| serialized | boolean | 否 | false | [true, false] | 和`serialization_header_key`一样。优先级低于`serialization_header_key` | +| connect_timeout | number | 否 | 6000 | | 上游服务 tcp connect_timeout | +| read_timeout | number | 否 | 6000 | | 上游服务 tcp read_timeout | +| send_timeout | number | 否 | 6000 | | 上游服务 tcp send_timeout | + +## 启用插件 + +以下示例展示了如何在指定路由中启用 `http-dubbo` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/TestService/testMethod", + "plugins": { + 
"http-dubbo": { + "method": "testMethod", + "params_type_desc": "Ljava/lang/Long;Ljava/lang/Integer;", + "serialized": true, + "service_name": "com.xxx.xxx.TestService", + "service_version": "0.0.0" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,可以使用如下命令测试插件是否启用成功: + +```shell +curl --location 'http://127.0.0.1:9080/TestService/testMethod' \ +--data '1 +2' +``` + +## 如何获取 params_type_desc + +```java +Method[] declaredMethods = YourService.class.getDeclaredMethods(); +String params_type_desc = ReflectUtils.getDesc(Arrays.stream(declaredMethods).filter(it->it.getName().equals("yourmethod")).findAny().get().getParameterTypes()); + +//方法重载情况下需要找自己需要暴露的方法 ReflectUtils 为 dubbo 实现 +``` + +## 如何按照 dubbo 协议使用 json 进行序列化 + +为了防止精度丢失。我们推荐使用序列化好的 body 进行请求。 +dubbo 的 fastjson 序列化规则如下: + +- 每个参数之间使用 toJSONString 转化为 JSON 字符串 + +- 每个参数之间使用换行符 `\n` 分隔 + +部分语言和库在字符串或数字调用 toJSONString 后结果是不变的这可能需要你手动处理一些特殊情况例如: + +- 字符串 `abc"` 需要被 encode 为 `"abc\""` + +- 字符串 `123` 需要被 encode 为 `"123"` + +抽象类,父类或者泛型作为入参签名,入参需要具体类型时。序列化需要写入具体的类型信息具体参考 [WriteClassName](https://github.com/alibaba/fastjson/wiki/SerializerFeature_cn) + +## 删除插件 + +当你需要禁用 `http-dubbo` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/http-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/http-logger.md new file mode 100644 index 0000000..1903ddd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/http-logger.md @@ -0,0 +1,191 @@ +--- +title: http-logger +keywords: + - Apache APISIX + - API 网关 + - 插件 + - HTTP Logger + - 日志 +description: 本文介绍了 API 网关 Apache APISIX 的 http-logger 插件。使用该插件可以将 APISIX 的日志数据推送到 HTTP 或 HTTPS 服务器。 +--- + + + + +## 描述 + +`http-logger` 插件可以将 APISIX 的日志数据推送到 HTTP 或 HTTPS 服务器。该插件提供了将日志数据请求作为 JSON 对象发送到监控工具或者其他 HTTP 服务器的功能。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|-------------------------| 
------- |-----| ------------- | -------------------- | ------------------------------------------------ | +| uri | string | 是 | | | HTTP 或 HTTPS 服务器的 URI。 | +| auth_header | string | 否 | | | 授权 header(如果需要)。 | +| timeout | integer | 否 | 3 | [1,...] | 发送请求后保持连接处于活动状态的时间。 | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| include_req_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,将请求体包含在日志中。如果请求体太大而无法保存在内存中,由于 NGINX 的限制,无法记录。 | +| include_req_body_expr | array | 否 | | | 当 `include_req_body` 属性设置为 `true` 时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录请求体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 | +| include_resp_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,包含响应体。 | +| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 属性设置为 `true` 时,使用该属性并基于 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 进行过滤。如果存在,则仅在表达式计算结果为 `true` 时记录响应。 | +| concat_method | string | 否 | "json" | ["json", "new_line"] | 枚举类型: **json**:对所有待发日志使用 `json.encode` 编码。**new_line**:对每一条待发日志单独使用 `json.encode` 编码并使用 `\n` 连接起来。 | +| ssl_verify | boolean | 否 | false | [false, true] | 当设置为 `true` 时验证证书。 | + +该插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免该插件频繁地提交数据。默认情况下每 `5` 秒钟或队列中的数据达到 `1000` 条时,批处理器会自动提交数据,如需了解更多信息或自定义配置,请参考 [Batch Processor](../batch-processor.md#配置)。 + +### 默认日志格式示例 + + ```json + { + "service_id": "", + "apisix_latency": 100.99999809265, + "start_time": 1703907485819, + "latency": 101.99999809265, + "upstream_latency": 1, + "client_ip": "127.0.0.1", + "route_id": "1", + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "request": { + "headers": { + "host": "127.0.0.1:1984", + "content-type": "application/x-www-form-urlencoded", + "user-agent": "lua-resty-http/0.16.1 (Lua) ngx_lua/10025", + "content-length": "12" + }, + "method": "POST", + "size": 194, + "url": 
"http://127.0.0.1:1984/hello?log_body=no", + "uri": "/hello?log_body=no", + "querystring": { + "log_body": "no" + } + }, + "response": { + "headers": { + "content-type": "text/plain", + "connection": "close", + "content-length": "12", + "server": "APISIX/3.7.0" + }, + "status": 200, + "size": 123 + }, + "upstream": "127.0.0.1:1982" + } + ``` + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头。则表明获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::info 注意 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `http-logger` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/http-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你将在日志系统中看到如下类似日志: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## 启用插件 + +你可以通过如下命令在指定路由上启用 `http-logger` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": "http://mockbin.org/bin/:ID" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +[mockbin](http://mockbin.org/bin/create) 服务器用于模拟 HTTP 服务器,以方便查看 APISIX 生成的日志。 + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求,访问日志将记录在你的 `mockbin` 服务器中: + +```shell +curl -i 
http://127.0.0.1:9080/hello +``` + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ip-restriction.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ip-restriction.md new file mode 100644 index 0000000..fccc265 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ip-restriction.md @@ -0,0 +1,153 @@ +--- +title: ip-restriction +keywords: + - Apache APISIX + - API 网关 + - Plugin + - IP restriction + - ip-restriction +description: ip-restriction 插件支持通过配置 IP 地址白名单或黑名单来限制 IP 地址对上游资源的访问。 +--- + + + + + + + +## 描述 + +`ip-restriction` 插件支持通过配置 IP 地址白名单或黑名单来限制 IP 地址对上游资源的访问。限制 IP 对资源的访问有助于防止未经授权的访问并加强 API 安全性。 + +## 属性 + +| 参数名 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| --------- | ------------- | ------ | ------ | ------ | -------------------------------- | +| whitelist | array[string] | 否 | | | 要列入白名单的 IP 列表。支持 IPv4、IPv6 和 CIDR 表示法。 | +| blacklist | array[string] | 否 | | | 要列入黑名单的 IP 列表。支持 IPv4、IPv6 和 CIDR 表示法。 | +| message | string | 否 | "Your IP address is not allowed" | [1, 1024] | 在未允许的 IP 访问的情况下返回的信息。 | + +:::note + +`whitelist` 或 `blacklist` 至少配置一个,但不能同时配置。 + +::: + +## 示例 + +以下示例演示了如何针对不同场景配置 `ip-restriction` 插件。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 通过白名单限制访问 + +以下示例演示了如何将有权访问上游资源的 IP 地址列表列入白名单,并自定义拒绝访问的错误消息。 + +使用 `ip-restriction` 插件创建路由,将一系列 IP 列入白名单,并自定义拒绝访问时的错误消息: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ip-restriction-route", + "uri": "/anything", + "plugins": { + 
"ip-restriction": { + "whitelist": [ + "192.168.0.1/24" + ], + "message": "Access denied" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +如果您的 IP 被允许,您应该会收到 `HTTP/1.1 200 OK` 响应。如果不允许,您应该会收到 `HTTP/1.1 403 Forbidden` 响应,并显示以下错误消息: + +```text +{"message":"Access denied"} +``` + +### 使用修改后的 IP 限制访问 + +以下示例演示了如何使用 `real-ip` 插件修改用于 IP 限制的 IP。如果 APISIX 位于反向代理之后,并且 APISIX 无法获得真实客户端 IP,则此功能特别有用。 + +使用 `ip-restriction` 插件创建路由,将特定 IP 地址列入白名单,并从 URL 参数 `realip` 获取客户端 IP 地址: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ip-restriction-route", + "uri": "/anything", + "plugins": { + "ip-restriction": { + "whitelist": [ + "192.168.1.241" + ] + }, + "real-ip": { + "source": "arg_realip" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything?realip=192.168.1.241" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +使用不同的 IP 地址发送另一个请求: + +```shell +curl -i "http://127.0.0.1:9080/anything?realip=192.168.10.24" +``` + +您应该会收到 `HTTP/1.1 403 Forbidden` 响应。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/jwe-decrypt.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/jwe-decrypt.md new file mode 100644 index 0000000..b3bc92f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/jwe-decrypt.md @@ -0,0 +1,199 @@ +--- +title: jwe-decrypt +keywords: + - Apache APISIX + - API 网关 + - APISIX 插件 + - JWE Decrypt + - jwe-decrypt +description: 本文档包含了关于 APISIX jwe-decrypt 插件的相关信息。 +--- + + + +## 描述 + +`jwe-decrypt` 插件,用于解密 APISIX [Service](../terminology/service.md) 或者 [Route](../terminology/route.md) 请求中的 [JWE](https://datatracker.ietf.org/doc/html/rfc7516) 授权请求头。 + +插件增加了一个 `/apisix/plugin/jwe/encrypt` 的内部 API,提供给 JWE 加密使用。解密时,秘钥应该配置在 
[Consumer](../terminology/consumer.md)内。 + +## 属性 + +Consumer 配置: + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|---------------|---------|-------|-------|-----|-------------------------------------------------------------| +| key | string | True | | | Consumer 的唯一 key | +| secret | string | True | | | 解密密钥,必须为 32 位。秘钥可以使用 [Secret](../terminology/secret.md) 资源保存在密钥管理服务中 | +| is_base64_encoded | boolean | False | false | | 如果密钥是 Base64 编码,则需要配置为 `true` | + +:::note + +注意,在启用 `is_base64_encoded` 后,你的 `secret` 长度可能会超过 32 位,你只需要保证在 Decode 后的长度仍然是 32 位即可。 + +::: + +Route 配置: + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|----------------|---------|-------|---------------|----------------------------------------------------------------------------| +| header | string | True | Authorization | 指定请求头,用于获取加密令牌 | +| forward_header | string | True | Authorization | 传递给 Upstream 的请求头名称 | +| strict | boolean | False | true | 如果为配置为 true,请求中缺失 JWE token 则抛出 `403` 异常。如果为 `false`, 在缺失 JWE token 的情况下不会抛出异常 | + +## 启用插件 + +首先,基于 `jwe-decrypt` 插件创建一个 Consumer,并且配置解密密钥: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "jwe-decrypt": { + "key": "user-key", + "secret": "-secret-length-must-be-32-chars-" + } + } +}' +``` + +下一步,基于 `jwe-decrypt` 插件创建一个路由,用于解密 authorization 请求头: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/anything*", + "plugins": { + "jwe-decrypt": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +### 使用 JWE 加密数据 + +该插件创建了一个内部的 API `/apisix/plugin/jwe/encrypt` 以使用 JWE 进行加密。要公开它,需要创建一个对应的路由,并启用 [public-api](public-api.md) 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/jwenew 
-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/apisix/plugin/jwe/encrypt", + "plugins": { + "public-api": {} + } +}' +``` + +向 API 发送一个请求,将 Consumer 中配置的密钥,以参数的方式传递给 URI,用于加密 payload 中的一些数据。 + +```shell +curl -G --data-urlencode 'payload={"uid":10000,"uname":"test"}' 'http://127.0.0.1:9080/apisix/plugin/jwe/encrypt?key=user-key' -i +``` + +您应该看到类似于如下内容的响应结果,其中 JWE 加密的数据位于响应体中: + +``` +HTTP/1.1 200 OK +Date: Mon, 25 Sep 2023 02:38:16 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.5.0 +Apisix-Plugins: public-api + +eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy.hfzMJ0YfmbMcJ0ojgv4PYAHxPjlgMivmv35MiA.7nilnBt2dxLR_O6kf-HQUA +``` + +### 使用 JWE 解密数据 + +将加密数据放在 `Authorization` 请求头中,向 API 发起请求: + +```shell +curl http://127.0.0.1:9080/anything/hello -H 'Authorization: eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy.hfzMJ0YfmbMcJ0ojgv4PYAHxPjlgMivmv35MiA.7nilnBt2dxLR_O6kf-HQUA' -i +``` + +您应该可以看到类似于如下的响应内容,其中 `Authorization` 响应头显示了有效的解密内容: + +``` +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 452 +Connection: keep-alive +Date: Mon, 25 Sep 2023 02:38:59 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/3.5.0 +Apisix-Plugins: jwe-decrypt + +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Authorization": "{\"uid\":10000,\"uname\":\"test\"}", + "Host": "127.0.0.1", + "User-Agent": "curl/8.1.2", + "X-Amzn-Trace-Id": "Root=1-6510f2c3-1586ec011a22b5094dbe1896", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 119.143.79.94", + "url": "http://127.0.0.1/anything/hello" +} +``` + +## 删除插件 + +要删除 `jwe-decrypt` 插件,您可以从插件配置中删除插件对应的 JSON 配置,APISIX 会自动加载,您不需要重新启动即可生效。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + 
"uri": "/anything*", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/jwt-auth.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/jwt-auth.md new file mode 100644 index 0000000..8977d82 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/jwt-auth.md @@ -0,0 +1,907 @@ +--- +title: jwt-auth +keywords: + - Apache APISIX + - API 网关 + - Plugin + - JWT Auth + - jwt-auth +description: jwt-auth 插件支持使用 JSON Web Token (JWT) 作为客户端在访问上游资源之前进行身份验证的机制。 +--- + + + +## 描述 + +`jwt-auth` 插件支持使用 [JSON Web Token (JWT)](https://jwt.io/) 作为客户端在访问上游资源之前进行身份验证的机制。 + +启用后,该插件会公开一个端点,供 [消费者](../terminology/consumer.md) 创建 JWT 凭据。该过程会生成一个令牌,客户端请求应携带该令牌以向 APISIX 标识自己。该令牌可以包含在请求 URL 查询字符串、请求标头或 cookie 中。然后,APISIX 将验证该令牌以确定是否应允许或拒绝请求访问上游资源。 + +当消费者成功通过身份验证后,APISIX 会在将请求代理到上游服务之前向请求添加其他标头,例如 `X-Consumer-Username`、`X-Credential-Indentifier` 和其他消费者自定义标头(如已配置)。上游服务将能够区分消费者并根据需要实施其他逻辑。如果任何一个值不可用,则不会添加相应的标题。 + +## 属性 + +Consumer/Credential 端: + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------- | ----- | ------- | --------------------------- | ------------------------------------------------------------------------------------------------------------ | +| key | string | 是 | | | 消费者的唯一密钥。 | +| secret | string | 否 | | | 当使用对称算法时,用于对 JWT 进行签名和验证的共享密钥。使用 `HS256` 或 `HS512` 作为算法时必填。如果未指定,后台将会自动生成。该字段支持使用 [APISIX Secret](../terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | +| public_key | string | 否 | | | RSA 或 ECDSA 公钥, `algorithm` 属性选择 `RS256` 或 `ES256` 算法时必选。该字段支持使用 [APISIX Secret](../terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | +| algorithm | string | 否 | "HS256" | ["HS256", "HS512", "RS256", "ES256"] | 加密算法。 | +| exp | integer | 否 | 86400 | [1,...] | token 的超时时间。 | +| base64_secret | boolean | 否 | false | | 当设置为 `true` 时,密钥为 base64 编码。 | +| lifetime_grace_period | integer | 否 | 0 | [0,...] 
| 宽限期(以秒为单位)。用于解决生成 JWT 的服务器与验证 JWT 的服务器之间的时钟偏差。 | +| key_claim_name | string | 否 | key | | JWT payload 中的声明用于标识相关的秘密,例如 `iss`。 | + +注意:schema 中还定义了 `encrypt_fields = {"secret"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +Route 端: + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ------ | ------ | ------ | ------------- |---------------------------------------------------------| +| header | string | 否 | authorization | 设置我们从哪个 header 获取 token。 | +| query | string | 否 | jwt | 设置我们从哪个 query string 获取 token,优先级低于 header。 | +| cookie | string | 否 | jwt | 设置我们从哪个 cookie 获取 token,优先级低于 query。 | +| hide_credentials | boolean | 否 | false | 如果为 true,则不要将 header、query 或带有 JWT 的 cookie 传递给上游服务。 | +| key_claim_name | string | 否 | key | 包含用户密钥(对应消费者的密钥属性)的 JWT 声明的名称。| +| anonymous_consumer | string | 否 | false | 匿名消费者名称。如果已配置,则允许匿名用户绕过身份验证。 | +| store_in_ctx | boolean | 否 | false | 设置为 `true` 将会将 JWT 负载存储在请求上下文 (`ctx.jwt_auth_payload`) 中。这允许在同一请求上随后运行的低优先级插件检索和使用 JWT 令牌。 | + +您可以使用 [HashiCorp Vault](https://www.vaultproject.io/) 实施 `jwt-auth`,以从其[加密的 KV 引擎](https://developer.hashicorp.com/vault/docs/secrets/kv) 使用 [APISIX Secret](../terminology/secret.md) 资源。 + +## 示例 + +以下示例演示了如何在不同场景中使用 `jwt-auth` 插件。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 使用 JWT 进行消费者身份验证 + +以下示例演示如何实现 JWT 进行消费者密钥身份验证。 + +创建消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +为消费者创建 `jwt-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "jack-key", + "secret": "jack-hs256-secret" + } + } + }' +``` + +使用 `jwt-auth` 插件创建路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H 
"X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-route", + "uri": "/headers", + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +要为 `jack` 签发 JWT,您可以使用 [JWT.io 的调试器](https://jwt.io/#debugger-io) 或其他实用程序。如果您使用的是 [JWT.io 的调试器](https://jwt.io/#debugger-io),请执行以下操作: + +* 在 __Algorithm__ 下拉菜单中选择 __HS256__。 +* 将 __Verify Signature__ 部分中的密钥更新为 `jack-hs256-secret`。 +* 使用消费者密钥 `jack-key` 更新有效 payload;并在 UNIX 时间戳中添加 `exp` 或 `nbf`。 + + 您的 payload 应类似于以下内容: + + ```json + { + "key": "jack-key", + "nbf": 1729132271 + } + ``` + +将生成的 JWT 复制到 __Encoded__ 部分并保存到变量中: + +```text +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsIm5iZiI6MTcyOTEzMjI3MX0.0VDKUzNkSaa_H5g_rGNbNtDcKJ9fBGgcGC56AsVsV-I +``` + +使用 `Authorization` 标头中的 JWT 向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/headers" -H "Authorization: ${jwt_token}" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```text +{ + "headers": { + "Accept": "*/*", + "Authorization": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE3MjY2NDk2NDAsImtleSI6ImphY2sta2V5In0.kdhumNWrZFxjUvYzWLt4lFr546PNsr9TXuf0Az5opoM", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66ea951a-4d740d724bd2a44f174d4daf", + "X-Consumer-Username": "jack", + "X-Credential-Identifier": "cred-jack-jwt-auth", + "X-Forwarded-Host": "127.0.0.1" + } +} +``` + +30 秒后,令牌将过期。使用相同令牌发送请求以验证: + +```shell +curl -i "http://127.0.0.1:9080/headers" -H "Authorization: ${jwt_token}" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 401 Unauthorized` 响应: + +```text +{"message":"failed to verify jwt"} +``` + +### 在请求标头、查询字符串或 Cookie 中携带 JWT + +以下示例演示如何在指定的标头、查询字符串和 Cookie 中接受 JWT。 + +创建一个消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +为消费者创建 `jwt-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H 
"X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "jack-key", + "secret": "jack-hs256-secret" + } + } + }' +``` + +创建一个带有 `jwt-auth` 插件的路由,并指定请求可以在标头、查询或 cookie 中携带令牌: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-route", + "uri": "/get", + "plugins": { + "jwt-auth": { + "header": "jwt-auth-header", + "query": "jwt-query", + "cookie": "jwt-cookie" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +要为 `jack` 签发 JWT,您可以使用 [JWT.io 的调试器](https://jwt.io/#debugger-io) 或其他实用程序。如果您使用的是 [JWT.io 的调试器](https://jwt.io/#debugger-io),请执行以下操作: + +* 在 __Algorithm__ 下拉菜单中选择 __HS256__。 +* 将 __Verify Signature__ 部分中的密钥更新为 `jack-hs256-secret`。 +* 使用消费者密钥 `jack-key` 更新有效 payload;并在 UNIX 时间戳中添加 `exp` 或 `nbf`。 + + 您的有效 payload 应类似于以下内容: + + ```json + { + "key": "jack-key", + "nbf": 1729132271 + } + ``` + +将生成的 JWT 复制到 __Encoded__ 部分并保存到变量中: + +```text +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsIm5iZiI6MTcyOTEzMjI3MX0.0VDKUzNkSaa_H5g_rGNbNtDcKJ9fBGgcGC56AsVsV-I +``` + +#### 使用标头中的 JWT 进行验证 + +发送标头中包含 JWT 的请求: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "jwt-auth-header: ${jwt_token}" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "Jwt-Auth-Header": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTY5NTEyOTA0NH0.EiktFX7di_tBbspbjmqDKoWAD9JG39Wo_CAQ1LZ9voQ", + ... + }, + ... +} +``` + +#### 在查询字符串中使用 JWT 进行验证 + +在查询字符串中使用 JWT 发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get?jwt-query=${jwt_token}" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```text +{ + "args": { + "jwt-query": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTY5NTEyOTA0NH0.EiktFX7di_tBbspbjmqDKoWAD9JG39Wo_CAQ1LZ9voQ" + }, + "headers": { + "Accept": "*/*", + ... 
+ }, + "origin": "127.0.0.1, 183.17.233.107", + "url": "http://127.0.0.1/get?jwt-query=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTY5NTEyOTA0NH0.EiktFX7di_tBbspbjmqDKoWAD9JG39Wo_CAQ1LZ9voQ" +} +``` + +#### 使用 Cookie 中的 JWT 进行验证 + +使用 cookie 中的 JWT 发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get" --cookie jwt-cookie=${jwt_token} +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Cookie": "jwt-cookie=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTY5NTEyOTA0NH0.EiktFX7di_tBbspbjmqDKoWAD9JG39Wo_CAQ1LZ9voQ", + ... + }, + ... +} +``` + +### 管理环境变量中的机密 + +以下示例演示了如何将 `jwt-auth` 消费者密钥保存到环境变量并在配置中引用它。 + +APISIX 支持引用通过 [NGINX `env` 指令](https://nginx.org/en/docs/ngx_core_module.html#env) 配置的系统和用户环境变量。 + +将密钥保存到环境变量中: + +```shell +JACK_JWT_AUTH_KEY=jack-key +``` + +创建一个消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +为消费者创建 `jwt-auth` 凭证并在密钥中引用环境变量: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "$env://JACK_JWT_AUTH_KEY", + "secret": "jack-hs256-secret" + } + } + }' +``` + +创建路由并启用 `jwt-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-route", + "uri": "/get", + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +要为 `jack` 签发 JWT,您可以使用 [JWT.io 的调试器](https://jwt.io/#debugger-io) 或其他实用程序。如果您使用 [JWT.io 的调试器](https://jwt.io/#debugger-io),请执行以下操作: + +* 在 __Algorithm__ 下拉列表中选择 __HS256__。 +* 将 __Verify Signature__ 部分中的密钥更新为 `jack-hs256-secret` 。 +* 使用消费者密钥 `jack-key` 更新有效 payload;并在 UNIX 时间戳中添加 `exp` 或 `nbf`。 + + 您的有效 payload 应类似于以下内容: + + ```json + { 
+ "key": "jack-key", + "nbf": 1729132271 + } + ``` + +将生成的 JWT 复制到 __Encoded__ 部分并保存到变量中: + +```text +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsIm5iZiI6MTcyOTEzMjI3MX0.0VDKUzNkSaa_H5g_rGNbNtDcKJ9fBGgcGC56AsVsV-I +``` + +发送标头中包含 JWT 的请求: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Authorization: ${jwt_token}" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Authorization": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE2OTUxMzMxNTUsImtleSI6Imp3dC1rZXkifQ.jiKuaAJqHNSSQCjXRomwnQXmdkC5Wp5VDPRsJlh1WAQ", + ... + }, + ... +} +``` + +### 在秘密管理器中管理秘密 + +以下示例演示了如何管理 [HashiCorp Vault](https://www.vaultproject.io) 中的 `jwt-auth` 消费者密钥并在插件配置中引用它。 + +在 Docker 中启动 Vault 开发服务器: + +```shell +docker run -d \ + --name vault \ + -p 8200:8200 \ + --cap-add IPC_LOCK \ + -e VAULT_DEV_ROOT_TOKEN_ID=root \ + -e VAULT_DEV_LISTEN_ADDRESS=0.0.0.0:8200 \ + vault:1.9.0 \ + vault server -dev +``` + +APISIX 目前支持 [Vault KV 引擎版本 1](https://developer.hashicorp.com/vault/docs/secrets/kv#kv-version-1)。在 Vault 中启用它: + +```shell +docker exec -i vault sh -c "VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault secrets enable -path=kv -version=1 kv" +``` + +您应该看到类似于以下内容的响应: + +```text +Success! 
Enabled the kv secrets engine at: kv/ +``` + +创建一个 secret 并配置 Vault 地址和其他连接信息: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/secrets/vault/jwt" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "uri": "http://127.0.0.1:8200", + "prefix": "kv/apisix", + "token": "root" + }' +``` + +创建一个消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +为消费者创建 `jwt-auth` 凭证并引用密钥中的秘密: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "$secret://vault/jwt/jack/jwt-key", + "secret": "vault-hs256-secret" + } + } + }' +``` + +创建路由并启用 `jwt-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-route", + "uri": "/get", + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +在 Vault 中将 `jwt-auth` 键值设置为 `jwt-vault-key`: + +```shell +docker exec -i vault sh -c "VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/jack jwt-key=jwt-vault-key" +``` + +您应该看到类似于以下内容的响应: + +```text +Success! 
Data written to: kv/apisix/jack +``` + +要签发 JWT,您可以使用 [JWT.io 的调试器](https://jwt.io/#debugger-io) 或其他实用程序。如果您使用 [JWT.io 的调试器](https://jwt.io/#debugger-io),请执行以下操作: + +* 在 __Algorithm__ 下拉列表中选择 __HS256__。 +* 将 __Verify Signature__ 部分中的密钥更新为 `vault-hs256-secret` 。 +* 使用消费者密钥 `jwt-vault-key` 更新有效 payload;并在 UNIX 时间戳中添加 `exp` 或 `nbf`。 + + 您的有效 payload 应类似于以下内容: + + ```json + { + "key": "jwt-vault-key", + "nbf": 1729132271 + } + ``` + +将生成的 JWT 复制到 __Encoded__ 部分并保存到变量中: + +```text +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqd3QtdmF1bHQta2V5IiwibmJmIjoxNzI5MTMyMjcxfQ.faiN93LNP1lGSXqAb4empNJKMRWop8-KgnU58VQn1EE +``` + +使用令牌作为标头发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Authorization: ${jwt_token}" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqd3QtdmF1bHQta2V5IiwiZXhwIjoxNjk1MTM4NjM1fQ.Au2liSZ8eQXUJR3SJESwNlIfqZdNyRyxIJK03L4dk_g", + ... + }, + ... +} +``` + +### 使用 RS256 算法签署 JWT + +以下示例演示了在实施 JWT 进行消费者身份验证时如何使用非对称算法(例如 RS256)来签名和验证 JWT。您将使用 [openssl](https://openssl-library.org/source/) 生成 RSA 密钥对,并使用 [JWT.io](https://jwt.io/#debugger-io) 生成 JWT,以更好地了解 JWT 的组成。 + +生成 2048 位的 RSA 私钥并提取对应的 PEM 格式的公钥: + +```shell +openssl genrsa -out jwt-rsa256-private.pem 2048 +openssl rsa -in jwt-rsa256-private.pem -pubout -out jwt-rsa256-public.pem +``` + +您应该会看到在当前工作目录中生成了 `jwt-rsa256-private.pem` 和 `jwt-rsa256-public.pem` 。 + +访问 [JWT.io 的调试器](https://jwt.io/#debugger-io) 并执行以下操作: + +* 在 __Algorithm__ 下拉列表中选择 __RS256__。 +* 将 key 复制并粘贴到 __Verify Signature__ 部分。 +* 使用与您想要使用的消费者密钥匹配的 `key` 更新有效 payload;以及 UNIX 时间戳中的 `exp` 或 `nbf`。 + +配置应类似于以下内容: + +
+
+complete configuration of JWT generation on jwt.io +
+
+ +复制左侧的 JWT 并保存到环境变量中: + +```shell +jwt_token=eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsImV4cCI6MTczNDIzMDQwMH0.XjqM0oszmCggwZs-8PUIlJv8wPJON1la2ET5v70E6TCE32Yq5ibrl-1azaK7IreAer3HtnVHeEfII2rR02v8xfR1TPIjU_oHov4qC-A4tLTbgqGVXI7fCy2WFm3PFh6MEKuRe6M3dCQtCAdkRRQrBr1gWFQZhV3TNeMmmtyIfuJpB7cp4DW5pYFsCcoE1Nw6Tz7dt8k0tPBTPI2Mv9AYfMJ30LHDscOaPNtz8YIk_TOkV9b9mhQudUJ7J_suCZMRxD3iL655jTp2gKsstGKdZa0_W9Reu4-HY3LSc5DS1XtfjuftpuUqgg9FvPU0mK_b0wT_Rq3lbYhcHb9GZ72qiQ +``` + +创建一个消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +为消费者创建 `jwt-auth` 凭证并配置 RSA 密钥: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "jack-key", + "algorithm": "RS256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnE0h4k/GWfEbYO/yE2MPjHtNKDLNz4mv1KNIPLxY2ccjPYOtjuug+iZ4MujLV59YfrHriTs0H8jweQfff3pRSMjyEK+4qWTY3TeKBXIEa3pVDeoedSJrgjLBVio6xH7et8ir+QScScfLaJHGB4/l3DDGyEhO782a9teY8brn5hsWX5uLmDJvxtTGAHYi847XOcx2UneW4tZ8wQ6JGBSiSg5qAHan4dFZ7CpixCNNqEcSK6EQ7lKOLeFGG8ys/dHBIEasU4oMlCuJH77+XQQ/shchy+vm9oZfP+grLZkV+nKAd8MQZsid7ZJ/fiB/BmnhGrjtIfh98jwxSx4DgdLhdwIDAQAB\n-----END PUBLIC KEY-----", + "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCcTSHiT8ZZ8Rtg7/ITYw+Me00oMs3Pia/Uo0g8vFjZxyM9g62O66D6Jngy6MtXn1h+seuJOzQfyPB5B99/elFIyPIQr7ipZNjdN4oFcgRrelUN6h51ImuCMsFWKjrEft63yKv5BJxJx8tokcYHj+XcMMbISE7vzZr215jxuufmGxZfm4uYMm/G1MYAdiLzjtc5zHZSd5bi1nzBDokYFKJKDmoAdqfh0VnsKmLEI02oRxIroRDuUo4t4UYbzKz90cEgRqxTigyUK4kfvv5dBD+yFyHL6+b2hl8/6CstmRX6coB3wxBmyJ3tkn9+IH8GaeEauO0h+H3yPDFLHgOB0uF3AgMBAAECggEARpY68Daw0Funzq5uN70r/3iLztSqx8hZpQEclXlF8wwQ6S33iqz1JSOMcwlZE7g9wfHd+jrHfndDypT4pVx7KxC86TZCghWuLrFvXqgwQM2dbcxGdwXVYZZEZAJsSeM19+/jYnFnl5ZoUVBMC4w79aX9j+O/6mKDUmjphHmxUuRCFjN0w7BRoYwmS796rSf1eoOcSXh2G9Ycc34DUFDfGpOzabndbmMfOz7W0DyUBG23fgLhNChTUGq8vMaqKXkQ8JKeKdEugSmRGz42HxjWoNlIGBDyB8tPNPT6SXsu/JBskdf9Gb71OWiub381oXC259sz+1K1REb1KSkgyC+bkQKBgQDKCnwXaf8aOIoJPCG53EqQfKScCIYQrvp1Uk3bs5tfYN4HcI3yAUnOqQ3Ux3eY9PfS37urlJXCfCbCnZ6P6xALZnN+aL2zWvZArlHvD6vnXiyevwK5IY+o2EW02h3A548wrGznQSsfX0tum22bEVlRuFfBbpZpizXwrV4ODSNhTwKBgQDGC27QQxah3yq6EbOhJJlJegjawVXEaEp/j4fD3qe/unLbUIFvCz6j9BAbgocDKzqXxlpTtIbnsesdLo7KM3MtYL0XO/87HIsBj9XCVgMkFCcM6YZ6fHnkJl0bs3haU4N9uI/wpokvfvXJp7iC9LUCseBdBj+N6T230HWiSbPjWQKBgQC8zzGKO/8vRNkSqkQmSczQ2/qE6p5G5w6eJy0lfOJdLswvDatJFpUf8PJA/6svoPYb9gOO5AtUNeuPAfeVLSnQTYzu+/kTrJTme0GMdAvE60gtjfmAgvGa64mw6gjWJk+1P92B+2/OIKMAmXXDbWIYMXqpBKzBs1vUMF/uJ68BlwKBgQDEivQem3YKj3/HyWmLstatpP7EmrqTgSzuC3OhX4b7L/5sySirG22/KKgTpSZ4bp5noeJiz/ZSWrAK9fmfkg/sKOV/+XsDHwCVPDnX86SKWbWnitp7FK2jTq94nlQC0H7edhvjqGLdUBJ9XoYu8MvzMLSJnXnVTHSDx832kU6FgQKBgQCbw4Eiu2IcOduIAokmsZl8Smh9ZeyhP2B/UBa1hsiPKQ6bw86QJr2OMbRXLBxtx+HYIfwDo4vXEE862PfoQyu6SjJBNmHiid7XcV06Z104UQNjP7IDLMMF+SASMqYoQWg/5chPfxBgIXnfWqw6TMmND3THY4Oj4Nhf4xeUg3HsaA==\n-----END PRIVATE KEY-----" + } + } + }' +``` + +:::tip + +您应该在开始行之后和结束行之前添加换行符,例如`-----BEGIN PRIVATE KEY-----\n......\n-----END PRIVATE KEY -----`。 + +关键内容可以直接拼接。 + +::: + +使用 `jwt-auth` 插件创建路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-route", + "uri": "/headers", + 
"plugins": { + "jwt-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +使用 `Authorization` 标头中的 JWT 向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/headers" -H "Authorization: ${jwt_token}" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "headers": { + "Accept": "*/*", + "Authorization": "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsImV4cCI6MTczNDIzMDQwMH0.XjqM0oszmCggwZs-8PUIlJv8wPJON1la2ET5v70E6TCE32Yq5ibrl-1azaK7IreAer3HtnVHeEfII2rR02v8xfR1TPIjU_oHov4qC-A4tLTbgqGVXI7fCy2WFm3PFh6MEKuRe6M3dCQtCAdkRRQrBr1gWFQZhV3TNeMmmtyIfuJpB7cp4DW5pYFsCcoE1Nw6Tz7dt8k0tPBTPI2Mv9AYfMJ30LHDscOaPNtz8YIk_TOkV9b9mhQudUJ7J_suCZMRxD3iL655jTp2gKsstGKdZa0_W9Reu4-HY3LSc5DS1XtfjuftpuUqgg9FvPU0mK_b0wT_Rq3lbYhcHb9GZ72qiQ", + ... + } +} +``` + +### 将消费者自定义 ID 添加到标头 + +以下示例演示了如何将消费者自定义 ID 附加到 `Consumer-Custom-Id` 标头中经过身份验证的请求,该标头可用于根据需要实现其他逻辑。 + +创建一个带有自定义 ID 标签的消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack", + "labels": { + "custom_id": "495aec6a" + } + }' +``` + +为消费者创建 `jwt-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-jwt-auth", + "plugins": { + "jwt-auth": { + "key": "jack-key", + "secret": "jack-hs256-secret" + } + } + }' +``` + +使用 `jwt-auth` 创建路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-auth-route", + "uri": "/anything", + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +要为 `jack` 签发 JWT,您可以使用 [JWT.io 的调试器](https://jwt.io/#debugger-io) 或其他实用程序。如果您使用的是 [JWT.io 的调试器](https://jwt.io/#debugger-io),请执行以下操作: + +* 在 __Algorithm__ 下拉菜单中选择 __HS256__。 +* 将 __Verify Signature__ 部分中的密钥更新为 `jack-hs256-secret` 。 +* 使用消费者密钥 `jack-key` 
更新有效 payload;并在 UNIX 时间戳中添加 `exp` 或 `nbf` 。
+
+  您的有效 payload 应类似于以下内容:
+
+  ```json
+  {
+    "key": "jack-key",
+    "nbf": 1729132271
+  }
+  ```
+
+将生成的 JWT 复制到 __Encoded__ 部分并保存到变量中:
+
+```text
+jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsIm5iZiI6MTcyOTEzMjI3MX0.0VDKUzNkSaa_H5g_rGNbNtDcKJ9fBGgcGC56AsVsV-I
+```
+
+使用 `Authorization` 标头中的 JWT 向路由发送请求:
+
+```shell
+curl -i "http://127.0.0.1:9080/anything" -H "Authorization: ${jwt_token}"
+```
+
+您应该看到类似于以下内容的 `HTTP/1.1 200 OK` 响应,其中附加了 `X-Consumer-Custom-Id`:
+
+```json
+{
+  "headers": {
+    "Accept": "*/*",
+    "Authorization": "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAiOjE3MjY2NDk2NDAsImtleSI6ImphY2sta2V5In0.kdhumNWrZFxjUvYzWLt4lFr546PNsr9TXuf0Az5opoM",
+    "Host": "127.0.0.1",
+    "User-Agent": "curl/8.6.0",
+    "X-Amzn-Trace-Id": "Root=1-66ea951a-4d740d724bd2a44f174d4daf",
+    "X-Consumer-Username": "jack",
+    "X-Credential-Identifier": "cred-jack-jwt-auth",
+    "X-Consumer-Custom-Id": "495aec6a",
+    "X-Forwarded-Host": "127.0.0.1"
+  }
+}
+```
+
+### 匿名消费者的速率限制
+
+以下示例演示了如何为普通消费者和匿名消费者配置不同的速率限制策略,其中匿名消费者不需要进行身份验证,并且配额较少。
+
+创建一个普通消费者 `jack`,并配置 `limit-count` 插件,以允许 30 秒内的配额为 3:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "username": "jack",
+    "plugins": {
+      "limit-count": {
+        "count": 3,
+        "time_window": 30,
+        "rejected_code": 429
+      }
+    }
+  }'
+```
+
+为消费者 `jack` 创建 `jwt-auth` 凭证:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "id": "cred-jack-jwt-auth",
+    "plugins": {
+      "jwt-auth": {
+        "key": "jack-key",
+        "secret": "jack-hs256-secret"
+      }
+    }
+  }'
+```
+
+创建匿名用户 `anonymous`,并配置 `limit-count` 插件,以允许 30 秒内配额为 1:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \
+  -H "X-API-KEY: ${admin_key}" \
+  -d '{
+    "username": "anonymous",
+    "plugins": {
+      "limit-count": {
+        "count": 1,
+        "time_window": 30,
+        "rejected_code": 429
+      }
+    }
+ }' +``` + +创建一个路由并配置 `jwt-auth` 插件以接受匿名消费者 `anonymous` 绕过身份验证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "jwt-auth-route", + "uri": "/anything", + "plugins": { + "jwt-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +要为 `jack` 签发 JWT,您可以使用 [JWT.io 的调试器](https://jwt.io/#debugger-io) 或其他实用程序。如果您使用的是 [JWT.io 的调试器](https://jwt.io/#debugger-io),请执行以下操作: + +* 在 __Algorithm__ 下拉菜单中选择 __HS256__。 +* 将 __Verify Signature__ 部分中的密钥更新为 `jack-hs256-secret`。 +* 使用角色 `user` 、权限 `read` 和消费者密钥 `jack-key` 以及 UNIX 时间戳中的 `exp` 或 `nbf` 更新有效 payload。 + + 您的有效 payload 应类似于以下内容: + + ```json + { + "key": "jack-key", + "nbf": 1729132271 + } + ``` + +将生成的 JWT 复制到 __Encoded__ 部分并保存到变量中: + +```shell +jwt_token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqYWNrLWtleSIsIm5iZiI6MTcyOTEzMjI3MX0.hjtSsEILpko14zb8-ibyxrB2tA5biYY9JrFm3do69vs +``` + +为了验证速率限制,请使用 jack 的 JWT 连续发送五个请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H "Authorization: ${jwt_token}" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,显示在 5 个请求中,3 个请求成功(状态代码 200),而其他请求被拒绝(状态代码 429)。 + +```text +200: 3, 429: 2 +``` + +发送五个匿名请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,表明只有一个请求成功: + +```text +200: 1, 429: 4 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/kafka-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/kafka-logger.md new file mode 100644 index 
0000000..e708a21 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/kafka-logger.md @@ -0,0 +1,247 @@ +--- +title: kafka-logger +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Kafka Logger +description: API 网关 Apache APISIX 的 kafka-logger 插件用于将日志作为 JSON 对象推送到 Apache Kafka 集群中。 +--- + + + +## 描述 + +`kafka-logger` 插件用于将日志作为 JSON 对象推送到 Apache Kafka 集群中。可用作 `ngx_lua` NGINX 模块的 Kafka 客户端驱动程序。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------------- | ------- | ------ | -------------- | --------------------- | ------------------------------------------------ | +| broker_list | object | 是 | | | 已废弃,现使用 `brokers` 属性代替。原指需要推送的 Kafka 的 broker 列表。 | +| brokers | array | 是 | | | 需要推送的 Kafka 的 broker 列表。 | +| brokers.host | string | 是 | | | Kafka broker 的节点 host 配置,例如 `192.168.1.1` | +| brokers.port | string | 是 | | | Kafka broker 的节点端口配置 | +| brokers.sasl_config | object | 否 | | | Kafka broker 中的 sasl_config | +| brokers.sasl_config.mechanism | string | 否 | "PLAIN" | ["PLAIN"] | Kafka broker 中的 sasl 认证机制 | +| brokers.sasl_config.user | string | 是 | | | Kafka broker 中 sasl 配置中的 user,如果 sasl_config 存在,则必须填写 | +| brokers.sasl_config.password | string | 是 | | | Kafka broker 中 sasl 配置中的 password,如果 sasl_config 存在,则必须填写 | +| kafka_topic | string | 是 | | | 需要推送的 topic。 | +| producer_type | string | 否 | async | ["async", "sync"] | 生产者发送消息的模式。 | +| required_acks | integer | 否 | 1 | [1, -1] | 生产者在确认一个请求发送完成之前需要收到的反馈信息的数量。该参数是为了保证发送请求的可靠性。该属性的配置与 Kafka `acks` 属性相同,具体配置请参考 [Apache Kafka 文档](https://kafka.apache.org/documentation/#producerconfigs_acks)。required_acks 还不支持为 0。 | +| key | string | 否 | | | 用于消息分区而分配的密钥。 | +| timeout | integer | 否 | 3 | [1,...] 
| 发送数据的超时时间。 | +| name | string | 否 | "kafka logger" | | 标识 logger 的唯一标识符。如果您使用 Prometheus 监视 APISIX 指标,名称将以 `apisix_batch_process_entries` 导出。 | +| meta_format | enum | 否 | "default" | ["default","origin"] | `default`:获取请求信息以默认的 JSON 编码方式。`origin`:获取请求信息以 HTTP 原始请求方式。更多信息,请参考 [meta_format](#meta_format-示例)。| +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| include_req_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,包含请求体。**注意**:如果请求体无法完全存放在内存中,由于 NGINX 的限制,APISIX 无法将它记录下来。| +| include_req_body_expr | array | 否 | | | 当 `include_req_body` 属性设置为 `true` 时进行过滤。只有当此处设置的表达式计算结果为 `true` 时,才会记录请求体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | +| max_req_body_bytes | integer | 否 | 524288 | >=1 | 允许的最大请求正文(以字节为单位)。在此限制内的请求体将被推送到 Kafka。如果大小超过配置值,则正文在推送到 Kafka 之前将被截断。 | +| include_resp_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,包含响应体。 | +| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 属性设置为 `true` 时进行过滤。只有当此处设置的表达式计算结果为 `true` 时才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。| +| max_resp_body_bytes | integer | 否 | 524288 | >=1 | 允许的最大响应正文(以字节为单位)。低于此限制的响应主体将被推送到 Kafka。如果大小超过配置值,则正文在推送到 Kafka 之前将被截断。 | +| cluster_name | integer | 否 | 1 | [0,...] | Kafka 集群的名称,当有两个及以上 Kafka 集群时使用。只有当 `producer_type` 设为 `async` 模式时才可以使用该属性。| +| producer_batch_num | integer | 否 | 200 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的 `batch_num` 参数,聚合消息批量提交,单位为消息条数。 | +| producer_batch_size | integer | 否 | 1048576 | [0,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的 `batch_size` 参数,单位为字节。 | +| producer_max_buffering | integer | 否 | 50000 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的 `max_buffering` 参数,表示最大缓冲区,单位为条。 | +| producer_time_linger | integer | 否 | 1 | [1,...] 
| 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的 `flush_time` 参数,单位为秒。| +| meta_refresh_interval | integer | 否 | 30 | [1,...] | 对应 [lua-resty-kafka](https://github.com/doujiang24/lua-resty-kafka) 中的 `refresh_interval` 参数,用于指定自动刷新 metadata 的间隔时长,单位为秒。 | + +该插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。 + +:::tip 提示 + +数据首先写入缓冲区。当缓冲区超过 `batch_max_size` 或 `buffer_duration` 设置的值时,则会将数据发送到 Kafka 服务器并刷新缓冲区。 + +如果发送成功,则返回 `true`。如果出现错误,则返回 `nil`,并带有描述错误的字符串 `buffer overflow`。 + +::: + +### meta_format 示例 + +- `default`: + + ```json + { + "upstream": "127.0.0.1:1980", + "start_time": 1619414294760, + "client_ip": "127.0.0.1", + "service_id": "", + "route_id": "1", + "request": { + "querystring": { + "ab": "cd" + }, + "size": 90, + "uri": "/hello?ab=cd", + "url": "http://localhost:1984/hello?ab=cd", + "headers": { + "host": "localhost", + "content-length": "6", + "connection": "close" + }, + "body": "abcdef", + "method": "GET" + }, + "response": { + "headers": { + "connection": "close", + "content-type": "text/plain; charset=utf-8", + "date": "Mon, 26 Apr 2021 05:18:14 GMT", + "server": "APISIX/2.5", + "transfer-encoding": "chunked" + }, + "size": 190, + "status": 200 + }, + "server": { + "hostname": "localhost", + "version": "2.5" + }, + "latency": 0 + } + ``` + +- `origin`: + + ```http + GET /hello?ab=cd HTTP/1.1 + host: localhost + content-length: 6 + connection: close + + abcdef + ``` + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ---------------- | ------- | ------ | ------------- |------------------------------------------------ | +| log_format | object | 否 | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::note 注意 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `kafka-logger` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 
配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/kafka-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你将在日志系统中看到如下类似日志: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## 如何启用 + +你可以通过如下命令在指定路由上启用 `kafka-logger` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "kafka-logger": { + "brokers" : [ + { + "host": "127.0.0.1", + "port": 9092 + } + ], + "kafka_topic" : "test2", + "key" : "key1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +该插件还支持一次推送到多个 Broker,示例如下: + +```json +"brokers" : [ + { + "host" :"127.0.0.1", + "port" : 9092 + }, + { + "host" :"127.0.0.1", + "port" : 9093 + } +], +``` + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/key-auth.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/key-auth.md new file mode 100644 index 0000000..fa6f469 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/key-auth.md @@ -0,0 +1,570 @@ +--- 
+title: key-auth +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Key Auth + - key-auth +description: key-auth 插件支持使用身份验证密钥作为客户端在访问上游资源之前进行身份验证的机制。 +--- + + + + + + + +## 描述 + +`key-auth` 插件支持使用身份验证密钥作为客户端在访问上游资源之前进行身份验证的机制。 + +要使用该插件,您需要在 [Consumers](../terminology/consumer.md) 上配置身份验证密钥,并在路由或服务上启用该插件。密钥可以包含在请求 URL 查询字符串或请求标头中。然后,APISIX 将验证密钥以确定是否应允许或拒绝请求访问上游资源。 + +当消费者成功通过身份验证后,APISIX 会在将请求代理到上游服务之前向请求添加其他标头,例如 `X-Consumer-Username`、`X-Credential-Indentifier` 和其他消费者自定义标头(如果已配置)。上游服务将能够区分消费者并根据需要实现其他逻辑。如果这些值中的任何一个不可用,则不会添加相应的标头。 + +## 属性 + +Consumer/Credential 端: + +| 名称 | 类型 | 必选项 | 描述 | +| ---- | ------ | ------ | ------------------------------------------------------------------------------------------------------------- | +| key | string | 是 | 不同的 Consumer 应有不同的 `key`,它应当是唯一的。如果多个 Consumer 使用了相同的 `key`,将会出现请求匹配异常。该字段支持使用 [APISIX Secret](../terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | + +注意:schema 中还定义了 `encrypt_fields = {"key"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +Route 端: + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ----------------- | ------ | ----- | ------ |----------------------------------------------------------------------------------------------------------------------------------------------------------| +| header | string | 否 | apikey | 设置我们从哪个 header 获取 key。 | +| query | string | 否 | apikey | 设置我们从哪个 query string 获取 key,优先级低于 `header`。 | +| hide_credentials | boolean | 否 | false | 如果为 `true`,则不要将含有认证信息的 header 或 query string 传递给 Upstream。 | + +## 示例 + +以下示例演示了如何在不同场景中使用 `key-auth` 插件。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 在路由上实现密钥认证 + +以下示例演示如何在路由上实现密钥认证并将密钥包含在请求标头中。 + +创建一个消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl 
"http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +使用 `key-auth` 创建路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +#### 使用有效密钥进行验证 + +使用有效密钥发送请求至: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H 'apikey: jack-key' +``` + +您应该收到 `HTTP/1.1 200 OK` 响应。 + +#### 使用无效密钥进行验证 + +使用无效密钥发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H 'apikey: wrong-key' +``` + +您应该看到以下 `HTTP/1.1 401 Unauthorized` 响应: + +```text +{"message":"Invalid API key in request"} +``` + +#### 无需密钥即可验证 + +无需密钥即可发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该看到以下 `HTTP/1.1 401 Unauthorized` 响应: + +```text +{"message":"Missing API key found in request"} +``` + +### 隐藏上游的身份验证信息 + +以下示例演示如何通过配置 `hide_credentials` 来防止密钥被发送到上游服务。默认情况下,身份验证密钥被转发到上游服务,这在某些情况下可能会导致安全风险。 + +创建一个消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +#### 不隐藏凭据 + +使用 `key-auth` 创建路由,并将 `hide_credentials` 配置为 `false` (默认配置): + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": { + "hide_credentials": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } 
+ } +}' +``` + +发送带有有效密钥的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything?apikey=jack-key" +``` + +您应该看到以下 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": { + "auth": "jack-key" + }, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Consumer-Username": "jack", + "X-Credential-Identifier": "cred-jack-key-auth", + "X-Amzn-Trace-Id": "Root=1-6502d8a5-2194962a67aa21dd33f94bb2", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 103.248.35.179", + "url": "http://127.0.0.1/anything?apikey=jack-key" +} +``` + +注意凭证 `jack-key` 对于上游服务是可见的。 + +#### 隐藏凭据 + +将插件的 `hide_credentials` 更新为 `true`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/key-auth-route" -X PATCH \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "plugins": { + "key-auth": { + "hide_credentials": true + } + } +}' +``` + +发送带有有效密钥的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything?apikey=jack-key" +``` + +您应该看到以下 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Consumer-Username": "jack", + "X-Credential-Identifier": "cred-jack-key-auth", + "X-Amzn-Trace-Id": "Root=1-6502d85c-16f34dbb5629a5960183e803", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 103.248.35.179", + "url": "http://127.0.0.1/anything" +} +``` + +注意凭证 `jack-key` 对上游服务不再可见。 + +### 演示标头和查询中的密钥优先级 + +以下示例演示了如何在路由上实现消费者的密钥身份验证,并自定义应包含密钥的 URL 参数。该示例还显示,当在标头和查询字符串中都配置了 API 密钥时,请求标头具有更高的优先级。 + +创建消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": 
"cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +使用 `key-auth` 创建路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ +-H "X-API-KEY: ${admin_key}" \ +-d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": { + "query": "auth" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +#### 使用有效密钥进行验证 + +使用有效密钥发送请求至: + +```shell +curl -i "http://127.0.0.1:9080/anything?auth=jack-key" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +#### 使用无效密钥进行验证 + +使用无效密钥发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything?auth=wrong-key" +``` + +您应该看到以下 `HTTP/1.1 401 Unauthorized` 响应: + +```text +{"message":"Invalid API key in request"} +``` + +#### 使用查询字符串中的有效密钥进行验证 + +但是,如果您在标头中包含有效密钥,而 URL 查询字符串中仍包含无效密钥: + +```shell +curl -i "http://127.0.0.1:9080/anything?auth=wrong-key" -H 'apikey: jack-key' +``` + +您应该会看到 `HTTP/1.1 200 OK` 响应。这表明标头中包含的密钥始终具有更高的优先级。 + +### 将消费者自定义 ID 添加到标头 + +以下示例演示了如何在 `Consumer-Custom-Id` 标头中将消费者自定义 ID 附加到经过身份验证的请求,该 ID 可用于根据需要实现其他逻辑。 + +创建一个带有自定义 ID 标签的消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack", + "labels": { + "custom_id": "495aec6a" + } + }' +``` + +Create `key-auth` credential for the consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +Create a Route with `key-auth`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +To verify, send a request to the Route with the valid key: + +```shell 
+curl -i "http://127.0.0.1:9080/anything?auth=jack-key" +``` + +You should see an `HTTP/1.1 200 OK` response similar to the following: + +```json +{ + "args": { + "auth": "jack-key" + }, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-66ea8d64-33df89052ae198a706e18c2a", + "X-Consumer-Username": "jack", + "X-Credential-Identifier": "cred-jack-key-auth", + "X-Consumer-Custom-Id": "495aec6a", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "192.168.65.1, 205.198.122.37", + "url": "http://127.0.0.1/anything?apikey=jack-key" +} +``` + +### 匿名消费者的速率限制 + +以下示例演示了如何为常规消费者和匿名消费者配置不同的速率限制策略,其中匿名消费者不需要进行身份验证,并且配额较少。 + +创建常规消费者 `jack` 并配置 `limit-count` 插件以允许 30 秒内的配额为 3: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack", + "plugins": { + "limit-count": { + "count": 3, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +为消费者 `jack` 创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +创建匿名用户 `anonymous`,并配置 `limit-count`插件,以允许 30 秒内配额为 1: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +创建路由并配置 `key-auth` 插件以接受匿名消费者 `anonymous` 绕过身份验证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 
1 + } + } + }' +``` + +为了验证,请使用 `jack` 的密钥发送五个连续的请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H 'apikey: jack-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,显示在 5 个请求中,3 个请求成功(状态代码 200),而其他请求被拒绝(状态代码 429)。 + +```text +200: 3, 429: 2 +``` + +发送五个匿名请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,表明只有一个请求成功: + +```text +200: 1, 429: 4 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ldap-auth.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ldap-auth.md new file mode 100644 index 0000000..0417879 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ldap-auth.md @@ -0,0 +1,167 @@ +--- +title: ldap-auth +keywords: + - Apache APISIX + - API 网关 + - Plugin + - LDAP Authentication + - ldap-auth +description: 本篇文档介绍了 Apache APISIX ldap-auth 插件的相关信息。 +--- + + + +## 描述 + +`ldap-auth` 插件可用于给路由或服务添加 LDAP 身份认证,该插件使用 [lua-resty-ldap](https://github.com/api7/lua-resty-ldap) 连接 LDAP 服务器。 + +该插件需要与 Consumer 一起配合使用,API 的调用方可以使用 [basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) 与 LDAP 服务器进行认证。 + +## 属性 + +Consumer 端: + +| 名称 | 类型 | 必选项 | 描述 | +| ------- | ------ | -------- | -------------------------------------------------------------------------------- | +| user_dn | string | 是 | LDAP 客户端的 dn,例如:`cn=user01,ou=users,dc=example,dc=org`。该字段支持使用 [APISIX Secret](../terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | + +Route 端: + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | 
+|----------|---------|----------|---------|------------------------------------------------------------------------|
+| base_dn | string | 是 | | LDAP 服务器的 dn,例如:`ou=users,dc=example,dc=org`。|
+| ldap_uri | string | 是 | | LDAP 服务器的 URI。 |
+| use_tls | boolean | 否 | false | 如果设置为 `true` 则表示启用 TLS。 |
+| tls_verify| boolean | 否 | false | 是否校验 LDAP 服务器的证书。如果设置为 `true`,你必须设置 `config.yaml` 里面的 `ssl_trusted_certificate`,并且确保 `ldap_uri` 里的 host 和服务器证书中的 host 匹配。 |
+| uid | string | 否 | cn | UID 属性。 |
+
+## 启用插件
+
+首先,你需要创建一个 Consumer 并在其中配置该插件,具体代码如下:
+
+:::note
+
+您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "username": "foo",
+    "plugins": {
+        "ldap-auth": {
+            "user_dn": "cn=user01,ou=users,dc=example,dc=org"
+        }
+    }
+}'
+```
+
+然后就可以在指定路由或服务中启用该插件,具体代码如下:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "methods": ["GET"],
+    "uri": "/hello",
+    "plugins": {
+        "ldap-auth": {
+            "base_dn": "ou=users,dc=example,dc=org",
+            "ldap_uri": "localhost:1389",
+            "uid": "cn"
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+
+## 测试插件
+
+通过上述方法配置插件后,可以通过以下命令测试插件:
+
+```shell
+curl -i -uuser01:password1 http://127.0.0.1:9080/hello
+```
+
+```shell
+HTTP/1.1 200 OK
+...
+hello, world
+```
+
+如果授权信息请求头丢失或无效,则请求将被拒绝(如下展示了几种返回结果):
+
+```shell
+curl -i http://127.0.0.1:9080/hello
+```
+
+```shell
+HTTP/1.1 401 Unauthorized
+...
+{"message":"Missing authorization in request"}
+```
+
+```shell
+curl -i -uuser:password1 http://127.0.0.1:9080/hello
+```
+
+```shell
+HTTP/1.1 401 Unauthorized
+...
+{"message":"Invalid user authorization"}
+```
+
+```shell
+curl -i -uuser01:passwordfalse http://127.0.0.1:9080/hello
+```
+
+```shell
+HTTP/1.1 401 Unauthorized
+...
+{"message":"Invalid user authorization"} +``` + +## 删除插件 + +当你需要禁用 `ldap-auth` 插件时,可以通过以下命令删除相应的 JSON 配置。APISIX 将自动重新加载,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/limit-conn.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/limit-conn.md new file mode 100644 index 0000000..e90fabd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/limit-conn.md @@ -0,0 +1,420 @@ +--- +title: limit-conn +keywords: + - APISIX + - API 网关 + - Limit Connection +description: limit-conn 插件通过管理并发连接来限制请求速率。超过阈值的请求可能会被延迟或拒绝,以确保 API 使用受控并防止过载。 +--- + + + + + + + +## 描述 + +`limit-conn` 插件通过并发连接数来限制请求速率。超过阈值的请求将根据配置被延迟或拒绝,从而确保可控的资源使用并防止过载。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|------------|---------|----------|-------|----------------------------|------------------| +| conn | integer | 是 | | > 0 | 允许的最大并发请求数。超过配置的限制且低于`conn + burst`的请求将被延迟。| +| burst | integer | 是 | | >= 0 | 每秒允许延迟的过多并发请求数。超过限制的请求将被立即拒绝。| +| default_conn_delay | number | 是 | | > 0 | 允许超过`conn + burst`的并发请求的处理延迟(秒),可根据`only_use_default_delay`设置动态调整。| +| only_use_default_delay | boolean | 否 | false | | 如果为 false,则根据请求超出`conn`限制的程度按比例延迟请求。拥塞越严重,延迟就越大。例如,当 `conn` 为 `5`、`burst` 为 `3` 且 `default_conn_delay` 为 `1` 时,6 个并发请求将导致 1 秒的延迟,7 个请求将导致 2 秒的延迟,8 个请求将导致 3 秒的延迟,依此类推,直到达到 `conn + burst` 的总限制,超过此限制的请求将被拒绝。如果为 true,则使用 `default_conn_delay` 延迟 `burst` 范围内的所有超额请求。超出 `conn + burst` 的请求将被立即拒绝。例如,当 `conn` 为 `5`、`burst` 为 `3` 且 `default_conn_delay` 为 `1` 时,6、7 或 8 个并发请求都将延迟 1 秒。| +| key_type | string | 否 | var | ["var","var_combination"] | key 的类型。如果`key_type` 为 `var`,则 `key` 将被解释为变量。如果 `key_type` 为 `var_combination`,则 `key` 将被解释为变量的组合。 | +| key | string | 否 | remote_addr | | 用于计数请求的 key。如果 
`key_type` 为 `var`,则 `key` 将被解释为变量。变量不需要以美元符号(`$`)为前缀。如果 `key_type` 为 `var_combination`,则 `key` 会被解释为变量的组合。所有变量都应该以美元符号 (`$`) 为前缀。例如,要配置 `key` 使用两个请求头 `custom-a` 和 `custom-b` 的组合,则 `key` 应该配置为 `$http_custom_a $http_custom_b`。|
+| rejected_code | integer | 否 | 503 | [200,...,599] | 请求因超出阈值而被拒绝时返回的 HTTP 状态代码。|
+| rejected_msg | string | 否 | | 非空 | 请求因超出阈值而被拒绝时返回的响应主体。|
+| allow_degradation | boolean | 否 | false | | 如果为 true,则允许 APISIX 在插件或其依赖项不可用时继续处理没有插件的请求。|
+| policy | string | 否 | local | ["local","redis","redis-cluster"] | 速率限制计数器的策略。如果是 `local`,则计数器存储在本地内存中。如果是 `redis`,则计数器存储在 Redis 实例上。如果是 `redis-cluster`,则计数器存储在 Redis 集群中。|
+| redis_host | string | 否 | | | Redis 节点的地址。当 `policy` 为 `redis` 时必填。 |
+| redis_port | integer | 否 | 6379 | [1,...] | 当 `policy` 为 `redis` 时,Redis 节点的端口。 |
+| redis_username | string | 否 | | | 如果使用 Redis ACL,则为 Redis 的用户名。如果使用旧式身份验证方法 `requirepass`,则仅配置 `redis_password`。当 `policy` 为 `redis` 时使用。 |
+| redis_password | string | 否 | | | 当 `policy` 为 `redis` 或 `redis-cluster` 时,Redis 节点的密码。 |
+| redis_ssl | boolean | 否 | false | | 如果为 true,则在 `policy` 为 `redis` 时使用 SSL 连接到 Redis 集群。|
+| redis_ssl_verify | boolean | 否 | false | | 如果为 true,则在 `policy` 为 `redis` 时验证服务器 SSL 证书。|
+| redis_database | integer | 否 | 0 | >= 0 | 当 `policy` 为 `redis` 时,Redis 中的数据库编号。|
+| redis_timeout | integer | 否 | 1000 | [1,...]
| 当 `policy` 为 `redis` 或 `redis-cluster` 时,Redis 超时值(以毫秒为单位)。 | +| redis_cluster_nodes | array[string] | 否 | | | 具有至少两个地址的 Redis 群集节点列表。当 policy 为 redis-cluster 时必填。 | +redis_cluster_name | string | 否 | | | | Redis 集群的名称。当 `policy` 为 `redis-cluster` 时必须使用。| +| redis_cluster_ssl | boolean | 否 | false | | 如果为 `true`,当 `policy` 为 `redis-cluster`时,使用 SSL 连接 Redis 集群。| +| redis_cluster_ssl_verify | boolean | 否 | false | | 如果为 `true`,当 `policy` 为 `redis-cluster` 时,验证服务器 SSL 证书。 | + +## 示例 + +以下示例演示了如何在不同场景中配置 `limit-conn`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 通过远程地址应用速率限制 + +以下示例演示如何使用 `limit-conn` 通过 `remote_addr` 限制请求速率,并附带示例连接和突发阈值。 + +使用 `limit-conn` 插件创建路由,以允许 2 个并发请求和 1 个过多的并发请求。此外: + +* 配置插件,允许超过 `conn + burst` 的并发请求有 0.1 秒的处理延迟。 +* 将密钥类型设置为 `vars`,以将 `key` 解释为变量。 +* 根据请求的 `remote_address` 计算速率限制计数。 +* 将 `policy` 设置为 `local`,以使用内存中的本地计数器。 +* 将 `rejected_code` 自定义为 `429`。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-conn-route", + "uri": "/get", + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key_type": "var", + "key": "remote_addr", + "policy": "local", + "rejected_code": 429 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送五个并发请求: + +```shell +seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get"' +``` + +您应该会看到类似以下内容的响应,其中超过阈值的请求被拒绝: + +```text +Response: 200 +Response: 200 +Response: 200 +Response: 429 +Response: 429 +``` + +### 通过远程地址和消费者名称应用速率限制 + +以下示例演示如何使用 `limit-conn` 通过变量组合 `remote_addr` 和 `consumer_name` 对请求进行速率限制。 + +创建消费者 `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + 
+为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +创建第二个消费者 `jane`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jane" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jane/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jane-key-auth", + "plugins": { + "key-auth": { + "key": "jane-key" + } + } + }' +``` + +创建一个带有 `key-auth` 和 `limit-conn` 插件的路由,并在 `limit-conn` 插件中指定使用变量组合作为速率限制 key: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-conn-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 429, + "key_type": "var_combination", + "key": "$remote_addr $consumer_name" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +作为消费者 `john` 发送五个并发请求: + +```shell +seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get" -H "apikey: john-key"' +``` + +您应该会看到类似以下内容的响应,其中超过阈值的请求被拒绝: + +```text +Response: 200 +Response: 200 +Response: 200 +Response: 429 +Response: 429 +``` + +接下来立刻以消费者 `jane` 的身份发送五个并发请求: + +```shell +seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get" -H "apikey: jane-key"' +``` + +您还应该看到类似以下内容的响应,其中过多的请求被拒绝: + +```text +Response: 200 +Response: 200 +Response: 200 +Response: 429 +Response: 429 +``` + +### 限制 WebSocket 连接速率 + +以下示例演示了如何使用 `limit-conn` 插件来限制并发 WebSocket 连接的数量。 + +启动 [上游 WebSocket 服务器](https://hub.docker.com/r/jmalloc/echo-server): + +```shell +docker 
run -d \ + -p 8080:8080 \ + --name websocket-server \ + --network=apisix-quickstart-net \ + jmalloc/echo-server +``` + +创建到服务器 WebSocket 端点的路由,并为路由启用 WebSocket。相应地调整 WebSocket 服务器地址。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "ws-route", + "uri": "/.ws", + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key_type": "var", + "key": "remote_addr", + "rejected_code": 429 + } + }, + "enable_websocket": true, + "upstream": { + "type": "roundrobin", + "nodes": { + "websocket-server:8080": 1 + } + } +}' +``` + +安装 WebSocket 客户端,例如 [websocat](https://github.com/vi/websocat),通过以下路由与 WebSocket 服务器建立连接: + +```shell +websocat "ws://127.0.0.1:9080/.ws" +``` + +在终端中发送 `hello` 消息,您应该会看到 WebSocket 服务器回显相同的消息: + +```text +Request served by 1cd244052136 +hello +hello +``` + +再打开三个终端会话并运行: + +```shell +websocat "ws://127.0.0.1:9080/.ws" +``` + +由于速率限制的影响,当您尝试与服务器建立 WebSocket 连接时,您应该会看到最后一个终端会话打印 `429 Too Many Requests`。 + +### 使用 Redis 服务器在 APISIX 节点之间共享配额 + +以下示例演示了使用 Redis 服务器对多个 APISIX 节点之间的请求进行速率限制,以便不同的 APISIX 节点共享相同的速率限制配额。 + +在每个 APISIX 实例上,使用以下配置创建路由。相应地调整管理 API、Redis 主机、端口、密码和数据库的地址。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-conn-route", + "uri": "/get", + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 429, + "key_type": "var", + "key": "remote_addr", + "policy": "redis", + "redis_host": "192.168.xxx.xxx", + "redis_port": 6379, + "redis_password": "p@ssw0rd", + "redis_database": 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送五个并发请求: + +```shell +seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get"' +``` + +您应该会看到类似以下内容的响应,其中超过阈值的请求被拒绝: + +```text +Response: 200 +Response: 200 +Response: 429 +Response: 429 +Response: 429 +``` + 
+这表明在不同 APISIX 实例中配置的两个路由共享相同的配额。 + +### 使用 Redis 集群在 APISIX 节点之间共享配额 + +您还可以使用 Redis 集群在多个 APISIX 节点之间应用相同的配额,以便不同的 APISIX 节点共享相同的速率限制配额。 + +确保您的 Redis 实例在 [集群模式](https://redis.io/docs/management/scaling/#create-and-use-a-redis-cluster) 下运行。`limit-conn` 插件配置至少需要两个节点。 + +在每个 APISIX 实例上,使用以下配置创建一个路由。相应地调整管理 API 的地址、Redis 集群节点、密码、集群名称和 SSL 验证。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-conn-route", + "uri": "/get", + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 429, + "key_type": "var", + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "192.168.xxx.xxx:6379", + "192.168.xxx.xxx:16379" + ], + "redis_password": "p@ssw0rd", + "redis_cluster_name": "redis-cluster-1", + "redis_cluster_ssl": true + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送五个并发请求: + +```shell +seq 1 5 | xargs -n1 -P5 bash -c 'curl -s -o /dev/null -w "Response: %{http_code}\n" "http://127.0.0.1:9080/get"' +``` + +您应该会看到类似以下内容的响应,其中超过阈值的请求被拒绝: + +```text +Response: 200 +Response: 200 +Response: 429 +Response: 429 +Response: 429 +``` + +这表明在不同的 APISIX 实例中配置的两条路由共享相同的配额。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/limit-count.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/limit-count.md new file mode 100644 index 0000000..daf1381 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/limit-count.md @@ -0,0 +1,508 @@ +--- +title: limit-count +keywords: + - APISIX + - API 网关 + - Limit Count + - 速率限制 +description: limit-count 插件使用固定窗口算法,通过给定时间间隔内的请求数量来限制请求速率。超过配置配额的请求将被拒绝。 +--- + + + + + + + +## 描述 + +`limit-count` 插件使用固定窗口算法,通过给定时间间隔内的请求数量来限制请求速率。超过配置配额的请求将被拒绝。 + +您可能会在响应中看到以下速率限制标头: + +* `X-RateLimit-Limit`:总配额 +* `X-RateLimit-Remaining`:剩余配额 +* `X-RateLimit-Reset`:计数器重置的剩余秒数 + +## 属性 + +| 名称 | 类型 | 
必选项 | 默认值 | 有效值 | 描述 | +| ------------------- | ------- | ---------- | ------------- | --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| count | integer | 是 | | > 0 | 给定时间间隔内允许的最大请求数。 | +| time_window | integer | 是 | | > 0 | 速率限制 `count` 对应的时间间隔(以秒为单位)。 | +| key_type | string | 否 | var | ["var","var_combination","constant"] | key 的类型。如果`key_type` 为 `var`,则 `key` 将被解释为变量。如果 `key_type` 为 `var_combination`,则 `key` 将被解释为变量的组合。如果 `key_type` 为 `constant`,则 `key` 将被解释为常量。 | +| key | string | 否 | remote_addr | | 用于计数请求的 key。如果 `key_type` 为 `var`,则 `key` 将被解释为变量。变量不需要以美元符号(`$`)为前缀。如果 `key_type` 为 `var_combination`,则 `key` 会被解释为变量的组合。所有变量都应该以美元符号 (`$`) 为前缀。例如,要配置 `key` 使用两个请求头 `custom-a` 和 `custom-b` 的组合,则 `key` 应该配置为 `$http_custom_a $http_custom_b`。如果 `key_type` 为 `constant`,则 `key` 会被解释为常量值。| +| rejection_code | integer | 否 | 503 | [200,...,599] | 请求因超出阈值而被拒绝时返回的 HTTP 状态代码。| +| rejection_msg | string | 否 | | 非空 | 请求因超出阈值而被拒绝时返回的响应主体。| +| policy | string | 否 | local | ["local","re​​dis","re​​dis-cluster"] | 速率限制计数器的策略。如果是 `local`,则计数器存储在本地内存中。如果是 `redis`,则计数器存储在 Redis 实例上。如果是 `redis-cluster`,则计数器存储在 Redis 集群中。| +| allow_degradation | boolean | 否 | false | | 如果为 true,则允许 APISIX 在插件或其依赖项不可用时继续处理没有插件的请求。| +| show_limit_quota_header | boolean | 否 | true | | 如果为 true,则在响应标头中包含 `X-RateLimit-Limit` 以显示总配额和 `X-RateLimit-Remaining` 以显示剩余配额。| +| group | string | 否 | | 非空 | 插件的 `group` ID,以便同一 `group` 的路由可以共享相同的速率限制计数器。 | +| redis_host | string | 否 | | | Redis 节点的地址。当 `policy` 为 `redis` 时必填。 | +| redis_port | integer | 否 | 6379 | [1,...] 
| 当 `policy` 为 `redis` 时,Redis 节点的端口。 | +| redis_username | string | 否 | | | 如果使用 Redis ACL,则为 Redis 的用户名。如果使用旧式身份验证方法 `requirepass`,则仅配置 `redis_password`。当 `policy` 为 `redis` 时使用。 | +| redis_password | string | 否 | | | 当 `policy` 为 `redis` 或 `redis-cluster` 时,Redis 节点的密码。 | +| redis_ssl | boolean | 否 | false |如果为 true,则在 `policy` 为 `redis` 时使用 SSL 连接到 Redis 集群。| +| redis_ssl_verify | boolean | 否 | false | | 如果为 true,则在 `policy` 为 `redis` 时验证服务器 SSL 证书。| +| redis_database | integer | 否 | 0 | >= 0 | 当 `policy` 为 `redis` 时,Redis 中的数据库编号。| +| redis_timeout | integer | 否 | 1000 | [1,...] | 当 `policy` 为 `redis` 或 `redis-cluster` 时,Redis 超时值(以毫秒为单位)。 | +| redis_cluster_nodes | array[string] | 否 | | | 具有至少两个地址的 Redis 群集节点列表。当 policy 为 redis-cluster 时必填。 | +redis_cluster_name | string | 否 | | | | Redis 集群的名称。当 `policy` 为 `redis-cluster` 时必须使用。| +| redis_cluster_ssl | boolean | 否 | false | | 如果为 `true`,当 `policy` 为 `redis-cluster`时,使用 SSL 连接 Redis 集群。| +| redis_cluster_ssl_verify | boolean | 否 | false | | 如果为 `true`,当 `policy` 为 `redis-cluster` 时,验证服务器 SSL 证书。 | + +## 示例 + +下面的示例演示了如何在不同情况下配置 `limit-count` 。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 按远程地址应用速率限制 + +下面的示例演示了通过单一变量 `remote_addr` 对请求进行速率限制。 + +创建一个带有 `limit-count` 插件的路由,允许在 30 秒窗口内为每个远程地址设置 1 个配额: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route", + "uri": "/get", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送验证请求: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您应该会看到 `HTTP/1.1 200 OK` 响应。 + +该请求已消耗了时间窗口允许的所有配额。如果您在相同的 30 秒时间间隔内再次发送该请求,您应该会收到 `HTTP/1.1 429 Too Many Requests` 
响应,表示该请求超出了配额阈值。 + +### 通过远程地址和消费者名称应用速率限制 + +以下示例演示了通过变量 `remote_addr` 和 `consumer_name` 的组合对请求进行速率限制。它允许每个远程地址和每个消费者在 30 秒窗口内有 1 个配额。 + +创建消费者 `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +创建第二个消费者 `jane`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jane" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jane/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jane-key-auth", + "plugins": { + "key-auth": { + "key": "jane-key" + } + } + }' +``` + +创建一个带有 `key-auth` 和 `limit-count` 插件的路由,并在 `limit-count` 插件中指定使用变量组合作为速率限制键: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429, + "key_type": "var_combination", + "key": "$remote_addr $consumer_name" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +以消费者 `jane` 的身份发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: jane-key' +``` + +您应该会看到一个 `HTTP/1.1 200 OK` 响应以及相应的响应主体。 + +此请求已消耗了为时间窗口设置的所有配额。如果您在相同的 30 秒时间间隔内向消费者 `jane` 发送相同的请求,您应该会收到一个 `HTTP/1.1 429 Too Many Requests` 响应,表示请求超出了配额阈值。 + +在相同的 30 秒时间间隔内向消费者 `john` 发送相同的请求: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: john-key' +``` + +您应该看到一个 `HTTP/1.1 200 OK` 响应和相应的响应主体,表明请求不受速率限制。 + +在相同的 30 秒时间间隔内再次以消费者 `john` 的身份发送相同的请求,您应该收到一个 `HTTP/1.1 429 Too Many 
Requests` 响应。 + +这通过变量 `remote_addr` 和 `consumer_name` 的组合验证了插件速率限制。 + +### 在路由之间共享配额 + +以下示例通过配置 `limit-count` 插件的 `group` 演示了在多个路由之间共享速率限制配额。 + +请注意,同一 `group` 的 `limit-count` 插件的配置应该相同。为了避免更新异常和重复配置,您可以创建一个带有 `limit-count` 插件和上游的服务,以供路由连接。 + +创建服务: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/services" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-service", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429, + "group": "srv1" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +创建两个路由,并将其 `service_id` 配置为 `limit-count-service`,以便它们对插件和上游共享相同的配置: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route-1", + "service_id": "limit-count-service", + "uri": "/get1", + "plugins": { + "proxy-rewrite": { + "uri": "/get" + } + } + }' +``` + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route-2", + "service_id": "limit-count-service", + "uri": "/get2", + "plugins": { + "proxy-rewrite": { + "uri": "/get" + } + } + }' +``` + +:::note + +[`proxy-rewrite`](./proxy-rewrite.md) 插件用于将 URI 重写为 `/get`,以便将请求转发到正确的端点。 + +::: + +向路由 `/get1` 发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get1" +``` + +您应该会看到一个 `HTTP/1.1 200 OK` 响应以及相应的响应主体。 + +在相同的 30 秒时间间隔内向路由 `/get2` 发送相同的请求: + +```shell +curl -i "http://127.0.0.1:9080/get2" +``` + +您应该收到 `HTTP/1.1 429 Too Many Requests` 响应,这验证两个路由共享相同的速率限制配额。 + +### 使用 Redis 服务器在 APISIX 节点之间共享配额 + +以下示例演示了使用 Redis 服务器对多个 APISIX 节点之间的请求进行速率限制,以便不同的 APISIX 节点共享相同的速率限制配额。 + +在每个 APISIX 实例上,使用以下配置创建一个路由。相应地调整管理 API 的地址、Redis 主机、端口、密码和数据库。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route", + "uri": "/get", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, 
+ "rejected_code": 429, + "key": "remote_addr", + "policy": "redis", + "redis_host": "192.168.xxx.xxx", + "redis_port": 6379, + "redis_password": "p@ssw0rd", + "redis_database": 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向 APISIX 实例发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您应该会看到一个 `HTTP/1.1 200 OK` 响应以及相应的响应主体。 + +在相同的 30 秒时间间隔内向不同的 APISIX 实例发送相同的请求,您应该会收到一个 `HTTP/1.1 429 Too Many Requests` 响应,验证在不同 APISIX 节点中配置的路由是否共享相同的配额。 + +### 使用 Redis 集群在 APISIX 节点之间共享配额 + +您还可以使用 Redis 集群在多个 APISIX 节点之间应用相同的配额,以便不同的 APISIX 节点共享相同的速率限制配额。 + +确保您的 Redis 实例在 [集群模式](https://redis.io/docs/management/scaling/#create-and-use-a-redis-cluster) 下运行。`limit-count` 插件配置至少需要两个节点。 + +在每个 APISIX 实例上,使用以下配置创建路由。相应地调整管理 API 的地址、Redis 集群节点、密码、集群名称和 SSL 验证。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-count-route", + "uri": "/get", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "192.168.xxx.xxx:6379", + "192.168.xxx.xxx:16379" + ], + "redis_password": "p@ssw0rd", + "redis_cluster_name": "redis-cluster-1", + "redis_cluster_ssl": true + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向 APISIX 实例发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您应该会看到一个 `HTTP/1.1 200 OK` 响应以及相应的响应主体。 + +在相同的 30 秒时间间隔内向不同的 APISIX 实例发送相同的请求,您应该会收到一个 `HTTP/1.1 429 Too Many Requests` 响应,验证在不同 APISIX 节点中配置的路由是否共享相同的配额。 + +### 使用匿名消费者进行速率限制 + +以下示例演示了如何为常规和匿名消费者配置不同的速率限制策略,其中匿名消费者不需要进行身份验证并且配额较少。虽然此示例使用 [`key-auth`](./key-auth.md) 进行身份验证,但匿名消费者也可以使用 [`basic-auth`](./basic-auth.md)、[`jwt-auth`](./jwt-auth.md) 和 [`hmac-auth`](./hmac-auth.md) 进行配置。 + +创建一个消费者 `john`,并配置 `limit-count` 插件,以允许 30 秒内配额为 3: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" 
-X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john", + "plugins": { + "limit-count": { + "count": 3, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +为消费者 `john` 创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +创建匿名用户 `anonymous`,并配置 `limit-count` 插件,以允许 30 秒内配额为 1: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 30, + "rejected_code": 429 + } + } + }' +``` + +创建路由并配置 `key-auth` 插件以接受匿名消费者 `anonymous` 绕过身份验证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "key-auth-route", + "uri": "/anything", + "plugins": { + "key-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +使用 `john` 的密钥发送五个连续的请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H 'apikey: john-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,显示在 5 个请求中,3 个请求成功(状态代码 200),而其他请求被拒绝(状态代码 429)。 + +```text +200: 3, 429: 2 +``` + +发送五个匿名请求: + +```shell +resp=$(seq 5 | xargs -I{} curl "http://127.0.0.1:9080/anything" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,表明只有一个请求成功: + +```text +200: 1, 429: 4 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/limit-req.md 
b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/limit-req.md new file mode 100644 index 0000000..145989e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/limit-req.md @@ -0,0 +1,289 @@ +--- +title: limit-req +keywords: + - APISIX + - API 网关 + - Limit Request + - limit-req +description: limit-req 插件使用漏桶算法来限制请求的数量并允许节流。 +--- + + + + + + + +## 描述 + +`limit-req` 插件使用 [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) 算法来限制请求的数量并允许节流。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------- | ------ | ------ | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| rate | integer | True | | > 0 | 每秒允许的最大请求数。超过速率且低于突发的请求将被延迟。| +| bust | integer | True | | >= 0 | 每秒允许延迟的请求数,以进行限制。超过速率和突发的请求将被拒绝。| +| key_type | string | 否 | var | ["var","var_combination"] | key 的类型。如果 `key_type` 为 `var`,则 `key` 将被解释为变量。如果 `key_type` 为 `var_combination`,则 `key` 将被解释为变量的组合。 | +| key | string | 否 | remote_addr | | 用于计数请求的 key。如果 `key_type` 为 `var`,则 `key` 将被解释为变量。变量不需要以美元符号(`$`)为前缀。如果 `key_type` 为 `var_combination`,则 `key` 会被解释为变量的组合。所有变量都应该以美元符号 (`$`) 为前缀。例如,要配置 `key` 使用两个请求头 `custom-a` 和 `custom-b` 的组合,则 `key` 应该配置为 `$http_custom_a $http_custom_b`。如果 `key_type` 为 `constant`,则 `key` 会被解释为常量值。| +| rejection_code | integer | 否 | 503 | [200,...,599] | 请求因超出阈值而被拒绝时返回的 HTTP 状态代码。| +| rejection_msg | string | 否 | | 非空 | 请求因超出阈值而被拒绝时返回的响应主体。| +| nodelay | boolean | 否 | false | | 如果为 true,则不要延迟突发阈值内的请求。 | +| allow_degradation | boolean | 否 | false | | 如果为 true,则允许 APISIX 在插件或其依赖项不可用时继续处理没有插件的请求。| +| policy | string | 否 | local | ["local","re​​dis","re​​dis-cluster"] | 速率限制计数器的策略。如果是 `local`,则计数器存储在本地内存中。如果是 `redis`,则计数器存储在 Redis 实例上。如果是 `redis-cluster`,则计数器存储在 Redis 集群中。| +| allow_degradation | boolean | 否 | false | | 如果为 true,则允许 APISIX 
在插件或其依赖项不可用时继续处理没有插件的请求。| +| show_limit_quota_header | boolean | 否 | true | | 如果为 true,则在响应标头中包含 `X-RateLimit-Limit` 以显示总配额和 `X-RateLimit-Remaining` 以显示剩余配额。| +| redis_host | string | 否 | | | Redis 节点的地址。当 `policy` 为 `redis` 时必填。 | +| redis_port | integer | 否 | 6379 | [1,...] | 当 `policy` 为 `redis` 时,Redis 节点的端口。 | +| redis_username | string | 否 | | | 如果使用 Redis ACL,则为 Redis 的用户名。如果使用旧式身份验证方法 `requirepass`,则仅配置 `redis_password`。当 `policy` 为 `redis` 时使用。 | +| redis_password | string | 否 | | | 当 `policy` 为 `redis` 或 `redis-cluster` 时,Redis 节点的密码。 | +| redis_ssl | boolean | 否 | false |如果为 true,则在 `policy` 为 `redis` 时使用 SSL 连接到 Redis 集群。| +| redis_ssl_verify | boolean | 否 | false | | 如果为 true,则在 `policy` 为 `redis` 时验证服务器 SSL 证书。| +| redis_database | integer | 否 | 0 | >= 0 | 当 `policy` 为 `redis` 时,Redis 中的数据库编号。| +| redis_timeout | integer | 否 | 1000 | [1,...] | 当 `policy` 为 `redis` 或 `redis-cluster` 时,Redis 超时值(以毫秒为单位)。 | +| redis_cluster_nodes | array[string] | 否 | | | 具有至少两个地址的 Redis 群集节点列表。当 policy 为 redis-cluster 时必填。 | +redis_cluster_name | string | 否 | | | | Redis 集群的名称。当 `policy` 为 `redis-cluster` 时必须使用。| +| redis_cluster_ssl | boolean | 否 | false | | 如果为 `true`,当 `policy` 为 `redis-cluster`时,使用 SSL 连接 Redis 集群。| +| redis_cluster_ssl_verify | boolean | 否 | false | | 如果为 `true`,当 `policy` 为 `redis-cluster` 时,验证服务器 SSL 证书。 | + +## 示例 + +以下示例演示了如何在不同场景中配置 `limit-req`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 通过远程地址应用速率限制 + +以下示例演示了通过单个变量 `remote_addr` 对 HTTP 请求进行速率限制。 + +使用 `limit-req` 插件创建允许每个远程地址 1 QPS 的路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d ' + { + "id": "limit-req-route", + "uri": "/get", + "plugins": { + "limit-req": { + "rate": 1, + "burst": 0, + "key": "remote_addr", + "key_type": "var", + "rejected_code": 429, + "nodelay": true + } + }, + "upstream": { 
+ "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送请求以验证: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您应该会看到一个 `HTTP/1.1 200 OK` 响应。 + +该请求已消耗了时间窗口允许的所有配额。如果您在同一秒内再次发送请求,您应该会收到 `HTTP/1.1 429 Too Many Requests` 响应,表示请求超出了配额阈值。 + +### 允许速率限制阈值 + +以下示例演示了如何配置 `burst` 以允许速率限制阈值超出配置的值并实现请求限制。您还将看到与未实施限制时的比较。 + +使用 `limit-req` 插件创建一个路由,允许每个远程地址 1 QPS,并将 `burst` 设置为 1,以允许 1 个超过 `rate` 的请求延迟处理: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-req-route", + "uri": "/get", + "plugins": { + "limit-req": { + "rate": 1, + "burst": 1, + "key": "remote_addr", + "rejected_code": 429 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +生成三个对路由的请求: + +```shell +resp=$(seq 3 | xargs -I{} curl -i "http://127.0.0.1:9080/get" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200 responses: $count_200 ; 429 responses: $count_429" +``` + +您可能会看到所有三个请求都成功: + +```text +200 responses: 3 ; 429 responses: 0 +``` + +现在,将 `burst` 更新为 0 或将 `nodelay` 设置为 `true`,如下所示: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/limit-req-route" -X PATCH \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "plugins": { + "limit-req": { + "nodelay": true + } + } + }' +``` + +再次向路由生成三个请求: + +```shell +resp=$(seq 3 | xargs -I{} curl -i "http://127.0.0.1:9080/get" -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200 responses: $count_200 ; 429 responses: $count_429" +``` + +您应该会看到类似以下内容的响应,表明超出速率的请求已被拒绝: + +```text +200 responses: 1 ; 429 responses: 2 +``` + +### 通过远程地址和消费者名称应用速率限制 + +以下示例演示了通过变量组合 `remote_addr` 和 `consumer_name` 来限制请求的速率。 + +使用 `limit-req` 插件创建一个路由,允许每个远程地址和每个消费者 有 1 QPS。 + +创建消费者 `john`: + +```shell +curl 
"http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +创建第二个消费者 `jane`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jane" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jane/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jane-key-auth", + "plugins": { + "key-auth": { + "key": "jane-key" + } + } + }' +``` + +创建一个带有 `key-auth` 和 `limit-req` 插件的路由,并在 `limit-req` 插件中指定使用变量组合作为速率限制 key: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "limit-req-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "limit-req": { + "rate": 1, + "burst": 0, + "key": "$remote_addr $consumer_name", + "key_type": "var_combination", + "rejected_code": 429 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +同时发送两个请求,每个请求针对一个消费者: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: jane-key' & \ +curl -i "http://127.0.0.1:9080/get" -H 'apikey: john-key' & +``` + +您应该会收到两个请求的 `HTTP/1.1 200 OK`,表明请求未超过每个消费者的阈值。 + +如果您在同一秒内以任一消费者身份发送更多请求,应该会收到 `HTTP/1.1 429 Too Many Requests` 响应。 + +这验证了插件速率限制是通过变量 `remote_addr` 和 `consumer_name` 的来实现的。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/log-rotate.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/log-rotate.md new file mode 100644 index 0000000..61a2b96 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/log-rotate.md @@ -0,0 +1,113 @@ +--- +title: 
log-rotate +keywords: + - APISIX + - API 网关 + - Plugin + - 日志切分 +description: 云原生 API 网关 Apache APISIX log-rotate 插件用于定期切分日志目录下的访问日志和错误日志。 +--- + + + +## 描述 + +`log-rotate` 插件用于定期切分日志目录下的访问日志和错误日志。 + +你可以自定义日志轮换的频率以及要保留的日志数量。当日志数量超过限制时,旧的日志会被自动删除。 + +## 参数 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------------ | ------- | ------ | ------- | ------------- | ---------------------------------------------------------------------------- | +| interval | integer | 是 | 60 * 60 | | 每间隔多长时间切分一次日志,以秒为单位。 | +| max_kept | integer | 是 | 24 * 7 | | 最多保留多少份历史日志,超过指定数量后,自动删除老文件。 | +| max_size | integer | 否 | -1 | | 日志文件超过指定大小时进行切分,单位为 Byte。如果 `max_size` 小于 0 或者根据 `interval` 计算的时间到达时,将不会根据 `max_size` 切分日志。 | +| enable_compression | boolean | 否 | false | [false, true] | 当设置为 `true` 时,启用日志文件压缩。该功能需要在系统中安装 `tar` 。 | + +开启该插件后,就会按照参数自动切分日志文件了。比如以下示例是根据 `interval: 10` 和 `max_kept: 10` 得到的样本。 + +```shell +ll logs +``` + +``` +total 44K +-rw-r--r--. 1 resty resty 0 Mar 20 20:33 2020-03-20_20-33-40_access.log +-rw-r--r--. 1 resty resty 2.8K Mar 20 20:33 2020-03-20_20-33-40_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:33 2020-03-20_20-33-50_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:33 2020-03-20_20-33-50_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:33 2020-03-20_20-34-00_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:34 2020-03-20_20-34-00_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:34 2020-03-20_20-34-10_access.log +-rw-r--r--. 1 resty resty 2.4K Mar 20 20:34 2020-03-20_20-34-10_error.log +-rw-r--r--. 1 resty resty 0 Mar 20 20:34 access.log +-rw-r--r--. 1 resty resty 1.5K Mar 20 21:31 error.log +``` + +当开启日志文件压缩时,日志文件名称如下所示: + +```shell +ll logs +``` + +```shell +total 10.5K +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:33 2020-03-20_20-33-50_access.log.tar.gz +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:33 2020-03-20_20-33-50_error.log.tar.gz +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:33 2020-03-20_20-34-00_access.log.tar.gz +-rw-r--r--. 
1 resty resty 1.5K Mar 20 20:34 2020-03-20_20-34-00_error.log.tar.gz +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:34 2020-03-20_20-34-10_access.log.tar.gz +-rw-r--r--. 1 resty resty 1.5K Mar 20 20:34 2020-03-20_20-34-10_error.log.tar.gz +-rw-r--r--. 1 resty resty 0 Mar 20 20:34 access.log +-rw-r--r--. 1 resty resty 1.5K Mar 20 21:31 error.log +``` + +## 启用插件 + +**该插件默认为禁用状态**,你可以在 `./conf/config.yaml` 中启用 `log-rotate` 插件,不需要在任何路由或服务中绑定。 + +```yaml title="./conf/config.yaml" +plugins: + # the plugins you enabled + - log-rotate + +plugin_attr: + log-rotate: + interval: 3600 # rotate interval (unit: second) + max_kept: 168 # max number of log files will be kept + max_size: -1 # max size of log files will be kept + enable_compression: false # enable log file compression(gzip) or not, default false +``` + +配置完成,你需要重新加载 APISIX。 + +## 删除插件 + +当你不再需要该插件时,只需要在 `./conf/config.yaml` 中删除或注释该插件即可。 + +```yaml +plugins: + # the plugins you enabled + # - log-rotate + +plugin_attr: + +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/loggly.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/loggly.md new file mode 100644 index 0000000..e8b05af --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/loggly.md @@ -0,0 +1,184 @@ +--- +title: loggly +keywords: + - APISIX + - API 网关 + - Plugin + - SolarWinds Loggly +description: API 网关 Apache APISIX loggly 插件可用于将日志转发到 SolarWinds Loggly 进行分析和存储。 +--- + + + +## 描述 + +`loggly` 插件可用于将日志转发到 [SolarWinds Loggly](https://www.solarwinds.com/loggly) 进行分析和存储。 + +当启用插件时,APISIX 会将请求上下文信息序列化为符合 [Loggly Syslog](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/streaming-syslog-without-using-files.htm?cshid=loggly_streaming-syslog-without-using-files) 的数据格式,即具有 [RFC5424](https://datatracker.ietf.org/doc/html/rfc5424) 兼容标头的 Syslog。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | 
+|------------------------|---------------|----------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------| +| customer_token | string | 是 | | 将日志发送到 Loggly 时使用的唯一标识符,以确保将日志发送到正确的组织帐户。 | +| severity | string (enum) | 否 | INFO | Syslog 日志事件的严重性级别。包括:`DEBUG`、`INFO`、`NOTICE`、`WARNING`、`ERR`、`CRIT`、`ALERT` 和 `EMEGR`。 | +| severity_map | object | 否 | nil | 一种将上游 HTTP 响应代码映射到 Syslog 中的方法。 `key-value`,其中 `key` 是 HTTP 响应代码,`value`是 Syslog 严重级别。例如`{"410": "CRIT"}`。 | +| tags | array | 否 | | 元数据将包含在任何事件日志中,以帮助进行分段和过滤。 | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| include_req_body | boolean | 否 | false | 当设置为 `true` 时,包含请求体。**注意**:如果请求体无法完全存放在内存中,由于 NGINX 的限制,APISIX 无法将它记录下来。 | +| include_req_body_expr | array | 否 | | 当 `include_req_body` 属性设置为 `true` 时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录请求体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 | +| include_resp_body | boolean | 否 | false | 当设置为 `true` 时,包含响应体。 | +| include_resp_body_expr | array | 否 | | 当 `include_resp_body` 属性设置为 `true` 时进行过滤响应体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | + +该插件支持使用批处理器来聚合并批量处理条目(日志或数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +如果要生成用户令牌,请在 Loggly 系统中的 `/loggly.com/tokens` 设置,或者在系统中单击 `Logs > Source setup > Customer tokens`。 + +### 默认日志格式示例 + +```text +<10>1 2024-01-06T06:50:51.739Z 127.0.0.1 apisix 58525 - [token-1@41058 tag="apisix"] {"service_id":"","server":{"version":"3.7.0","hostname":"localhost"},"apisix_latency":100.99985313416,"request":{"url":"http://127.0.0.1:1984/opentracing","headers":{"content-type":"application/x-www-form-urlencoded","user-agent":"lua-resty-http/0.16.1 (Lua) 
ngx_lua/10025","host":"127.0.0.1:1984"},"querystring":{},"uri":"/opentracing","size":155,"method":"GET"},"response":{"headers":{"content-type":"text/plain","server":"APISIX/3.7.0","transfer-encoding":"chunked","connection":"close"},"size":141,"status":200},"route_id":"1","latency":103.99985313416,"upstream_latency":3,"client_ip":"127.0.0.1","upstream":"127.0.0.1:1982","start_time":1704523851634} +``` + +## 插件元数据设置 + +你还可以通过插件元数据配置插件。详细配置如下: + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|------------|---------|-------|----------------------|--------------------------------|---------------------------------------------------------------------| +| host | string | 否 | "logs-01.loggly.com" | | 发送日志的主机的端点。 | +| port | integer | 否 | 514 | | 要连接的 Loggly 端口。仅用于 `syslog` 协议。 | +| timeout | integer | 否 | 5000 | | 发送数据请求超时时间(以毫秒为单位)。 | +| protocol | string | 否 | "syslog" | [ "syslog", "http", "https" ] | 将日志发送到 Loggly 的协议。 | +| log_format | object | 否 | nil | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +APISIX 支持 [Syslog](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/streaming-syslog-without-using-files.htm)、[HTTP/S](https://documentation.solarwinds.com/en/success_center/loggly/content/admin/http-bulk-endpoint.htm)(批量端点)协议将日志事件发送到 Loggly。**默认情况下 `protocol` 的值为 `syslog`**。该协议允许你通过一些细粒度的控制(基于上游 HTTP 响应代码的日志严重性映射)发送符合 RFC5424 的系统日志事件。但是 HTTP/S 批量端点非常适合以更快的传输速度发送更大量的日志事件。 + +:::note 注意 + +Syslog 协议允许你发送符合 RFC5424 的 syslog 事件并进行细粒度控制。但是在以快速传输速度发送大量日志时,使用 HTTP/S 批量端点会更好。你可以通过以下方式更新元数据以更新使用的协议: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/loggly \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "protocol": "http" +}' +``` + +::: + +## 启用插件 + 
+以下示例展示了如何在指定路由上启用该插件: + +**完整配置** + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins":{ + "loggly":{ + "customer_token":"0e6fe4bf-376e-40f4-b25f-1d55cb29f5a2", + "tags":["apisix", "testroute"], + "severity":"info", + "severity_map":{ + "503": "err", + "410": "alert" + }, + "buffer_duration":60, + "max_retry_count":0, + "retry_delay":1, + "inactive_timeout":2, + "batch_max_size":10 + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 + } + }, + "uri":"/index.html" +}' +``` + +**最小化配置** + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins":{ + "loggly":{ + "customer_token":"0e6fe4bf-376e-40f4-b25f-1d55cb29f5a2", + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 + } + }, + "uri":"/index.html" +}' +``` + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +发出请求后,你就可以在 Loggly 仪表盘上查看相关日志: + +![Loggly Dashboard](../../../assets/images/plugin/loggly-dashboard.png) + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/loki-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/loki-logger.md new file mode 100644 index 0000000..bc32dca --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/loki-logger.md @@ -0,0 +1,407 @@ +--- +title: loki-logger +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Loki-logger + - Grafana Loki +description: loki-logger 插件通过 Loki HTTP API /loki/api/v1/push 将请求和响应日志批量推送到 Grafana Loki。该插件还支持自定义日志格式。 +--- + + + + + + + +## 描述 + +`loki-logger` 插件通过 
[Loki HTTP API](https://grafana.com/docs/loki/latest/reference/loki-http-api/#loki-http-api) `/loki/api/v1/push` 将请求和响应日志批量推送到 [Grafana Loki](https://grafana.com/oss/loki/)。该插件还支持自定义日志格式。
+
+启用后,插件会将请求上下文信息序列化为 [JSON object](https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki) 并将其添加到队列中,然后再将其推送到 Loki。有关更多详细信息,请参阅批处理处理器 [batch processor](../batch-processor.md)。
+
+## 属性
+
+| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
+|--|---|---|---|---|---|
+| endpoint_addrs | array[string] | 是 | | | Loki API URL,例如 `http://127.0.0.1:3100`。如果配置了多个端点,日志将被推送到列表中随机确定的端点。 |
+| endpoint_uri | string | 否 | /loki/api/v1/push | | Loki 提取端点的 URI 路径。 |
+| tenant_id | string | 否 | fake | | Loki 租户 ID。根据 Loki 的 [多租户文档](https://grafana.com/docs/loki/latest/operations/multi-tenancy/#multi-tenancy),在单租户下默认值设置为 `fake`。 |
+| headers | object | 否 | | | 请求头键值对(对 `X-Scope-OrgID` 和 `Content-Type` 的设置将会被忽略)。 |
+| log_labels | object | 否 | {job = "apisix"} | | Loki 日志标签。支持 [NGINX 变量](https://nginx.org/en/docs/varindex.html) 和值中的常量字符串。变量应以 `$` 符号为前缀。例如,标签可以是 `{"origin" = "apisix"}` 或 `{"origin" = "$remote_addr"}`。|
+| ssl_verify | boolean | 否 | true | | 如果为 true,则验证 Loki 的 SSL 证书。|
+| timeout | integer | 否 | 3000 | [1, 60000] | Loki 服务 HTTP 调用的超时时间(以毫秒为单位)。|
+| keepalive | boolean | 否 | true | | 如果为 true,则保持连接以应对多个请求。|
+| keepalive_timeout | integer | 否 | 60000 | >=1000 | Keepalive 超时时间(以毫秒为单位)。|
+| keepalive_pool | integer | 否 | 5 | >=1 | 连接池中的最大连接数。|
+| log_format | object | 否 | | |自定义日志格式为 JSON 格式的键值对。值中支持 [APISIX 变量](../apisix-variable.md) 和 [NGINX 变量](http://nginx.org/en/docs/varindex.html)。 |
+| name | string | 否 | loki-logger | | 批处理器插件的唯一标识符。如果使用 [Prometheus](./prometheus.md) 监控 APISIX 指标,则名称会导出到 `apisix_batch_process_entries`。 |
+| include_req_body | boolean | 否 | false | | 如果为 true,则将请求正文包含在日志中。请注意,如果请求正文太大而无法保存在内存中,则由于 NGINX 的限制而无法记录。 |
+| include_req_body_expr | array[array] | 否 | | |一个或多个 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 形式条件的数组。在 `include_req_body` 为 true 时使用。仅当此处配置的表达式计算结果为 
true 时,才会记录请求正文。| +| include_resp_body | boolean | 否 | false | | 如果为 true,则将响应正文包含在日志中。| +| include_resp_body_expr | array[array] | 否 | | | 一个或多个 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 形式条件的数组。在 `include_resp_body` 为 true 时使用。仅当此处配置的表达式计算结果为 true 时,才会记录响应正文。| + +该插件支持使用批处理器对条目(日志/数据)进行批量聚合和处理,避免了频繁提交数据的需求。批处理器每隔 `5` 秒或当队列中的数据达到 `1000` 时提交数据。有关更多信息或设置自定义配置,请参阅 [批处理器](../batch-processor.md#configuration)。 + +## Plugin Metadata + +您还可以使用 [Plugin Metadata](../terminology/plugin-metadata.md) 全局配置日志格式,该 Plugin Metadata 配置所有 `loki-logger` 插件实例的日志格式。如果在单个插件实例上配置的日志格式与在 Plugin Metadata 上配置的日志格式不同,则在单个插件实例上配置的日志格式优先。 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|------|------|----------|--|-------------| +| log_format | object | 否 | | 日志格式以 JSON 格式声明为键值对。值只支持字符串类型。可以通过在字符串前面加上 `$` 来使用 [APISIX 变量](../apisix-variable.md) 和 [NGINX 变量](http://nginx.org/en/docs/varindex.html) 。 | + +## 示例 + +下面的示例演示了如何为不同场景配置 `loki-logger` 插件。 + +为了遵循示例,请在 Docker 中启动一个示例 Loki 实例: + +```shell +wget https://raw.githubusercontent.com/grafana/loki/v3.0.0/cmd/loki/loki-local-config.yaml -O loki-config.yaml +docker run --name loki -d -v $(pwd):/mnt/config -p 3100:3100 grafana/loki:3.2.1 -config.file=/mnt/config/loki-config.yaml +``` + +此外,启动 Grafana 实例来查看和可视化日志: + +```shell +docker run -d --name=apisix-quickstart-grafana \ + -p 3000:3000 \ + grafana/grafana-oss +``` + +要连接 Loki 和 Grafana,请访问 Grafana,网址为 [`http://localhost:3000`](http://localhost:3000)。在 __Connections > Data sources__ 下,添加新数据源并选择 Loki。您的连接 URL 应遵循 `http://{your_ip_address}:3100` 的格式。保存新数据源时,Grafana 还应测试连接,您应该会看到 Grafana 通知数据源已成功连接。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 以默认日志格式记录请求和响应 + +以下示例演示了如何在路由上配置 `loki-logger` 插件以记录通过路由的请求和响应。 + +使用 `loki-logger` 插件创建路由并配置 Loki 的地址: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": 
"loki-logger-route", + "uri": "/anything", + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://192.168.1.5:3100"] + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +向路由发送一些请求以生成日志条目: + +```shell +curl "http://127.0.0.1:9080/anything" +``` + +您应该会收到所有请求的“HTTP/1.1 200 OK”响应。 + +导航到 [Grafana explore view](http://localhost:3000/explore) 并运行查询 `job = apisix`。您应该会看到与您的请求相对应的许多日志,例如以下内容: + +```json +{ + "route_id": "loki-logger-route", + "response": { + "status": 200, + "headers": { + "date": "Fri, 03 Jan 2025 03:54:26 GMT", + "server": "APISIX/3.11.0", + "access-control-allow-credentials": "true", + "content-length": "391", + "access-control-allow-origin": "*", + "content-type": "application/json", + "connection": "close" + }, + "size": 619 + }, + "start_time": 1735876466, + "client_ip": "192.168.65.1", + "service_id": "", + "apisix_latency": 5.0000038146973, + "upstream": "34.197.122.172:80", + "upstream_latency": 666, + "server": { + "hostname": "0b9a772e68f8", + "version": "3.11.0" + }, + "request": { + "headers": { + "user-agent": "curl/8.6.0", + "accept": "*/*", + "host": "127.0.0.1:9080" + }, + "size": 85, + "method": "GET", + "url": "http://127.0.0.1:9080/anything", + "querystring": {}, + "uri": "/anything" + }, + "latency": 671.0000038147 +} +``` + +这验证了 Loki 已从 APISIX 接收日志。您还可以在 Grafana 中创建仪表板,以进一步可视化和分析日志。 + +### 使用 Plugin Metadata 自定义日志格式 + +以下示例演示了如何使用 [Plugin Metadata](../terminology/plugin-metadata.md) 自定义日志格式。 + +使用 `loki-logger` 插件创建路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "loki-logger-route", + "uri": "/anything", + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://192.168.1.5:3100"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +为 `loki-logger` 配置 Plugin Metadata,它将更新所有需要记录请求的路由的日志格式: + +```shell +curl 
"http://127.0.0.1:9180/apisix/admin/plugin_metadata/loki-logger" -X PUT \ + -H 'X-API-KEY: ${admin_key}' \ + -d '{ + "log_format": { + "host": "$host", + "client_ip": "$remote_addr", + "route_id": "$route_id", + "@timestamp": "$time_iso8601" + } + }' +``` + +向路由发送请求以生成新的日志条目: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +导航到 [Grafana explore view](http://localhost:3000/explore) 并运行查询 `job = apisix`。您应该会看到与您的请求相对应的日志条目,类似于以下内容: + +```json +{ + "@timestamp":"2025-01-03T21:11:34+00:00", + "client_ip":"192.168.65.1", + "route_id":"loki-logger-route", + "host":"127.0.0.1" +} +``` + +如果路由上的插件指定了特定的日志格式,它将优先于 Plugin Metadata 中指定的日志格式。例如,按如下方式更新上一个路由上的插件: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/loki-logger-route" -X PATCH \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "plugins": { + "loki-logger": { + "log_format": { + "route_id": "$route_id", + "client_ip": "$remote_addr", + "@timestamp": "$time_iso8601" + } + } + } + }' +``` + +向路由发送请求以生成新的日志条目: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +导航到 [Grafana explore view](http://localhost:3000/explore) 并重新运行查询 `job = apisix`。您应该会看到与您的请求相对应的日志条目,与路由上配置的格式一致,类似于以下内容: + +```json +{ + "client_ip":"192.168.65.1", + "route_id":"loki-logger-route", + "@timestamp":"2025-01-03T21:19:45+00:00" +} +``` + +### 有条件地记录请求主体 + +以下示例演示了如何有条件地记录请求主体。 + +使用 `loki-logger` 创建路由,仅在 URL 查询字符串 `log_body` 为 `yes` 时记录请求主体: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "loki-logger-route", + "uri": "/anything", + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://192.168.1.5:3100"], + "include_req_body": true, + "include_req_body_expr": [["arg_log_body", "==", "yes"]] + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +使用满足条件的 URL 查询字符串向路由发送请求: + +```shell +curl -i 
"http://127.0.0.1:9080/anything?log_body=yes" -X POST -d '{"env": "dev"}' +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +导航到 [Grafana explore view](http://localhost:3000/explore) 并重新运行查询 `job = apisix`。您应该会看到与您的请求相对应的日志条目,与路由上配置的格式一致,类似于以下内容: + +```json +{ + "route_id": "loki-logger-route", + ..., + "request": { + "headers": { + ... + }, + "body": "{\"env\": \"dev\"}", + "size": 182, + "method": "POST", + "url": "http://127.0.0.1:9080/anything?log_body=yes", + "querystring": { + "log_body": "yes" + }, + "uri": "/anything?log_body=yes" + }, + "latency": 809.99994277954 +} +``` + +向路由发送一个没有任何 URL 查询字符串的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST -d '{"env": "dev"}' +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +导航到 [Grafana explore view](http://localhost:3000/explore) 并重新运行查询 `job = apisix`。您应该会看到与您的请求相对应的日志条目,与路由上配置的格式一致,类似于以下内容: + +```json +{ + "route_id": "loki-logger-route", + ..., + "request": { + "headers": { + ... + }, + "size": 169, + "method": "POST", + "url": "http://127.0.0.1:9080/anything", + "querystring": {}, + "uri": "/anything" + }, + "latency": 557.00016021729 +} +``` + +:::info + +如果您除了将 `include_req_body` 或 `include_resp_body` 设置为 `true` 之外还自定义了 `log_format`,则插件不会在日志中包含正文。 + +作为一种解决方法,您可以在日志格式中使用 NGINX 变量 `$request_body`,例如: + +```json +{ + "kafka-logger": { + ..., + "log_format": {"body": "$request_body"} + } +} +``` + +::: + +## FAQ + +### 日志未正确推送 + +请查看 `error.log` 文件以获取此类日志。 + +```text +2023/04/30 13:45:46 [error] 19381#19381: *1075673 [lua] batch-processor.lua:95: Batch Processor[loki logger] failed to process entries: loki server returned status: 401, body: no org id, context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9081 +``` + +可以根据错误代码 `failed to process entries: loki server returned status: 401, body: no org id` 和 loki 服务器的响应正文来诊断错误。 + +### 当请求每秒 (RPS) 较高时出现错误? 
+ +- 请确保 `keepalive` 相关的配置已正确设置。有关更多信息,请参阅[属性](#属性) 。 +- 请检查 `error.log` 中的日志,查找此类日志。 + + ```text + 2023/04/30 13:49:34 [error] 19381#19381: *1082680 [lua] batch-processor.lua:95: Batch Processor[loki logger] failed to process entries: loki server returned status: 429, body: Ingestion rate limit exceeded for user tenant_1 (limit: 4194304 bytes/sec) while attempting to ingest '1000' lines totaling '616307' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased, context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9081 + ``` + + - 通常与高 QPS 相关的日志如上所示。错误信息为:`Ingestion rate limit exceeded for user tenant_1 (limit: 4194304 bytes/sec) while attempting to ingest '1000' lines totaling '616307' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased`。 + - 请参考 [Loki 文档](https://grafana.com/docs/loki/latest/configuration/#limits_config) ,添加默认日志量和突发日志量的限制,例如 `ingestion_rate_mb` 和 `ingestion_burst_size_mb`。 + + 在开发过程中进行测试时,将 `ingestion_burst_size_mb` 设置为 100 可以确保 APISIX 以至少 10000 RPS 的速率正确推送日志。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/mocking.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/mocking.md new file mode 100644 index 0000000..17ca3a7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/mocking.md @@ -0,0 +1,255 @@ +--- +title: mocking +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Mocking +description: 本文介绍了关于 Apache APISIX `mocking` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`mocking` 插件用于模拟 API。当执行该插件时,它将随机返回指定格式的模拟数据,并且请求不会转发到上游。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ------------- | -------| ----- | ---------------- | --------------------------------------------------------------------------- | +| delay | integer| 否 | | 延时返回的时间,单位为秒。 | +| response_status | integer| 否 | 200 | 返回响应的 HTTP 状态码。 | +| content_type | string | 否 | application/json | 返回响应的 Header `Content-Type`。 | +| response_example| string 
| 否 | | 返回响应的 Body,支持使用变量,例如 `$remote_addr $consumer_name`,与 `response_schema` 字段二选一。 | +| response_schema | object | 否 | | 指定响应的 `jsonschema` 对象,未指定 `response_example` 字段时生效。 | +| with_mock_header| boolean| 否 | true | 当设置为 `true` 时,将添加响应头 `x-mock-by: APISIX/{version}`。设置为 `false` 时则不添加该响应头。 | +| response_headers| object | 否 | | 要在模拟响应中添加的标头。示例:`{"X-Foo": "bar", "X-Few": "baz"}` | + +JSON Schema 在其字段中支持以下类型: + +- `string` +- `number` +- `integer` +- `boolean` +- `object` +- `array` + +以下是一个 JSON Schema 示例: + +```json +{ + "properties":{ + "field0":{ + "example":"abcd", + "type":"string" + }, + "field1":{ + "example":123.12, + "type":"number" + }, + "field3":{ + "properties":{ + "field3_1":{ + "type":"string" + }, + "field3_2":{ + "properties":{ + "field3_2_1":{ + "example":true, + "type":"boolean" + }, + "field3_2_2":{ + "items":{ + "example":155.55, + "type":"integer" + }, + "type":"array" + } + }, + "type":"object" + } + }, + "type":"object" + }, + "field2":{ + "items":{ + "type":"string" + }, + "type":"array" + } + }, + "type":"object" +} +``` + +以下为上述 JSON Schema 可能生成的返回对象: + +```json +{ + "field1": 123.12, + "field3": { + "field3_1": "LCFE0", + "field3_2": { + "field3_2_1": true, + "field3_2_2": [ + 155, + 155 + ] + } + }, + "field0": "abcd", + "field2": [ + "sC" + ] +} +``` + +## 启用插件 + +你可以通过如下命令在指定路由上启用 `mocking` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "plugins": { + "mocking": { + "delay": 1, + "content_type": "application/json", + "response_status": 200, + "response_schema": { + "properties":{ + "field0":{ + "example":"abcd", + "type":"string" + }, + "field1":{ + "example":123.12, + "type":"number" + }, + "field3":{ + "properties":{ + "field3_1":{ + "type":"string" + }, + 
"field3_2":{ + "properties":{ + "field3_2_1":{ + "example":true, + "type":"boolean" + }, + "field3_2_2":{ + "items":{ + "example":155.55, + "type":"integer" + }, + "type":"array" + } + }, + "type":"object" + } + }, + "type":"object" + }, + "field2":{ + "items":{ + "type":"string" + }, + "type":"array" + } + }, + "type":"object" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,可以使用如下方式测试插件是否启用成功: + +当 `mocking` 插件配置如下: + +```JSON +{ + "delay":0, + "content_type":"", + "with_mock_header":true, + "response_status":201, + "response_example":"{\"a\":1,\"b\":2}" +} +``` + +通过如下命令进行测试: + +```shell +curl http://127.0.0.1:9080/test-mock -i +``` + +```Shell +HTTP/1.1 201 Created +Date: Fri, 14 Jan 2022 11:49:34 GMT +Content-Type: application/json;charset=utf8 +Transfer-Encoding: chunked +Connection: keep-alive +x-mock-by: APISIX/2.10.0 +Server: APISIX/2.10.0 + +{"a":1,"b":2} +``` + +## 删除插件 + +当你需要禁用 `mocking` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/mqtt-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/mqtt-proxy.md new file mode 100644 index 0000000..30567c2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/mqtt-proxy.md @@ -0,0 +1,174 @@ +--- +title: mqtt-proxy +keywords: + - APISIX + - API 网关 + - Plugin + - MQTT Proxy +description: 本文档介绍了 Apache APISIX mqtt-proxy 插件的信息,通过 `mqtt-proxy` 插件可以使用 MQTT 的 `client_id` 进行动态负载平衡。 +--- + + + +## 描述 + +通过 `mqtt-proxy` 插件可以使用 MQTT 的 `client_id` 进行动态负载平衡。它仅适用于 `stream` 模式。 + +这个插件支持 MQTT [3.1.*](http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html) 及 [5.0]( 
https://docs.oasis-open.org/mqtt/mqtt/v5.0/mqtt-v5.0.html ) 两个协议。 + +## 属性 + +| 名称 | 类型 | 必选项 | 描述 | +| -------------- | ------- | ----- | ------------------------------------------------------ | +| protocol_name | string | 是 | 协议名称,正常情况下应为 `MQTT`。 | +| protocol_level | integer | 是 | 协议级别,MQTT `3.1.*` 为 `4`,MQTT `5.0` 应是`5`。 | + +## 启用插件 + +为了启用该插件,需要先在配置文件(`./conf/config.yaml`)中加载 `stream_proxy` 相关配置。以下配置代表监听 `9100` TCP 端口: + +```yaml title="./conf/config.yaml" + ... + router: + http: 'radixtree_uri' + ssl: 'radixtree_sni' + stream_proxy: # TCP/UDP proxy + tcp: # TCP proxy port list + - 9100 + dns_resolver: + ... +``` + +现在你可以将请求发送到 `9100` 端口。 + +你可以创建一个 stream 路由并启用 `mqtt-proxy` 插件。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": [{ + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + }] + } +}' +``` + +如果你在 macOS 中使用 Docker,则 `host.docker.internal` 是 `host` 的正确属性。 + +该插件暴露了一个变量 `mqtt_client_id`,你可以使用它来通过客户端 ID 进行负载均衡。比如: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + }, + { + "host": "127.0.0.2", + "port": 1995, + "weight": 1 + } + ] + } +}' +``` + +不同客户端 ID 的 MQTT 连接将通过一致性哈希算法被转发到不同的节点。如果客户端 ID 为空,将会通过客户端 IP 进行均衡。 + +## 使用 mqtt-proxy 插件启用 mTLS + +Stream 代理可以使用 TCP 连接并且支持 TLS。请参考 [如何通过 tcp 连接接受 tls](../stream-proxy.md/#accept-tls-over-tcp-connection) 打开启用了 TLS 的 stream 代理。 + +`mqtt-proxy` 插件通过 Stream 
代理的指定端口的 TCP 通信启用,如果 `tls` 设置为 `true`,则还要求客户端通过 TLS 进行身份验证。 + +配置 `ssl` 提供 CA 证书和服务器证书,以及 SNI 列表。使用 `ssl` 保护 `stream_routes` 的步骤等同于 [protect Routes](../mtls.md/#protect-route)。 + +### 创建 stream_route 并配置 mqtt-proxy 插件和 mTLS + +通过以下示例可以创建一个配置了 `mqtt-proxy` 插件的 `stream_route`,需要提供 CA 证书、客户端证书和客户端密钥(对于不受主机信任的自签名证书,请使用 -k 选项): + +```shell +curl 127.0.0.1:9180/apisix/admin/stream_routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "sni": "${your_sni_name}", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } +}' +``` + +:::note 注意 + +`sni` 名称必须与提供的 CA 和服务器证书创建的 SSL 对象的一个​​或多个 SNI 匹配。 + +::: + +## 删除插件 + +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ +-H "X-API-KEY: $admin_key" -X DELETE +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/multi-auth.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/multi-auth.md new file mode 100644 index 0000000..16ac84e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/multi-auth.md @@ -0,0 +1,175 @@ +--- +title: multi-auth +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Multi Auth + - multi-auth +description: 本文档包含有关 Apache APISIX multi-auth 插件的信息。 +--- + + + +## 描述 + +插件 `multi-auth` 用于向 `Route` 或者 `Service` 中,添加多种身份验证方式。它支持 `auth` 类型的插件。您可以使用 `multi-auth` 插件,来组合不同的身份认证方式。 + +插件通过迭代 `auth_plugins` 属性指定的插件列表,提供了灵活的身份认证机制。它允许多个 `Consumer` 在使用不同身份验证方式时共享相同的 `Route` ,同时。例如:一个 Consumer 使用 basic 认证,而另一个消费者使用 JWT 认证。 + +## 属性 + +For Route: + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|--------------|-------|------|-----|-------------------------| +| auth_plugins | array | True | - | 添加需要支持的认证插件。至少需要 2 个插件。 | + +## 启用插件 + +要启用插件,您必须创建两个或多个具有不同身份验证插件配置的 Consumer: + +首先创建一个 Consumer 使用 basic-auth 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + 
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "username": "foo1",
+    "plugins": {
+        "basic-auth": {
+            "username": "foo1",
+            "password": "bar1"
+        }
+    }
+}'
+```
+
+然后再创建一个 Consumer 使用 key-auth 插件:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/consumers -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "username": "foo2",
+    "plugins": {
+        "key-auth": {
+            "key": "auth-one"
+        }
+    }
+}'
+```
+
+创建 Consumer 之后,您可以配置一个路由或服务来验证请求:
+
+```shell
+curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d '
+{
+    "methods": ["GET"],
+    "uri": "/hello",
+    "plugins": {
+        "multi-auth":{
+            "auth_plugins":[
+                {
+                    "basic-auth":{ }
+                },
+                {
+                    "key-auth":{
+                        "query":"apikey",
+                        "hide_credentials":true,
+                        "header":"apikey"
+                    }
+                }
+            ]
+        }
+    },
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+
+## 使用示例
+
+如上所述配置插件后,您可以向对应的 API 发起一个请求,如下所示:
+
+请求开启 basic-auth 插件的 API
+
+```shell
+curl -i -ufoo1:bar1 http://127.0.0.1:9080/hello
+```
+
+请求开启 key-auth 插件的 API
+
+```shell
+curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -i
+```
+
+```
+HTTP/1.1 200 OK
+... 
+hello, world +``` + +如果请求未授权,将会返回 `401 Unauthorized` 错误: + +```json +{"message":"Authorization Failed"} +``` + +## 删除插件 + +要删除 `multi-auth` 插件,您可以从插件配置中删除插件对应的 JSON 配置,APISIX 会自动加载,您不需要重新启动即可生效。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/node-status.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/node-status.md new file mode 100644 index 0000000..b210d78 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/node-status.md @@ -0,0 +1,125 @@ +--- +title: node-status +keywords: + - Apache APISIX + - API 网关 + - 插件 + - Node status +description: 本文介绍了 API 网关 Apache APISIX node-status 插件的相关信息。 +--- + + + +## 描述 + +`node-status` 插件可用于通过暴露的 API 查询 APISIX 的请求状态,并返回基本的状态信息。 + +## 插件属性 + +无。 + +## 插件接口 + +该插件将会增加 `/apisix/status` 的接口用来暴露 APISIX 的状态,你需要通过 [public-api](public-api.md) 插件来暴露该接口。 + +## 启用插件 + +`node-status` 插件默认为禁用状态,如果你需要使用该插件,请在配置文件 `./conf/config.yaml` 中启用它: + +``` yaml title="./conf/config.yaml" +plugins: + - limit-req + - node-status + - jwt-auth + - zipkin + ...... 
+``` + +你需要为 `/apisix/status` API 配置路由,并使用 [public-api](public-api.md) 插件暴露它。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/ns -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/apisix/status", + "plugins": { + "public-api": {} + } +}' +``` + +## 测试插件 + +完成上述配置后,你可以通过以下命令向 `/apisix/status` 发送请求以获取状态信息。 + +```shell +curl http://127.0.0.1:9080/apisix/status -i +``` + +```shell +HTTP/1.1 200 OK +Date: Tue, 03 Nov 2020 11:12:55 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server + +{"status":{"total":"23","waiting":"0","accepted":"22","writing":"1","handled":"22","active":"1","reading":"0"},"id":"6790a064-8f61-44ba-a6d3-5df42f2b1bb3"} +``` + +返回结果中的参数释义如下: + +| 参数 | 说明 | +| ------------ | ---------------------------------------------------------------------- | +| status | APISIX 的状态信息。 | +| total | 客户端请求总数。 | +| waiting | 当前等待客户端请求的空闲连接数。 | +| accepted | 当前已经接受的客户端连接总数。 | +| writing | 当前正在写给客户端响应的连接数。 | +| handled | 当前已经处理的连接总数,除非达到其他资源的限制,否则此值与 `accepted` 相同。 | +| active | 当前活跃的客户端连接数。 | +| reading | 当前正在读取请求头的连接数。 | +| id | APISIX UID 信息,保存在 `./conf/apisix.uid` 文件中。 | + +## 删除插件 + +如果你不再需要该插件,可以从配置文件 (`./conf/config.yaml`) 中删除它: + +``` yaml title="conf/config.yaml" + - limit-req + - jwt-auth + - zipkin + ...... 
+``` + +你也可以移除暴露 `/apisix/status` 接口的路由。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/ns -H "X-API-KEY: $admin_key" -X DELETE +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ocsp-stapling.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ocsp-stapling.md new file mode 100644 index 0000000..025946f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ocsp-stapling.md @@ -0,0 +1,144 @@ +--- +title: ocsp-stapling +keywords: + - Apache APISIX + - API 网关 + - 插件 + - ocsp-stapling +description: 本文介绍了 API 网关 Apache APISIX ocsp-stapling 插件的相关信息。 +--- + + + +## 描述 + +`ocsp-stapling` 插件可以动态地设置 Nginx 中 [OCSP stapling](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_stapling) 的相关行为。 + +## 启用插件 + +这个插件是默认禁用的,通过修改配置文件 `./conf/config.yaml` 来启用它: + +```yaml +plugins: + - ... + - ocsp-stapling +``` + +修改配置文件之后,重启 APISIX 或者通过插件热加载接口来使配置生效: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H "X-API-KEY: $admin_key" -X PUT +``` + +## 属性 + +插件属性存储在 SSL 资源的 `ocsp_stapling` 字段中。 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|----------------|----------------------|----------|---------------|--------------|-----------------------------------------------------------------------| +| enabled | boolean | False | false | | 与 `ssl_stapling` 指令类似,用于启用或禁用 OCSP stapling 特性 | +| skip_verify | boolean | False | false | | 与 `ssl_stapling_verify` 指令类似,用于启用或禁用对于 OCSP 响应结果的校验 | +| cache_ttl | integer | False | 3600 | >= 60 | 指定 OCSP 响应结果的缓存时间 | + +## 使用示例 + +首先您应该创建一个 SSL 资源,并且证书资源中应该包含颁发者的证书。通常情况下,全链路证书就可以正常工作。 + +如下示例中,生成相关的 SSL 资源: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": 
["test.com"], + "ocsp_stapling": { + "enabled": true + } +}' +``` + +通过上述命令生成 SSL 资源后,可以通过以下方法测试: + +```shell +echo -n "Q" | openssl s_client -status -connect localhost:9443 -servername test.com 2>&1 | cat +``` + +``` +... +CONNECTED(00000003) +OCSP response: +====================================== +OCSP Response Data: + OCSP Response Status: successful (0x0) +... +``` + +可以通过以下方法禁用插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ocsp_stapling": { + "enabled": false + } +}' +``` + +## 删除插件 + +在删除插件之前,需要确保所有的 SSL 资源都已经移除 `ocsp_stapling` 字段,可以通过以下命令实现对单个 SSL 资源的对应字段移除: + +```shell +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PATCH -d ' +{ + "ocsp_stapling": null +}' +``` + +通过修改配置文件 `./conf/config.yaml` 来禁用它: + +```yaml +plugins: + - ... + # - ocsp-stapling +``` + +修改配置文件之后,重启 APISIX 或者通过插件热加载接口来使配置生效: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H "X-API-KEY: $admin_key" -X PUT +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/opa.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/opa.md new file mode 100644 index 0000000..a72a2f1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/opa.md @@ -0,0 +1,329 @@ +--- +title: opa +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Open Policy Agent + - opa +description: 本篇文档介绍了 Apache APISIX 通过 opa 插件与 Open Policy Agent 对接的相关信息。 +--- + + + +## 描述 + +`opa` 插件可用于与 [Open Policy Agent](https://www.openpolicyagent.org) 进行集成,实现后端服务的认证授权与访问服务等功能解耦,减少系统复杂性。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | 
+|-------------------|---------|----------|---------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| host | string | 是 | | | OPA 服务的主机地址,例如 `https://localhost:8181`。 | +| ssl_verify | boolean | 否 | true | | 当设置为 `true` 时,将验证 SSL 证书。 | +| policy | string | 是 | | | OPA 策略路径,是 `package` 和 `decision` 配置的组合。当使用高级功能(如自定义响应)时,你可以省略 `decision` 配置。 | +| timeout | integer | 否 | 3000ms | [1, 60000]ms | 设置 HTTP 调用超时时间。 | +| keepalive | boolean | 否 | true | | 当设置为 `true` 时,将为多个请求保持连接并处于活动状态。 | +| keepalive_timeout | integer | 否 | 60000ms | [1000, ...]ms | 连接断开后的闲置时间。 | +| keepalive_pool | integer | 否 | 5 | [1, ...]ms | 连接池限制。 | +| with_route | boolean | 否 | false | | 当设置为 `true` 时,发送关于当前 Route 的信息。 | +| with_service | boolean | 否 | false | | 当设置为 `true` 时,发送关于当前 Service 的信息。 | +| with_consumer | boolean | 否 | false | | 当设置为 `true` 时,发送关于当前 Consumer 的信息。注意,这可能会发送敏感信息,如 API key。请确保在安全的情况下才打开它。 | + +## 数据定义 + +### APISIX 向 OPA 发送信息 + +下述示例代码展示了如何通过 APISIX 向 OPA 服务发送数据: + +```json +{ + "type": "http", + "request": { + "scheme": "http", + "path": "\/get", + "headers": { + "user-agent": "curl\/7.68.0", + "accept": "*\/*", + "host": "127.0.0.1:9080" + }, + "query": {}, + "port": 9080, + "method": "GET", + "host": "127.0.0.1" + }, + "var": { + "timestamp": 1701234567, + "server_addr": "127.0.0.1", + "server_port": "9080", + "remote_port": "port", + "remote_addr": "ip address" + }, + "route": {}, + "service": {}, + "consumer": {} +} +``` + +上述代码具体释义如下: + +- `type` 代表请求类型(如 `http` 或 `stream`); +- `request` 则需要在 `type` 为 `http` 时使用,包含基本的请求信息(如 URL、头信息等); +- `var` 包含关于请求连接的基本信息(如 IP、端口、请求时间戳等); +- `route`、`service` 和 `consumer` 包含的数据与 APISIX 中存储的数据相同,只有当这些对象上配置了 `opa` 插件时才会发送。 + +### OPA 向 APISIX 返回数据 + +下述示例代码展示了 OPA 服务对 APISIX 发送请求后的响应数据: + +```json +{ + "result": { + "allow": true, + "reason": "test", + "headers": { + "an": 
"header" + }, + "status_code": 401 + } +} +``` + +上述响应中的代码释义如下: + +- `allow` 配置是必不可少的,它表示请求是否允许通过 APISIX 进行转发; +- `reason`、`headers` 和 `status_code` 是可选的,只有当你配置一个自定义响应时才会返回这些选项信息,具体使用方法可查看后续测试用例。 + +## 测试插件 + +首先启动 OPA 环境: + +```shell +docker run -d --name opa -p 8181:8181 openpolicyagent/opa:0.35.0 run -s +``` + +### 基本用法 + +一旦你运行了 OPA 服务,就可以进行基本策略的创建: + +```shell +curl -X PUT '127.0.0.1:8181/v1/policies/example1' \ + -H 'Content-Type: text/plain' \ + -d 'package example1 + +import input.request + +default allow = false + +allow { + # HTTP method must GET + request.method == "GET" +}' +``` + +然后在指定路由上配置 `opa` 插件: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r1' \ + -H 'X-API-KEY: ' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/*", + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "example1" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } +}' +``` + +使用如下命令进行测试: + +```shell +curl -i -X GET 127.0.0.1:9080/get +``` + +```shell +HTTP/1.1 200 OK +``` + +如果尝试向不同的端点发出请求,会出现请求失败的状态: + +```shell +curl -i -X POST 127.0.0.1:9080/post +``` + +```shell +HTTP/1.1 403 FORBIDDEN +``` + +### 使用自定义响应 + +除了基础用法外,你还可以为更复杂的使用场景配置自定义响应,参考示例如下: + +```shell +curl -X PUT '127.0.0.1:8181/v1/policies/example2' \ + -H 'Content-Type: text/plain' \ + -d 'package example2 + +import input.request + +default allow = false + +allow { + request.method == "GET" +} + +# custom response body (Accepts a string or an object, the object will respond as JSON format) +reason = "test" { + not allow +} + +# custom response header (The data of the object can be written in this way) +headers = { + "Location": "http://example.com/auth" +} { + not allow +} + +# custom response status code +status_code = 302 { + not allow +}' +``` + +同时,你可以将 `opa` 插件的策略参数调整为 `example2`,然后发出请求进行测试: + +```shell +curl -i -X GET 127.0.0.1:9080/get +``` + +```shell +HTTP/1.1 200 OK +``` + +此时如果你发出一个失败请求,将会收到来自 OPA 服务的自定义响应反馈,如下所示: 
+ +```shell +curl -i -X POST 127.0.0.1:9080/post +``` + +```shell +HTTP/1.1 302 FOUND +Location: http://example.com/auth + +test +``` + +### 发送 APISIX 数据 + +如果你的 OPA 服务需要根据 APISIX 的某些数据(如 Route 和 Consumer 的详细信息)来进行后续操作时,则可以通过配置插件来实现。 + +下述示例展示了一个简单的 `echo` 策略,它将原样返回 APISIX 发送的数据: + +```shell +curl -X PUT '127.0.0.1:8181/v1/policies/echo' \ + -H 'Content-Type: text/plain' \ + -d 'package echo + +allow = false +reason = input' +``` + +现在就可以在路由上配置插件来发送 APISIX 数据: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r1' \ + -H 'X-API-KEY: ' \ + -H 'Content-Type: application/json' \ + -d '{ + "uri": "/*", + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "echo", + "with_route": true + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } +}' +``` + +此时如果你提出一个请求,则可以通过自定义响应看到来自路由的数据: + +```shell +curl -X GET 127.0.0.1:9080/get +``` + +```shell +{ + "type": "http", + "request": { + xxx + }, + "var": { + xxx + }, + "route": { + xxx + } +} +``` + +## 删除插件 + +当你需要禁用 `opa` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/openfunction.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/openfunction.md new file mode 100644 index 0000000..e1797ed --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/openfunction.md @@ -0,0 +1,171 @@ +--- +title: openfunction +keywords: + - Apache APISIX + - API 网关 + - Plugin + - OpenFunction +description: 本文介绍了 API 网关 Apache APISIX 的 
openfunction 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`openfunction` 插件用于将开源的分布式无服务器平台 [CNCF OpenFunction](https://openfunction.dev/) 作为动态上游集成至 APISIX。 + +启用 `openfunction` 插件后,该插件会终止对已配置 URI 的请求,并代表客户端向 OpenFunction 的 function 发起一个新的请求,然后 `openfunction` 插件会将响应信息返回至客户端。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| --------------------------- | ------- | ------ | ------- | ------------ | ------------------------------------------------------------ | +| function_uri | string | 是 | | | OpenFunction function uri,例如 `https://localhost:30858/default/function-sample`。 | +| ssl_verify | boolean | 否 | true | | 当设置为 `true` 时执行 SSL 验证。 | +| authorization | object | 否 | | | 访问 OpenFunction 的函数的授权凭证。| +| authorization.service_token | string | 否 | | | OpenFunction service token,其格式为 `xxx:xxx`,支持函数入口的 basic auth 认证方式。 | +| timeout | integer | 否 | 3000 ms | [100,...] ms | OpenFunction action 和 HTTP 调用超时时间,以毫秒为单位。 | +| keepalive | boolean | 否 | true | | 当设置为 `true` 时,保持连接的活动状态以便重复使用。 | +| keepalive_timeout | integer | 否 | 60000 ms| [1000,...] ms| 当连接空闲时,保持该连接处于活动状态的时间,以毫秒为单位。 | +| keepalive_pool | integer | 否 | 5 | [1,...] 
| 连接断开之前,可接收的最大请求数。 | + +:::note 注意 + +`timeout` 字段规定了 OpenFunction function 的最大执行时间,以及 APISIX 中 HTTP 客户端的请求超时时间。 + +因为 OpenFunction function 调用可能会耗费很长时间来拉取容器镜像和启动容器,如果 `timeout` 字段的值设置太小,可能会导致大量请求失败。 + +::: + +## 前提条件 + +在使用 `openfunction` 插件之前,你需要通过以下命令运行 OpenFunction。详情参考 [OpenFunction 安装指南](https://openfunction.dev/docs/getting-started/installation/) 。 + +请确保当前环境中已经安装对应版本的 Kubernetes 集群。 + +### 创建并推送函数 + +你可以参考 [OpenFunction 官方示例](https://github.com/OpenFunction/samples) 创建函数。构建函数时,你需要使用以下命令为容器仓库生成一个密钥,才可以将函数容器镜像推送到容器仓库 ( 例如 Docker Hub 或 Quay.io)。 + +```shell +REGISTRY_SERVER=https://index.docker.io/v1/ REGISTRY_USER= REGISTRY_PASSWORD= +kubectl create secret docker-registry push-secret \ + --docker-server=$REGISTRY_SERVER \ + --docker-username=$REGISTRY_USER \ + --docker-password=$REGISTRY_PASSWORD +``` + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample/test", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +## 测试插件 + +使用 `curl` 命令测试: + +```shell +curl -i http://127.0.0.1:9080/hello -X POST -d'test' +``` + +正常返回结果: + +``` +hello, test! 
+``` + +### 配置路径转发 + +`OpenFunction` 插件还支持 URL 路径转发,同时将请求代理到上游的 OpenFunction API 端点。基本请求路径的扩展(如路由 `/hello/*` 中 `*` 的部分)会被添加到插件配置中指定的 `function_uri`。 + +:::info 重要 + +路由上配置的 `uri` 必须以 `*` 结尾,此功能才能正常工作。APISIX 路由是严格匹配的,`*` 表示此 URI 的任何子路径都将匹配到同一路由。 + +::: + +下面的示例配置了此功能: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello/*", + "plugins": { + "openfunction": { + "function_uri": "http://localhost:3233/default/function-sample", + "authorization": { + "service_token": "test:test" + } + } + } +}' +``` + +现在,对路径 `hello/123` 的任何请求都将调用 OpenFunction 插件设置的对应的函数,并转发添加的路径: + +```shell +curl http://127.0.0.1:9080/hello/123 +``` + +```shell +Hello, 123! +``` + +## 删除插件 + +当你需要禁用 `openfunction` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/openid-connect.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/openid-connect.md new file mode 100644 index 0000000..b490d97 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/openid-connect.md @@ -0,0 +1,257 @@ +--- +title: openid-connect +keywords: + - Apache APISIX + - API 网关 + - OpenID Connect + - OIDC +description: openid-connect 插件支持与 OpenID Connect (OIDC) 身份提供商集成,例如 Keycloak、Auth0、Microsoft Entra ID、Google、Okta 等。它允许 APISIX 对客户端进行身份验证并从身份提供商处获取其信息,然后允许或拒绝其访问上游受保护资源。 +--- + + + + + + + +## 描述 + +`openid-connect` 插件支持与 [OpenID Connect (OIDC)](https://openid.net/connect/) 身份提供商集成,例如 Keycloak、Auth0、Microsoft Entra ID、Google、Okta 等。它允许 APISIX 对客户端进行身份验证,并从身份提供商处获取其信息,然后允许或拒绝其访问上游受保护资源。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------------------------------ | ------- | ------ | --------------------- | 
------------- | ------------------------------------------------------------------------------------------------ | +| client_id | string | 是 | | | OAuth 客户端 ID。 | +| client_secret | string | 是 | | | OAuth 客户端 secret。 | +| discovery | string | 是 | | | OpenID 提供商的知名发现文档的 URL,其中包含 OP API 端点列表。插件可以直接利用发现文档中的端点。您也可以单独配置这些端点,这优先于发现文档中提供的端点。 | +| scope | string | 否 | openid | | 与应返回的有关经过身份验证的用户的信息相对应的 OIDC 范围,也称为 [claim](https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims)。这用于向用户授权适当的权限。默认值为 `openid`,这是 OIDC 返回唯一标识经过身份验证的用户的 `sub` 声明所需的范围。可以附加其他范围并用空格分隔,例如 `openid email profile`。 | +| required_scopes | array[string] | 否 | | | 访问令牌中必须存在的范围。当 `bearer_only` 为 `true` 时与自省端点结合使用。如果缺少任何必需的范围,插件将以 403 禁止错误拒绝请求。| +| realm | string | 否 | apisix | | 由于持有者令牌无效,[`WWW-Authenticate`](https://www.rfc-editor.org/rfc/rfc6750#section-3) 响应标头中的领域伴随 401 未经授权的请求。 | +| bearer_only | boolean | 否 | false | | 如果为 true,则严格要求在身份验证请求中使用持有者访问令牌。 | +| logout_path | string | 否 | /logout | | 激活注销的路径。 | +| post_logout_redirect_uri | string | 否 | | | `logout_path` 收到注销请求后将用户重定向到的 URL。| +| redirect_uri | string | 否 | | | 通过 OpenID 提供商进行身份验证后重定向到的 URI。请注意,重定向 URI 不应与请求 URI 相同,而应为请求 URI 的子路径。例如,如果路由的 `uri` 是 `/api/v1/*`,则 `redirect_uri` 可以配置为 `/api/v1/redirect`。如果未配置 `redirect_uri`,APISIX 将在请求 URI 后附加 `/.apisix/redirect` 以确定 `redirect_uri` 的值。| +| timeout | integer | 否 | 3 | [1,...] 
| 请求超时时间(秒)。| +| ssl_verify | boolean | 否 | false | | 如果为 true,则验证 OpenID 提供商的 SSL 证书。| +| introspection_endpoint | string | 否 | | |用于自检访问令牌的 OpenID 提供程序的 [令牌自检](https://datatracker.ietf.org/doc/html/rfc7662) 端点的 URL。如果未设置,则将使用众所周知的发现文档中提供的自检端点[作为后备](https://github.com/zmartzone/lua-resty-openidc/commit/cdaf824996d2b499de4c72852c91733872137c9c)。| +| introspection_endpoint_auth_method | string | 否 | client_secret_basic | | 令牌自检端点的身份验证方法。该值应为 `introspection_endpoint_auth_methods_supported` [授权服务器元数据](https://www.rfc-editor.org/rfc/rfc8414.html) 中指定的身份验证方法之一,如众所周知的发现文档中所示,例如 `client_secret_basic`、`client_secret_post`、`private_key_jwt` 和 `client_secret_jwt`。| +| token_endpoint_auth_method | string | 否 | client_secret_basic | | 令牌端点的身份验证方法。该值应为 `token_endpoint_auth_methods_supported` [授权服务器元数据](https://www.rfc-editor.org/rfc/rfc8414.html) 中指定的身份验证方法之一,如众所周知的发现文档中所示,例如 `client_secret_basic`、`client_secret_post`、`private_key_jwt` 和 `client_secret_jwt`。如果配置的方法不受支持,则回退到 `token_endpoint_auth_methods_supported` 数组中的第一个方法。| +| public_key | string | 否 | | | 用于验证 JWT 签名 id 的公钥使用非对称算法。提供此值来执行令牌验证将跳过客户端凭据流中的令牌自检。您可以以 `-----BEGIN PUBLIC KEY-----\\n……\\n-----END PUBLIC KEY-----` 格式传递公钥。| +| use_jwks | boolean | 否 | false | | 如果为 true 并且未设置 `public_key`,则使用 JWKS 验证 JWT 签名并跳过客户端凭据流中的令牌自检。JWKS 端点是从发现文档中解析出来的。| +| use_pkce | boolean | 否 | false | | 如果为 true,则使用 [RFC 7636](https://datatracker.ietf.org/doc/html/rfc7636) 中定义的授权码流的代码交换证明密钥 (PKCE)。| +| token_signing_alg_values_expected | string | 否 | | | 用于签署 JWT 的算法,例如 `RS256`。 | +| set_access_token_header | boolean | 否 | true | | 如果为 true,则在请求标头中设置访问令牌。默认情况下,使用 `X-Access-Token` 标头。| +| access_token_in_authorization_header | boolean | 否 | false | | 如果为 true 并且 `set_access_token_header` 也为 true,则在 `Authorization` 标头中设置访问令牌。 | +| set_id_token_header | boolean | 否 | true | | 如果为 true 并且 ID 令牌可用,则在 `X-ID-Token` 请求标头中设置值。 | +| set_userinfo_header | boolean | 否 | true | | 如果为 true 并且用户信息数据可用,则在 `X-Userinfo` 请求标头中设置值。 | +| set_refresh_token_header 
| boolean | 否 | false | | 如果为 true 并且刷新令牌可用,则在 `X-Refresh-Token` 请求标头中设置值。 | +| session | object | 否 | | | 当 `bearer_only` 为 `false` 且插件使用 Authorization Code 流程时使用的 Session 配置。 | +| session.secret | string | 是 | | 16 个字符以上 | 当 `bearer_only` 为 `false` 时,用于 session 加密和 HMAC 运算的密钥,若未配置则自动生成并保存到 etcd。当在独立模式下使用 APISIX 时,etcd 不再是配置中心,需要配置 `secret`。 | +| session.cookie | object | 否 | | | Cookie 配置。 | +| session.cookie.lifetime | integer | 否 | 3600 | | Cookie 生存时间(秒)。| +| unauth_action | string | 否 | auth | ["auth","deny","pass"] | 未经身份验证的请求的操作。设置为 `auth` 时,重定向到 OpenID 提供程序的身份验证端点。设置为 `pass` 时,允许请求而无需身份验证。设置为 `deny` 时,返回 401 未经身份验证的响应,而不是启动授权代码授予流程。| +| session_contents | object | 否 | | | 会话内容配置。如果未配置,将把所有数据存储在会话中。 | +| session_contents.access_token | boolean | 否 | | | 若为 true,则将访问令牌存储在会话中。 | +| session_contents.id_token | boolean | 否 | | | 若为 true,则将 ID 令牌存储在会话中。 | +| session_contents.enc_id_token | boolean | 否 | | | 若为 true,则将加密的 ID 令牌存储在会话中。 | +| session_contents.user | boolean | 否 | | | 若为 true,则将用户信息存储在会话中。 | +| proxy_opts | object | 否 | | | OpenID 提供程序背后的代理服务器的配置。| +| proxy_opts.http_proxy | string | 否 | | | HTTP 请求的代理服务器地址,例如 `http://:`。| +| proxy_opts.https_proxy | string | 否 | | | HTTPS 请求的代理服务器地址,例如 `http://:`。 | +| proxy_opts.http_proxy_authorization | string | 否 | | Basic [base64 用户名:密码] | 与 `http_proxy` 一起使用的默认 `Proxy-Authorization` 标头值。可以用自定义的 `Proxy-Authorization` 请求标头覆盖。 | +| proxy_opts.https_proxy_authorization | string | 否 | | Basic [base64 用户名:密码] | 与 `https_proxy` 一起使用的默认 `Proxy-Authorization` 标头值。不能用自定义的 `Proxy-Authorization` 请求标头覆盖,因为使用 HTTPS 时,授权在连接时完成。 | +| proxy_opts.no_proxy | string | 否 | | | 不应代理的主机的逗号分隔列表。| +| authorization_params | object | 否 | | | 在请求中发送到授权端点的附加参数。 | +| client_rsa_private_key | string | 否 | | | 用于签署 JWT 以向 OP 进行身份验证的客户端 RSA 私钥。当 `token_endpoint_auth_method` 为 `private_key_jwt` 时必需。 | +| client_rsa_private_key_id | string | 否 | | | 用于计算签名的 JWT 的客户端 RSA 私钥 ID。当 `token_endpoint_auth_method` 为 `private_key_jwt` 时可选。 | +| 
client_jwt_assertion_expires_in | integer | 否 | 60 | | 用于向 OP 进行身份验证的签名 JWT 的生命周期,以秒为单位。当 `token_endpoint_auth_method` 为 `private_key_jwt` 或 `client_secret_jwt` 时使用。 | +| renew_access_token_on_expiry | boolean | 否 | true | | 如果为 true,则在访问令牌过期或刷新令牌可用时尝试静默更新访问令牌。如果令牌无法更新,则重定向用户进行重新身份验证。| +| access_token_expires_in | integer | 否 | | | 如果令牌端点响应中不存在 `expires_in` 属性,则访问令牌的有效期(以秒为单位)。 | +| refresh_session_interval | integer | 否 | | | 刷新用户 ID 令牌而无需重新认证的时间间隔。如果未设置,则不会检查网关向客户端发出的会话的到期时间。如果设置为 900,则表示在 900 秒后刷新用户的 `id_token`(或浏览器中的会话),而无需重新认证。 | +| iat_slack | integer | 否 | 120 | | ID 令牌中 `iat` 声明的时钟偏差容忍度(以秒为单位)。 | +| accept_none_alg | boolean | 否 | false | | 如果 OpenID 提供程序未签署其 ID 令牌(例如当签名算法设置为`none` 时),则设置为 true。 | +| accept_unsupported_alg | boolean | 否 | true | | 如果为 true,则忽略 ID 令牌签名以接受不支持的签名算法。 | +| access_token_expires_leeway | integer | 否 | 0 | | 访问令牌续订的过期余地(以秒为单位)。当设置为大于 0 的值时,令牌续订将在令牌过期前设定的时间内进行。这样可以避免访问令牌在到达资源服务器时刚好过期而导致的错误。| +| force_reauthorize | boolean | 否 | false | | 如果为 true,即使令牌已被缓存,也执行授权流程。 | +| use_nonce | boolean | 否 | false | | 如果为 true,在授权请求中启用 nonce 参数。 | +| revoke_tokens_on_logout | boolean | 否 | false | | 如果为 true,则通知授权服务器,撤销端点不再需要先前获得的刷新或访问令牌。 | +| jwk_expires_in | integer | 否 | 86400 | | JWK 缓存的过期时间(秒)。 | +| jwt_verification_cache_ignore | boolean | 否 | false | | 如果为 true,则强制重新验证承载令牌并忽略任何现有的缓存验证结果。 | +| cache_segment | string | 否 | | | 缓存段的可选名称,用于分隔和区分令牌自检或 JWT 验证使用的缓存。| +| introspection_interval | integer | 否 | 0 | | 缓存和自省访问令牌的 TTL(以秒为单位)。默认值为 0,这意味着不使用此选项,插件默认使用 `introspection_expiry_claim` 中定义的到期声明传递的 TTL。如果`introspection_interval` 大于 0 且小于 `introspection_expiry_claim` 中定义的到期声明传递的 TTL,则使用`introspection_interval`。| +| introspection_expiry_claim | string | 否 | exp | | 到期声明的名称,它控制缓存和自省访问令牌的 TTL。| +| introspection_addon_headers | array[string] | 否 | | | 用于将其他标头值附加到自省 HTTP 请求。如果原始请求中不存在指定的标头,则不会附加值。| +| claim_validator.issuer.valid_issuers | string[] | 否 | | | 将经过审查的 jwt 发行者列入白名单。当用户未传递时,将使用发现端点返回的颁发者。如果两者均缺失,发行人将无法得到验证| + +注意:schema 中还定义了 
`encrypt_fields = {"client_secret"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +## 示例 + +以下示例演示了如何针对不同场景配置 `openid-connect` 插件。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### Authorization Code Flow + +Authorization Code Flow 在 [RFC 6749,第 4.1 节](https://datatracker.ietf.org/doc/html/rfc6749#section-4.1) 中定义。它涉及用临时授权码交换访问令牌,通常由机密和公共客户端使用。 + +下图说明了实施 Authorization Code Flow 时不同实体之间的交互: + +![授权码流程图](https://static.api7.ai/uploads/2023/11/27/Ga2402sb_oidc-code-auth-flow-revised.png) + +当传入请求的标头中或适当的会话 cookie 中不包含访问令牌时,插件将充当依赖方并重定向到授权服务器以继续授权码流程。 + +成功验证后,插件将令牌保留在会话 cookie 中,后续请求将使用存储在 cookie 中的令牌。 + +请参阅 [实现 Authorization Code Flow](../tutorials/keycloak-oidc.md#实现-authorization-code-grant)以获取使用`openid-connect`插件通过授权码流与 Keycloak 集成的示例。 + +### Proof Key for Code Exchange (PKCE) + +Proof Key for Code Exchange (PKCE) 在 [RFC 7636](https://datatracker.ietf.org/doc/html/rfc7636) 中定义。PKCE 通过添加代码质询和验证器来增强授权码流程,以防止授权码拦截攻击。 + +下图说明了使用 PKCE 实现授权码流程时不同实体之间的交互: + +![使用 PKCE 的授权码流程图](https://static.api7.ai/uploads/2024/11/04/aJ2ZVuTC_auth-code-with-pkce.png) + +请参阅 [实现 Authorization Code Grant](../tutorials/keycloak-oidc.md#实现-authorization-code-grant),了解使用 `openid-connect` 插件通过 PKCE 授权码流程与 Keycloak 集成的示例。 + +### Client Credential Flow + +Client Credential Flow 在 [RFC 6749,第 4.4 节](https://datatracker.ietf.org/doc/html/rfc6749#section-4.4) 中定义。它涉及客户端使用自己的凭证请求访问令牌以访问受保护的资源,通常用于机器对机器身份验证,并不代表特定用户。 + +下图说明了实施 Client Credential Flow 时不同实体之间的交互: + +
+Client credential flow diagram +
+
+ +请参阅[实现 Client Credentials Grant](../tutorials/keycloak-oidc.md#实现-client-credentials-grant) 获取使用 `openid-connect` 插件通过客户端凭证流与 Keycloak 集成的示例。 + +### Introspection Flow + +Introspection Flow 在 [RFC 7662](https://datatracker.ietf.org/doc/html/rfc7662) 中定义。它涉及通过查询授权服务器的自省端点来验证访问令牌的有效性和详细信息。 + +在此流程中,当客户端向资源服务器出示访问令牌时,资源服务器会向授权服务器的自省端点发送请求,如果令牌处于活动状态,则该端点会响应令牌详细信息,包括令牌到期时间、相关范围以及它所属的用户或客户端等信息。 + +下图说明了使用令牌自省实现 Introspection Flow 时不同实体之间的交互: + +
+
+Client credential with introspection diagram +
+
+ +请参阅 [实现 Client Credentials Grant](../tutorials/keycloak-oidc.md#实现-client-credentials-grant) 以获取使用 `openid-connect` 插件通过带有令牌自省的客户端凭据流与 Keycloak 集成的示例。 + +### Password Flow + +Password Flow 在 [RFC 6749,第 4.3 节](https://datatracker.ietf.org/doc/html/rfc6749#section-4.3) 中定义。它专为受信任的应用程序而设计,允许它们使用用户的用户名和密码直接获取访问令牌。在此授权类型中,客户端应用程序将用户的凭据连同其自己的客户端 ID 和密钥一起发送到授权服务器,然后授权服务器对用户进行身份验证,如果有效,则颁发访问令牌。 + +虽然高效,但此流程仅适用于高度受信任的第一方应用程序,因为它要求应用程序直接处理敏感的用户凭据,如果在第三方环境中使用,则会带来重大安全风险。 + +下图说明了实施 Password Flow 时不同实体之间的交互: + +
+Password flow diagram +
+
+ +请参阅 [实现 Password Grant](../tutorials/keycloak-oidc.md#实现-password-grant) 获取使用 `openid-connect` 插件通过密码流与 Keycloak 集成的示例。 + +### Refresh Token Grant + +Refresh Token Grant 在 [RFC 6749,第 6 节](https://datatracker.ietf.org/doc/html/rfc6749#section-6) 中定义。它允许客户端使用之前颁发的刷新令牌请求新的访问令牌,而无需用户重新进行身份验证。此流程通常在访问令牌过期时使用,允许客户端无需用户干预即可持续访问资源。刷新令牌与某些 OAuth 流程中的访问令牌一起颁发,其使用寿命和安全要求取决于授权服务器的配置。 + +下图说明了在实施 Password Grant 和 Refresh Token Grant 时不同实体之间的交互: + +
+Password grant with refresh token flow diagram +
+
+ +请参阅 [Refresh Token](../tutorials/keycloak-oidc.md#refresh-token) 获取使用 `openid-connect` 插件通过带令牌刷新的密码流与 Keycloak 集成的示例。 + +## 故障排除 + +本节介绍使用此插件时的一些常见问题,以帮助您排除故障。 + +### APISIX 无法连接到 OpenID 提供商 + +如果 APISIX 无法解析或无法连接到 OpenID 提供商,请仔细检查配置文件 `config.yaml` 中的 DNS 设置并根据需要进行修改。 + +### `No Session State Found` + +如果您在使用[授权码流](#authorization-code-flow) 时遇到 500 内部服务器错误并在日志中显示以下消息,则可能有多种原因。 + +```text +the error request to the redirect_uri path, but there's no session state found +``` + +#### 1. 重定向 URI 配置错误 + +一个常见的错误配置是将 `redirect_uri` 配置为与路由的 URI 相同。当用户发起访问受保护资源的请求时,请求直接命中重定向 URI,且请求中没有 session cookie,从而导致 no session state found 错误。 + +要正确配置重定向 URI,请确保 `redirect_uri` 与配置插件的路由匹配,但不要完全相同。例如,正确的配置是将路由的 `uri` 配置为 `/api/v1/*`,并将 `redirect_uri` 的路径部分配置为 `/api/v1/redirect`。 + +您还应该确保 `redirect_uri` 包含 scheme,例如 `http` 或 `https` 。 + +#### 2. 缺少 Session Secret + +如果您在[standalone 模式](../../../en/latest/deployment-modes.md#standalone)下部署 APISIX,请确保配置了 `session.secret`。 + +用户 session 作为 cookie 存储在浏览器中,并使用 session 密钥进行加密。如果没有通过 `session.secret` 属性配置机密,则会自动生成机密并将其保存到 etcd。然而,在独立模式下,etcd 不再是配置中心。因此,您应该在 YAML 配置中心 `apisix.yaml` 中为此插件显式配置 `session.secret`。 + +#### 3. Cookie 未发送或不存在 + +检查 [`SameSite`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#samesitesamesite-value) cookie 属性是否已正确设置(即您的应用程序是否需要跨站点发送 cookie),看看这是否会成为阻止 cookie 保存到浏览器的 cookie jar 或从浏览器发送的因素。 + +#### 4. 上游发送的标头太大 + +如果您有 NGINX 位于 APISIX 前面来代理客户端流量,请查看 NGINX 的 `error.log` 中是否观察到以下错误: + +```text +upstream sent too big header while reading response header from upstream +``` + +如果是这样,请尝试将 `proxy_buffers` 、 `proxy_buffer_size` 和 `proxy_busy_buffers_size` 调整为更大的值。 + +另一个选项是配置 `session_content` 属性来调整在会话中存储哪些数据。例如,你可以将 `session_content.access_token` 设置为 `true`。 + +#### 5. 无效的客户端密钥 + +验证 `client_secret` 是否有效且正确。无效的 `client_secret` 将导致身份验证失败,并且不会返回任何令牌并将其存储在 session 中。 + +#### 6. PKCE IdP 配置 + +如果您使用授权码流程启用 PKCE,请确保您已将 IdP 客户端配置为使用 PKCE。例如,在 Keycloak 中,您应该在客户端的高级设置中配置 PKCE 质询方法: + +
+PKCE keycloak configuration +
diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/opentelemetry.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/opentelemetry.md new file mode 100644 index 0000000..fa04fb7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/opentelemetry.md @@ -0,0 +1,217 @@ +--- +title: opentelemetry +keywords: + - Apache APISIX + - API 网关 + - Plugin + - OpenTelemetry +description: opentelemetry 插件可用于根据 OpenTelemetry 协议规范上报 Traces 数据,该插件仅支持二进制编码的 OLTP over HTTP。 +--- + + + + + + + +## 描述 + +`opentelemetry` 插件可用于根据 [OpenTelemetry Specification](https://opentelemetry.io/docs/reference/specification/) 协议规范上报 Traces 数据。该插件仅支持二进制编码的 OLTP over HTTP,即请求类型为 `application/x-protobuf` 的数据上报。 + +## 配置 + +默认情况下,服务名称、租户 ID、collector 和 batch span processor 的配置已预配置在[默认配置](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua)中。 + +您可以通过端点 `apisix/admin/plugin_metadata/opentelemetry` 更改插件的配置,例如: + +:::note +您可以从“config.yaml”获取“admin_key”,并使用以下命令保存到环境变量中: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/opentelemetry -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "trace_id_source": "x-request-id", + "resource": { + "service.name": "APISIX" + }, + "collector": { + "address": "127.0.0.1:4318", + "request_timeout": 3, + "request_headers": { + "Authorization": "token" + } + }, + "batch_span_processor": { + "drop_on_queue_full": false, + "max_queue_size": 1024, + "batch_timeout": 2, + "inactive_timeout": 1, + "max_export_batch_size": 16 + }, + "set_ngx_var": false +}' +``` + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|---------------------------------------|---------------|----------|--------------|--------------|-------------| +| sampler | object | 否 | - | - | 采样策略。 | +| sampler.name | string | 否 | `always_off` | ["always_on", "always_off", "trace_id_ratio", "parent_base"] | 采样策略。
`always_on`:全采样;`always_off`:不采样;`trace_id_ratio`:基于 trace id 的百分比采样;`parent_base`:如果存在 tracing 上游,则使用上游的采样决定,否则使用配置的采样策略决策。|
+| sampler.options | object | 否 | - | - | 采样策略参数。 |
+| sampler.options.fraction | number | 否 | 0 | [0, 1] | `trace_id_ratio`:采样策略的百分比。 |
+| sampler.options.root | object | 否 | - | - | `parent_base`:采样策略在没有上游 tracing 时,会使用 root 采样策略做决策。|
+| sampler.options.root.name | string | 否 | - | ["always_on", "always_off", "trace_id_ratio"] | root 采样策略。 |
+| sampler.options.root.options | object | 否 | - | - | root 采样策略参数。 |
+| sampler.options.root.options.fraction | number | 否 | 0 | [0, 1] | `trace_id_ratio` root 采样策略的百分比|
+| additional_attributes | array[string] | 否 | - | - | 追加到 trace span 的额外属性,支持内置 NGINX 或 [APISIX 变量](https://apisix.apache.org/docs/apisix/apisix-variable/)。|
+| additional_header_prefix_attributes | array[string] | 否 | - | - | 附加到跟踪范围属性的标头或标头前缀。例如,使用 `x-my-header` 或 `x-my-headers-*` 来包含带有前缀 `x-my-headers-` 的所有标头。 |
+
+## 示例
+
+以下示例展示了如何在不同场景下使用 `opentelemetry` 插件。
+
+### 启用 opentelemetry 插件
+
+默认情况下,APISIX 中的 `opentelemetry` 插件是禁用的。要启用它,请将插件添加到配置文件中,如下所示:
+
+```yaml title="config.yaml"
+plugins:
+  - ...
+ - opentelemetry +``` + +重新加载 APISIX 以使更改生效。 + +有关 `config.yaml` 中可以配置的其他选项,请参阅[静态配置](#静态配置)。 + +### 将 Traces 上报到 OpenTelemetry + +以下示例展示了如何追踪对路由的请求并将 traces 发送到 OpenTelemetry。 + +在 Docker 启动一个 OpenTelemetry collector 实例: + +```shell +docker run -d --name otel-collector -p 4318:4318 otel/opentelemetry-collector-contrib +``` + +创建一个开启了 `opentelemetry` 插件的路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "otel-tracing-route", + "uri": "/anything", + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl "http://127.0.0.1:9080/anything" +``` + +你应该收到一个 `HTTP/1.1 200 OK` 响应。 + +在 OpenTelemetry collector 的日志中,你应该看到类似以下的信息: + +```text +2024-02-18T17:14:03.825Z info ResourceSpans #0 +Resource SchemaURL: +Resource attributes: + -> telemetry.sdk.language: Str(lua) + -> telemetry.sdk.name: Str(opentelemetry-lua) + -> telemetry.sdk.version: Str(0.1.1) + -> hostname: Str(e34673e24631) + -> service.name: Str(APISIX) +ScopeSpans #0 +ScopeSpans SchemaURL: +InstrumentationScope opentelemetry-lua +Span #0 + Trace ID : fbd0a38d4ea4a128ff1a688197bc58b0 + Parent ID : + ID : af3dc7642104748a + Name : GET /anything + Kind : Server + Start time : 2024-02-18 17:14:03.763244032 +0000 UTC + End time : 2024-02-18 17:14:03.920229888 +0000 UTC + Status code : Unset + Status message : +Attributes: + -> net.host.name: Str(127.0.0.1) + -> http.method: Str(GET) + -> http.scheme: Str(http) + -> http.target: Str(/anything) + -> http.user_agent: Str(curl/7.64.1) + -> apisix.route_id: Str(otel-tracing-route) + -> apisix.route_name: Empty() + -> http.route: Str(/anything) + -> http.status_code: Int(200) +{"kind": "exporter", "data_type": "traces", "name": "debug"} +``` + +要可视化这些追踪,你可以将 traces 导出到后端服务,例如 Zipkin 和 
Prometheus。有关更多详细信息,请参阅[exporters](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter)。 + +### 在日志中使用 trace 变量 + +以下示例展示了如何配置 `opentelemetry` 插件以设置以下内置变量,这些变量可以在日志插件或访问日志中使用: + +- `opentelemetry_context_traceparent`: [W3C trace context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format) +- `opentelemetry_trace_id`: 当前 span 的 trace_id +- `opentelemetry_span_id`: 当前 span 的 span_id + +如下更新配置文件。你应该自定义访问日志格式以使用 `opentelemetry` 插件变量,并在 `set_ngx_var` 字段中设置 `opentelemetry` 变量。 + +```yaml title="conf/config.yaml" +nginx_config: + http: + enable_access_log: true + access_log_format: '{"time": "$time_iso8601","opentelemetry_context_traceparent": "$opentelemetry_context_traceparent","opentelemetry_trace_id": "$opentelemetry_trace_id","opentelemetry_span_id": "$opentelemetry_span_id","remote_addr": "$remote_addr"}' + access_log_format_escape: json +plugin_attr: + opentelemetry: + set_ngx_var: true +``` + +重新加载 APISIX 以使配置更改生效。 + +```text +{"time": "18/Feb/2024:15:09:00 +0000","opentelemetry_context_traceparent": "00-fbd0a38d4ea4a128ff1a688197bc58b0-8f4b9d9970a02629-01","opentelemetry_trace_id": "fbd0a38d4ea4a128ff1a688197bc58b0","opentelemetry_span_id": "af3dc7642104748a","remote_addr": "172.10.0.1"} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/openwhisk.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/openwhisk.md new file mode 100644 index 0000000..d7dbc1a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/openwhisk.md @@ -0,0 +1,148 @@ +--- +title: openwhisk +keywords: + - Apache APISIX + - API 网关 + - Plugin + - OpenWhisk +description: 本文介绍了关于 Apache APISIX openwhisk 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`openwhisk` 插件用于将开源的分布式无服务器平台 [Apache OpenWhisk](https://openwhisk.apache.org) 作为动态上游集成至 APISIX。 + +启用 `openwhisk` 插件后,该插件会终止对已配置 URI 的请求,并代表客户端向 OpenWhisk 的 API Host 端点发起一个新的请求,然后 `openwhisk` 插件会将响应信息返回至客户端。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 
有效值 | 描述 | +| ----------------- | ------- | ------ | ------- | ------------ | ------------------------------------------------------------ | +| api_host | string | 是 | | | OpenWhisk API Host 地址,例如 `https://localhost:3233`。 | +| ssl_verify | boolean | 否 | true | | 当设置为 `true` 时执行 SSL 验证。 | +| service_token | string | 是 | | | OpenWhisk service token,其格式为 `xxx:xxx` ,用于 API 调用时的身份认证。 | +| namespace | string | 是 | | | OpenWhisk namespace,例如 `guest`。 | +| action | string | 是 | | | OpenWhisk action,例如 `hello`。 | +| result | boolean | 否 | true | | 当设置为 `true` 时,获得 action 元数据(执行函数并获得响应结果)。 | +| timeout | integer | 否 | 60000ms | [1,60000]ms | OpenWhisk action 和 HTTP 调用超时时间(以毫秒为单位)。 | +| keepalive | boolean | 否 | true | | 当设置为 `true` 时,保持连接的活动状态以便重复使用。 | +| keepalive_timeout | integer | 否 | 60000ms | [1000,...]ms | 当连接空闲时,保持该连接处于活动状态的时间(以毫秒为单位)。 | +| keepalive_pool | integer | 否 | 5 | [1,...] | 连接断开之前,可接收的最大请求数。 | + +:::note 注意 + +`timeout` 字段规定了 OpenWhisk action 的最大执行时间,以及 APISIX 中 HTTP 客户端的请求超时时间。 + +因为 OpenWhisk action 调用可能会耗费很长时间来拉取容器镜像和启动容器,所以如果 `timeout` 字段值设置太小,可能会导致大量的失败请求。 + +在 OpenWhisk 中 `timeout` 字段的值设置范围从 1 ms 到 60000 ms,建议用户将 `timeout` 字段的值至少设置为 1000ms。 + +::: + +## 启用插件 + +### 搭建 Apache OpenWhisk 测试环境 + +1. 在使用 `openwhisk` 插件之前,你需要通过以下命令运行 OpenWhisk standalone 模式。请确保当前环境中已经安装 Docker 软件。 + +```shell +docker run --rm -d \ + -h openwhisk --name openwhisk \ + -p 3233:3233 -p 3232:3232 \ + -v /var/run/docker.sock:/var/run/docker.sock \ + openwhisk/standalone:nightly +docker exec openwhisk waitready +``` + +2. 安装 [openwhisk-cli](https://github.com/apache/openwhisk-cli) 工具: + +你可以在 [openwhisk-cli](https://github.com/apache/openwhisk-cli) 仓库下载已发布的适用于 Linux 系统的可执行二进制文件 wsk。 + +3. 
在 OpenWhisk 中注册函数: + +```shell +wsk property set --apihost "http://localhost:3233" --auth "${service_token}" +wsk action update test <(echo 'function main(){return {"ready":true}}') --kind nodejs:14 +``` + +### 创建路由 + +你可以通过以下命令在指定路由中启用该插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "openwhisk": { + "api_host": "http://localhost:3233", + "service_token": "${service_token}", + "namespace": "guest", + "action": "test" + } + } +}' +``` + +### 测试请求 + +使用 `curl` 命令测试: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +正常返回结果: + +```json +{ "ready": true } +``` + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/prometheus.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/prometheus.md new file mode 100644 index 0000000..8a2637c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/prometheus.md @@ -0,0 +1,474 @@ +--- +title: prometheus +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Prometheus +description: 本文将介绍 prometheus 插件,以及将 APISIX 与 Prometheus 集成以进行指标收集和持续监控。 +--- + + + + + + + +## 描述 + +`prometheus` 插件提供将 APISIX 与 Prometheus 集成的能力。 + +启用该插件后,APISIX 将开始收集相关指标,例如 API 请求和延迟,并以[基于文本的展示格式](https://prometheus.io/docs/instrumenting/exposition_formats/#exposition-formats)导出到 Prometheus。然后,您可以在 Prometheus 中创建事件监控和警报,以监控 API 网关和 API 的健康状况。 + +## 静态配置 + +默认情况下,已在默认配置文件 
[`config.lua`](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua) 中对 `prometheus` 进行预配置。 + +要自定义这些值,请将相应的配置添加到 config.yaml 中。例如: + +```yaml +plugin_attr: + prometheus: # 插件:prometheus 属性 + export_uri: /apisix/prometheus/metrics # 设置 Prometheus 指标端点的 URI。 + metric_prefix: apisix_ # 设置 APISIX 生成的 Prometheus 指标的前缀。 + enable_export_server: true # 启用 Prometheus 导出服务器。 + export_addr: # 设置 Prometheus 导出服务器的地址。 + ip: 127.0.0.1 # 设置 IP。 + port: 9091 # 设置端口。 + # metrics: # 为指标创建额外的标签。 + # http_status: # 这些指标将以 `apisix_` 为前缀。 + # extra_labels: # 设置 http_status 指标的额外标签。 + # - upstream_addr: $upstream_addr + # - status: $upstream_status + # expire: 0 # 指标的过期时间(秒)。 + # 0 表示指标不会过期。 + # http_latency: + # extra_labels: # 设置 http_latency 指标的额外标签。 + # - upstream_addr: $upstream_addr + # expire: 0 # 指标的过期时间(秒)。 + # 0 表示指标不会过期。 + # bandwidth: + # extra_labels: # 设置 bandwidth 指标的额外标签。 + # - upstream_addr: $upstream_addr + # expire: 0 # 指标的过期时间(秒)。 + # 0 表示指标不会过期。 + # default_buckets: # 设置 `http_latency` 指标直方图的默认桶。 + # - 10 + # - 50 + # - 100 + # - 200 + # - 500 + # - 1000 + # - 2000 + # - 5000 + # - 10000 + # - 30000 + # - 60000 + # - 500 +``` + +您可以使用 [Nginx 变量](https://nginx.org/en/docs/http/ngx_http_core_module.html)创建 `extra_labels`。请参见[为指标添加额外标签](#为指标添加额外标签)。 + +重新加载 APISIX 以使更改生效。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ------------ | --------| ------ | ------ | ----------------------------------------------------- | +|prefer_name | boolean | 否 | False | 当设置为 `true` 时,则在`prometheus` 指标中导出路由/服务名称而非它们的 `id`。 | + +## 指标 + +Prometheus 中有不同类型的指标。要了解它们之间的区别,请参见[指标类型](https://prometheus.io/docs/concepts/metric_types/)。 + +以下是 `prometheus` 插件默认导出的指标。有关示例,请参见[获取 APISIX 指标](#获取 APISIX 指标)。请注意,一些指标,例如 `apisix_batch_process_entries`,如果没有数据,将不可见。 + +| 名称 | 类型 | 描述 | +| ----------------------- | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
+| apisix_bandwidth | counter | APISIX 中每个服务消耗的总流量(字节)。 | +| apisix_etcd_modify_indexes | gauge | APISIX 键的 etcd 修改次数。 | +| apisix_batch_process_entries | gauge | 发送数据时批处理中的剩余条目数,例如使用 `http logger` 和其他日志插件。 | +| apisix_etcd_reachable | gauge | APISIX 是否可以访问 etcd。值为 `1` 表示可达,`0` 表示不可达。 | +| apisix_http_status | counter | 从上游服务返回的 HTTP 状态代码。 | +| apisix_http_requests_total | gauge | 来自客户端的 HTTP 请求数量。 | +| apisix_nginx_http_current_connections | gauge | 当前与客户端的连接数量。 | +| apisix_nginx_metric_errors_total | counter | `nginx-lua-prometheus` 错误的总数。 | +| apisix_http_latency | histogram | HTTP 请求延迟(毫秒)。 | +| apisix_node_info | gauge | APISIX 节点的信息,例如主机名和当前的 APISIX 版本号。 | +| apisix_shared_dict_capacity_bytes | gauge | [NGINX 共享字典](https://github.com/openresty/lua-nginx-module#ngxshareddict) 的总容量。 | +| apisix_shared_dict_free_space_bytes | gauge | [NGINX 共享字典](https://github.com/openresty/lua-nginx-module#ngxshareddict) 中剩余的空间。 | +| apisix_upstream_status | gauge | 上游节点的健康检查状态,如果在上游配置了健康检查,则可用。值为 `1` 表示健康,`0` 表示不健康。 | +| apisix_stream_connection_total | counter | 每个 Stream Route 处理的总连接数。 | + +## 标签 + +[标签](https://prometheus.io/docs/practices/naming/#labels) 是指标的属性,用于区分指标。 + +例如,`apisix_http_status` 指标可以使用 `route` 信息进行标记,以识别 HTTP 状态的来源路由。 + +以下是 APISIX 指标的非详尽标签及其描述。 + +### `apisix_http_status` 的标签 + +以下标签用于区分 `apisix_http_status` 指标。 + +| 名称 | 描述 | +| ------ | ---------------------------------------------------------------------------------------------------------------------- | +| code | 上游节点返回的 HTTP 响应代码。 | +| route | HTTP 状态来源的路由 ID,当 `prefer_name` 为 `false`(默认)时,使用路由 ID,当 `prefer_name` 为 `true` 时,使用路由名称。如果请求不匹配任何路由,则默认为空字符串。 | +| matched_uri | 匹配请求的路由 URI。如果请求不匹配任何路由,则默认为空字符串。 | +| matched_host | 匹配请求的路由主机。如果请求不匹配任何路由,或路由未配置主机,则默认为空字符串。 | +| service | HTTP 状态来源的服务 ID,当 `prefer_name` 为 `false`(默认)时,使用服务 ID,当 `prefer_name` 为 `true` 时,使用服务名称。如果匹配的路由不属于任何服务,则默认为路由上配置的主机值。 | +| consumer | 与请求关联的消费者名称。如果请求没有与之关联的消费者,则默认为空字符串。 | +| node | 上游节点的 IP 地址。 | + +### `apisix_bandwidth` 
的标签 + +以下标签用于区分 `apisix_bandwidth` 指标。 + +| 名称 | 描述 | +| ------ | ---------------------------------------------------------------------------------------------------------------------- | +| type | 流量类型,`egress` 或 `ingress`。 | +| route | 带宽对应的路由 ID,当 `prefer_name` 为 `false`(默认)时,使用路由 ID,当 `prefer_name` 为 `true` 时,使用路由名称。如果请求不匹配任何路由,则默认为空字符串。 | +| service | 带宽对应的服务 ID,当 `prefer_name` 为 `false`(默认)时,使用服务 ID,当 `prefer_name` 为 `true` 时,使用服务名称。如果匹配的路由不属于任何服务,则默认为路由上配置的主机值。 | +| consumer | 与请求关联的消费者名称。如果请求没有与之关联的消费者,则默认为空字符串。 | +| node | 上游节点的 IP 地址。 | + +### `apisix_http_latency` 的标签 + +以下标签用于区分 `apisix_http_latency` 指标。 + +| 名称 | 描述 | +| ------ | ---------------------------------------------------------------------------------------------------------------------- | +| type | 延迟类型。有关详细信息,请参见 [延迟类型](#延迟类型)。 | +| route | 延迟对应的路由 ID,当 `prefer_name` 为 `false`(默认)时,使用路由 ID,当 `prefer_name` 为 `true` 时,使用路由名称。如果请求不匹配任何路由,则默认为空字符串。 | +| service | 延迟对应的服务 ID,当 `prefer_name` 为 `false`(默认)时,使用服务 ID,当 `prefer_name` 为 `true` 时,使用服务名称。如果匹配的路由不属于任何服务,则默认为路由上配置的主机值。 | +| consumer | 与延迟关联的消费者名称。如果请求没有与之关联的消费者,则默认为空字符串。 | +| node | 与延迟关联的上游节点的 IP 地址。 | + +#### 延迟类型 + +`apisix_http_latency` 可以标记为以下三种类型之一: + +* `request` 表示从客户端读取第一个字节到最后一个字节发送到客户端之间的时间。 + +* `upstream` 表示等待上游服务响应的时间。 + +* `apisix` 表示 `request` 延迟与 `upstream` 延迟之间的差异。 + +换句话说,APISIX 延迟不仅归因于 Lua 处理。应理解为: + +```text +APISIX 延迟 + = 下游请求时间 - 上游响应时间 + = 下游流量延迟 + NGINX 延迟 +``` + +### `apisix_upstream_status` 的标签 + +以下标签用于区分 `apisix_upstream_status` 指标。 + +| 名称 | 描述 | +| ------ | ---------------------------------------------------------------------------------------------------------------------- | +| name | 与健康检查配置的上游对应的资源 ID,例如 `/apisix/routes/1` 和 `/apisix/upstreams/1`。 | +| ip | 上游节点的 IP 地址。 | +| port | 节点的端口号。 | + +## 示例 + +以下示例演示如何在不同场景中使用 `prometheus` 插件。 + +### 获取 APISIX 指标 + +以下示例演示如何从 APISIX 获取指标。 + +默认的 Prometheus 指标端点和其他与 Prometheus 相关的配置可以在 [静态配置](#静态配置) 中找到。如果您希望自定义这些配置,更新 `config.yaml` 并重新加载 APISIX。 + +如果您在容器化环境中部署 
APISIX,并希望外部访问 Prometheus 指标端点,请按如下方式更新配置文件并重新加载 APISIX: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: + export_addr: + ip: 0.0.0.0 +``` + +向 APISIX Prometheus 指标端点发送请求: + +```shell +curl "http://127.0.0.1:9091/apisix/prometheus/metrics" +``` + +您应该看到类似以下的输出: + +```text +# HELP apisix_bandwidth Total bandwidth in bytes consumed per Service in Apisix +# TYPE apisix_bandwidth counter +apisix_bandwidth{type="egress",route="",service="",consumer="",node=""} 8417 +apisix_bandwidth{type="egress",route="1",service="",consumer="",node="127.0.0.1"} 1420 +apisix_bandwidth{type="egress",route="2",service="",consumer="",node="127.0.0.1"} 1420 +apisix_bandwidth{type="ingress",route="",service="",consumer="",node=""} 189 +apisix_bandwidth{type="ingress",route="1",service="",consumer="",node="127.0.0.1"} 332 +apisix_bandwidth{type="ingress",route="2",service="",consumer="",node="127.0.0.1"} 332 +# HELP apisix_etcd_modify_indexes Etcd modify index for APISIX keys +# TYPE apisix_etcd_modify_indexes gauge +apisix_etcd_modify_indexes{key="consumers"} 0 +apisix_etcd_modify_indexes{key="global_rules"} 0 +... 
+``` + +### 在公共 API 端点上公开 APISIX 指标 + +以下示例演示如何禁用默认情况下在端口 `9091` 上公开的 Prometheus 导出服务器,并在 APISIX 用于监听其他客户端请求的公共 API 端点上公开 APISIX Prometheus 指标。 + +在配置文件中禁用 Prometheus 导出服务器,并重新加载 APISIX 以使更改生效: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: + enable_export_server: false +``` + +接下来,使用 [`public-api`](../../../zh/latest/plugins/public-api.md) 插件创建一个路由,并为 APISIX 指标公开一个公共 API 端点: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/prometheus-metrics" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "uri": "/apisix/prometheus/metrics", + "plugins": { + "public-api": {} + } + }' +``` + +向新指标端点发送请求以进行验证: + +```shell +curl "http://127.0.0.1:9080/apisix/prometheus/metrics" +``` + +您应该看到类似以下的输出: + +```text +# HELP apisix_http_requests_total 自 APISIX 启动以来客户端请求的总数。 +# TYPE apisix_http_requests_total gauge +apisix_http_requests_total 1 +# HELP apisix_nginx_http_current_connections 当前 HTTP 连接数量。 +# TYPE apisix_nginx_http_current_connections gauge +apisix_nginx_http_current_connections{state="accepted"} 1 +apisix_nginx_http_current_connections{state="active"} 1 +apisix_nginx_http_current_connections{state="handled"} 1 +apisix_nginx_http_current_connections{state="reading"} 0 +apisix_nginx_http_current_connections{state="waiting"} 0 +apisix_nginx_http_current_connections{state="writing"} 1 +... 
+``` + +### 监控上游健康状态 + +以下示例演示如何监控上游节点的健康状态。 + +使用 `prometheus` 插件创建一个路由,并配置上游的主动健康检查: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "prometheus-route", + "uri": "/get", + "plugins": { + "prometheus": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1, + "127.0.0.1:20001": 1 + }, + "checks": { + "active": { + "timeout": 5, + "http_path": "/status", + "healthy": { + "interval": 2, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + }, + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [500], + "http_failures": 3, + "tcp_failures": 3 + } + } + } + } + }' +``` + +向 APISIX Prometheus 指标端点发送请求: + +```shell +curl "http://127.0.0.1:9091/apisix/prometheus/metrics" +``` + +您应该看到类似以下的输出: + +```text +# HELP apisix_upstream_status 上游健康检查的状态 +# TYPE apisix_upstream_status gauge +apisix_upstream_status{name="/apisix/routes/1",ip="54.237.103.220",port="80"} 1 +apisix_upstream_status{name="/apisix/routes/1",ip="127.0.0.1",port="20001"} 0 +``` + +这显示上游节点 `httpbin.org:80` 是健康的,而上游节点 `127.0.0.1:20001` 是不健康的。 + +### 为指标添加额外标签 + +以下示例演示如何为指标添加额外标签,并在标签值中使用 [Nginx 变量](https://nginx.org/en/docs/http/ngx_http_core_module.html)。 + +目前,仅以下指标支持额外标签: + +* apisix_http_status +* apisix_http_latency +* apisix_bandwidth + +在配置文件中包含以下配置以为指标添加标签,并重新加载 APISIX 以使更改生效: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: # 插件:prometheus + metrics: # 根据 NGINX 变量创建额外标签。 + http_status: + extra_labels: # 设置 `http_status` 指标的额外标签。 + - upstream_addr: $upstream_addr # 添加一个额外的 `upstream_addr` 标签,其值为 NGINX 变量 $upstream_addr。 + - route_name: $route_name # 添加一个额外的 `route_name` 标签,其值为 APISIX 变量 $route_name。 +``` + +请注意,如果您在标签值中定义了一个变量,但它与任何现有的 [APISIX 变量](https://apisix.apache.org/zh/docs/apisix/apisix-variable/) 和 [Nginx 变量](https://nginx.org/en/docs/http/ngx_http_core_module.html) 不对应,则标签值将默认为空字符串。 
+ +使用 `prometheus` 插件创建一个路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "prometheus-route", + "name": "extra-label", + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求以进行验证: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您应该看到 `HTTP/1.1 200 OK` 的响应。 + +向 APISIX Prometheus 指标端点发送请求: + +```shell +curl "http://127.0.0.1:9091/apisix/prometheus/metrics" +``` + +您应该看到类似以下的输出: + +```text +# HELP apisix_http_status APISIX 中每个服务的 HTTP 状态代码 +# TYPE apisix_http_status counter +apisix_http_status{code="200",route="1",matched_uri="/get",matched_host="",service="",consumer="",node="54.237.103.220",upstream_addr="54.237.103.220:80",route_name="extra-label"} 1 +``` + +### 使用 Prometheus 监控 TCP/UDP 流量 + +以下示例演示如何在 APISIX 中收集 TCP/UDP 流量指标。 + +在 `config.yaml` 中包含以下配置以启用 Stream proxy 和 `prometheus` 插件。重新加载 APISIX 以使更改生效: + +```yaml title="conf/config.yaml" +apisix: + proxy_mode: http&stream # 启用 L4 和 L7 代理 + stream_proxy: # 配置 L4 代理 + tcp: + - 9100 # 设置 TCP 代理监听端口 + udp: + - 9200 # 设置 UDP 代理监听端口 + +stream_plugins: + - prometheus # 为 stream proxy 启用 prometheus +``` + +使用 `prometheus` 插件创建一个 Stream Route: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/stream_routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向该 Stream Route 发送请求以进行验证: + +```shell +curl -i "http://127.0.0.1:9100" +``` + +您应该看到 `HTTP/1.1 200 OK` 的响应。 + +向 APISIX Prometheus 指标端点发送请求: + +```shell +curl "http://127.0.0.1:9091/apisix/prometheus/metrics" +``` + +您应该看到类似以下的输出: + +```text +# HELP apisix_stream_connection_total APISIX 中每个 Stream Route 处理的总连接数 +# TYPE apisix_stream_connection_total counter +apisix_stream_connection_total{route="1"} 1 +``` diff --git 
a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-cache.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-cache.md new file mode 100644 index 0000000..1fdf78c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-cache.md @@ -0,0 +1,379 @@ +--- +title: proxy-cache +keywords: + - Apache APISIX + - API 网关 + - Proxy Cache +description: proxy-cache 插件根据键缓存响应,支持 GET、POST 和 HEAD 请求的磁盘和内存缓存,从而增强 API 性能。 +--- + + + + + + + +## 描述 + +`proxy-cache` 插件提供了根据缓存键缓存响应的功能。该插​​件支持基于磁盘和基于内存的缓存选项,用于缓存 [GET](https://anything.org/learn/serving-over-http/#get-request)、[POST](https://anything.org/learn/serving-over-http/#post-request) 和 [HEAD](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/HEAD) 请求。 + +可以根据请求 HTTP 方法、响应状态代码、请求标头值等有条件地缓存响应。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------------ | -------------- | ------ | ------------------------- | ------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| cache_strategy | string | 否 | disk | ["disk","memory"] | 缓存策略。缓存在磁盘还是内存中。 | +| cache_zone | string | 否 | disk_cache_one | | 与缓存策略一起使用的缓存区域。该值应与[配置文件](#static-configurations)中定义的缓存区域之一匹配,并与缓存策略相对应。例如,当使用内存缓存策略时,应该使用内存缓存区域。 | +| cache_key | array[string] | 否 | ["$host", "$request_uri"] | | 用于缓存的键。支持[NGINX 变量](https://nginx.org/en/docs/varindex.html)和值中的常量字符串。变量应该以 `$` 符号为前缀。 | +| cache_bypass | array[string] | 否 | | |一个或多个用于解析值的参数,如果任何值不为空且不等于 `0`,则不会从缓存中检索响应。支持值中的 [NGINX 变量](https://nginx.org/en/docs/varindex.html) 和常量字符串。变量应该以 `$` 符号为前缀。| +| cache_method | array[string] | 否 | ["GET", "HEAD"] | ["GET", "POST", "HEAD"] | 应缓存响应的请求方法。| +| cache_http_status | array[integer] | 否 | [200, 301, 404] | [200, 599] | 应缓存响应的响应 HTTP 状态代码。| +| hide_cache_headers | boolean | 否 | false | | 如果为 true,则隐藏 `Expires` 和 `Cache-Control` 响应标头。| +| 
cache_control | boolean | 否 | false | | 如果为 true,则遵守 HTTP 规范中的 `Cache-Control` 行为。仅对内存中策略有效。 | +| no_cache | array[string] | 否 | | | 用于解析值的一个或多个参数,如果任何值不为空且不等于 `0`,则不会缓存响应。支持 [NGINX 变量](https://nginx.org/en/docs/varindex.html) 和值中的常量字符串。变量应以 `$` 符号为前缀。 | +| cache_ttl | integer | 否 | 300 | >=1 | 在内存中缓存时的缓存生存时间 (TTL),以秒为单位。要调整在磁盘上缓存时的 TTL,请更新[配置文件](#static-configurations) 中的 `cache_ttl`。TTL 值与从上游服务收到的响应标头 [`Cache-Control`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control) 和 [`Expires`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expires) 中的值一起评估。| + +## 静态配置 + +默认情况下,磁盘缓存时的 `cache_ttl` 和缓存 `zones` 等值已在 [默认配置](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua) 中预先配置。 + +要自定义这些值,请将相应的配置添加到 `config.yaml`。例如: + +```yaml +apisix: + proxy_cache: + cache_ttl: 10s # 仅当 `Expires` 和 `Cache-Control` 响应标头均不存在,或者 APISIX 返回 + # 由于上游不可用导致 `502 Bad Gateway` 或 `504 Gateway Timeout` 时 + # 才会在磁盘上缓存时使用默认缓存 TTL + zones: + - name: disk_cache_one + memory_size: 50m + disk_size: 1G + disk_path: /tmp/disk_cache_one + cache_levels: 1:2 + # - name: disk_cache_two + # memory_size: 50m + # disk_size: 1G + # disk_path: "/tmp/disk_cache_two" + # cache_levels: "1:2" + - name: memory_cache + memory_size: 50m +``` + +重新加载 APISIX 以使更改生效。 + +## 示例 + +以下示例演示了如何为不同场景配置 `proxy-cache`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 在磁盘上缓存数据 + +磁盘缓存策略具有系统重启时数据持久性以及与内存缓存相比具有更大存储容量的优势。它适用于优先考虑耐用性且可以容忍稍大的缓存访问延迟的应用程序。 + +以下示例演示了如何在路由上使用 `proxy-cache` 插件将数据缓存在磁盘上。 + +使用磁盘缓存策略时,缓存 TTL 由响应标头 `Expires` 或 `Cache-Control` 中的值确定。如果这些标头均不存在,或者 APISIX 由于上游不可用而返回 `502 Bad Gateway` 或 `504 Gateway Timeout`,则缓存 TTL 默认为 [配置文件](#static-configuration) 中配置的值。 + +使用 `proxy-cache` 插件创建路由以将数据缓存在磁盘上: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": 
"/anything", + "plugins": { + "proxy-cache": { + "cache_strategy": "disk" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该看到带有以下标头的 `HTTP/1.1 200 OK` 响应,表明插件已成功启用: + +```text +Apisix-Cache-Status: MISS +``` + +由于在第一次响应之前没有可用的缓存,因此显示 `Apisix-Cache-Status: MISS`。 + +在缓存 TTL 窗口内再次发送相同的请求。您应该看到带有以下标头的 `HTTP/1.1 200 OK` 响应,显示缓存已命中: + +```text +Apisix-Cache-Status: HIT +``` + +等待缓存在 TTL 之后过期,然后再次发送相同的请求。您应该看到带有以下标头的 `HTTP/1.1 200 OK` 响应,表明缓存已过期: + +```text +Apisix-Cache-Status: EXPIRED +``` + +### 在内存中缓存数据 + +内存缓存策略具有低延迟访问缓存数据的优势,因为从 RAM 检索数据比从磁盘存储检索数据更快。它还适用于存储不需要长期保存的临时数据,从而可以高效缓存频繁更改的数据。 + +以下示例演示了如何在路由上使用 `proxy-cache` 插件在内存中缓存数据。 + +使用 `proxy-cache` 创建路由并将其配置为使用基于内存的缓存: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": "/anything", + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_zone": "memory_cache", + "cache_ttl": 10 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该看到带有以下标头的 `HTTP/1.1 200 OK` 响应,表明插件已成功启用: + +```text +Apisix-Cache-Status: MISS +``` + +由于在第一次响应之前没有可用的缓存,因此显示 `Apisix-Cache-Status: MISS`。 + +在缓存 TTL 窗口内再次发送相同的请求。您应该看到带有以下标头的 `HTTP/1.1 200 OK` 响应,显示缓存已命中: + +```text +Apisix-Cache-Status: HIT +``` + +### 有条件地缓存响应 + +以下示例演示了如何配置 `proxy-cache` 插件以有条件地缓存响应。 + +使用 `proxy-cache` 插件创建路由并配置 `no_cache` 属性,这样如果 URL 参数 `no_cache` 和标头 `no_cache` 的值中至少有一个不为空且不等于 `0`,则不会缓存响应: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": "/anything", + "plugins": { + "proxy-cache": { + "no_cache": ["$arg_no_cache", "$http_no_cache"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + 
"httpbin.org": 1 + } + } + }' +``` + +向路由发送一些请求,其中 URL 参数的 `no_cache` 值表示绕过缓存: + +```shell +curl -i "http://127.0.0.1:9080/anything?no_cache=1" +``` + +您应该收到所有请求的 `HTTP/1.1 200 OK` 响应,并且每次都观察到以下标头: + +```text +Apisix-Cache-Status: EXPIRED +``` + +向路由发送一些其他请求,其中 URL 参数 `no_cache` 值为零: + +```shell +curl -i "http://127.0.0.1:9080/anything?no_cache=0" +``` + +您应该收到所有请求的 `HTTP/1.1 200 OK` 响应,并开始看到缓存被命中: + +```text +Apisix-Cache-Status: HIT +``` + +您还可以在 `no_cache` 标头中指定以下值: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H "no_cache: 1" +``` + +响应不应该被缓存: + +```text +Apisix-Cache-Status: EXPIRED +``` + +### 有条件地从缓存中检索响应 + +以下示例演示了如何配置 `proxy-cache` 插件以有条件地从缓存中检索响应。 + +使用 `proxy-cache` 插件创建路由并配置 `cache_bypass` 属性,这样如果 URL 参数 `bypass` 和标头 `bypass` 的值中至少有一个不为空且不等于 `0`,则不会从缓存中检索响应: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": "/anything", + "plugins": { + "proxy-cache": { + "cache_bypass": ["$arg_bypass", "$http_bypass"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +向路由发送一个请求,其中 URL 参数值为 `bypass`,表示绕过缓存: + +```shell +curl -i "http://127.0.0.1:9080/anything?bypass=1" +``` + +您应该看到带有以下标头的 `HTTP/1.1 200 OK` 响应: + +```text +Apisix-Cache-Status: BYPASS +``` + +向路由发送另一个请求,其中 URL 参数 `bypass` 值为零: + +```shell +curl -i "http://127.0.0.1:9080/anything?bypass=0" +``` + +您应该看到带有以下标头的 `HTTP/1.1 200 OK` 响应: + +```text +Apisix-Cache-Status: MISS +``` + +您还可以在 `bypass` 标头中指定以下值: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H "bypass: 1" +``` + +响应应该显示绕过缓存: + +```text +Apisix-Cache-Status: BYPASS +``` + +### 缓存 502 和 504 错误响应代码 + +当上游服务返回 500 范围内的服务器错误时,`proxy-cache` 插件将缓存响应,当且仅当返回的状态为 `502 Bad Gateway` 或 `504 Gateway Timeout`。 + +以下示例演示了当上游服务返回 `504 Gateway Timeout` 时 `proxy-cache` 插件的行为。 + +使用 `proxy-cache` 插件创建路由并配置虚拟上游服务: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: 
${admin_key}" \ + -d '{ + "id": "proxy-cache-route", + "uri": "/timeout", + "plugins": { + "proxy-cache": { } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "12.34.56.78": 1 + } + } + }' +``` + +生成一些对路由的请求: + +```shell +seq 4 | xargs -I{} curl -I "http://127.0.0.1:9080/timeout" +``` + +您应该会看到类似以下内容的响应: + +```text +HTTP/1.1 504 Gateway Time-out +... +Apisix-Cache-Status: MISS + +HTTP/1.1 504 Gateway Time-out +... +Apisix-Cache-Status: HIT + +HTTP/1.1 504 Gateway Time-out +... +Apisix-Cache-Status: HIT + +HTTP/1.1 504 Gateway Time-out +... +Apisix-Cache-Status: HIT +``` + +但是,如果上游服务返回 `503 Service Temporarily Unavailable`,则响应将不会被缓存。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-control.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-control.md new file mode 100644 index 0000000..c268f34 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-control.md @@ -0,0 +1,104 @@ +--- +title: proxy-control +keywords: + - APISIX + - API 网关 + - Proxy Control +description: 本文介绍了 Apache APISIX proxy-control 插件的相关操作,你可以使用此插件动态地控制 NGINX 代理的行为。 +--- + + + +## 描述 + +使用 `proxy-control` 插件能够动态地控制 NGINX 代理的相关行为。 + +:::info 重要 + +此插件需要 APISIX 在 [APISIX-Runtime](../FAQ.md#如何构建-apisix-runtime-环境) 环境上运行。更多信息请参考 [apisix-build-tools](https://github.com/api7/apisix-build-tools)。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| --------- | ------------- | ----------- | ---------- | ------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------- | +| request_buffering | boolean | 否 | true | | 如果设置为 `true`,插件将动态设置 [`proxy_request_buffering`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering)。 | + +## 启用插件 + +以下示例展示了如何在指定路由上启用 `proxy-control` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 
并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/upload", + "plugins": { + "proxy-control": { + "request_buffering": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +启用插件后,使用 `curl` 命令请求该路由进行一个大文件的上传测试: + +```shell +curl -i http://127.0.0.1:9080/upload -d @very_big_file +``` + +如果在错误日志中没有找到关于 "a client request body is buffered to a temporary file" 的信息,则说明插件生效。 + +## 删除插件 + +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d +{ + "uri": "/upload", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-mirror.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-mirror.md new file mode 100644 index 0000000..d36a96f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-mirror.md @@ -0,0 +1,145 @@ +--- +title: proxy-mirror +keywords: + - APISIX + - API 网关 + - Proxy Mirror +description: proxy-mirror 插件将入口流量复制到 APISIX 并将其转发到指定的上游,而不会中断常规服务。 +--- + + + + + + + +## 描述 + +`proxy-mirror` 插件将传入流量复制到 APISIX 并将其转发到指定的上游,而不会中断常规服务。您可以将插件配置为镜像所有流量或仅镜像一部分流量。该机制有利于一些用例,包括故障排除、安全检查、分析等。 + +请注意,APISIX 会忽略接收镜像流量的上游主机的任何响应。 + +## 参数 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---- | ------ | ------ | ------ | ------ | ------------------------------------------------------------------------------------------------------- | +| host | string | 是 | | | 将镜像流量转发到的主机的地址。该地址应包含方案但不包含路径,例如 `http://127.0.0.1:8081`。 | +| path | string | 否 | | | 将镜像流量转发到的主机的路径。如果未指定,则默认为路由的当前 URI 路径。如果插件正在镜像 gRPC 流量,则不适用。 | +| path_concat_mode | string | 否 | replace | ["replace", 
"prefix"] | 指定 `path` 时的连接模式。设置为 `replace` 时,配置的 `path` 将直接用作将镜像流量转发到的主机的路径。设置为 `prefix` 时,转发到的路径将是配置的 `path`,附加路由的请求 URI 路径。如果插件正在镜像 gRPC 流量,则不适用。 | +| sample_ratio | number | 否 | 1 | [0.00001, 1] | 将被镜像的请求的比例。默认情况下,所有流量都会被镜像。| + +## 静态配置 + +默认情况下,插件的超时值在[默认配置](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua)中预先配置。 + +要自定义这些值,请将相应的配置添加到 `config.yaml`。例如: + +```yaml +plugin_attr: + proxy-mirror: + timeout: + connect: 60s + read: 60s + send: 60s +``` + +重新加载 APISIX 以使更改生效。 + +## 示例 + +以下示例演示了如何为不同场景配置 `proxy-mirror`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 镜像部分流量 + +以下示例演示了如何配置 `proxy-mirror` 以将 50% 的流量镜像到路由并将其转发到另一个上游服务。 + +启动一个示例 NGINX 服务器以接收镜像流量: + +```shell +docker run -p 8081:80 --name nginx nginx +``` + +您应该在终端会话中看到 NGINX 访问日志和错误日志。 + +打开一个新的终端会话并使用 `proxy-mirror` 创建一个路由来镜像 50% 的流量: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "traffic-mirror-route", + "uri": "/get", + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:8081", + "sample_ratio": 0.5 + } + }, + "upstream": { + "nodes": { + "httpbin.org": 1 + }, + "type": "roundrobin" + } + }' +``` + +发送生成几个请求到路由: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您应该会收到所有请求的 `HTTP/1.1 200 OK` 响应。 + +导航回 NGINX 终端会话,您应该会看到一些访问日志条目,大约是生成的请求数量的一半: + +```text +172.17.0.1 - - [29/Jan/2024:23:11:01 +0000] "GET /get HTTP/1.1" 404 153 "-" "curl/7.64.1" "-" +``` + +这表明 APISIX 已将请求镜像到 NGINX 服务器。此处,HTTP 响应状态为 `404`,因为示例 NGINX 服务器未实现路由。 + +### 配置镜像超时 + +以下示例演示了如何更新插件的默认连接、读取和发送超时。当将流量镜像到非常慢的后端服务时,这可能很有用。 + +由于请求镜像是作为子请求实现的,子请求中的过度延迟可能导致原始请求被阻止。默认情况下,连接、读取和发送超时设置为 60 秒。要更新这些值,您可以在配置文件的 `plugin_attr` 部分中配置它们,如下所示: + +```yaml title="conf/config.yaml" +plugin_attr: + proxy-mirror: + timeout: + connect: 2000ms + read: 2000ms + send: 2000ms +``` + +重新加载 APISIX 以使更改生效。 diff --git 
a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-rewrite.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-rewrite.md new file mode 100644 index 0000000..7590a0c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/proxy-rewrite.md @@ -0,0 +1,509 @@ +--- +title: proxy-rewrite +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Proxy Rewrite + - proxy-rewrite +description: proxy-rewrite 插件支持重写 APISIX 转发到上游服务的请求。使用此插件,您可以修改 HTTP 方法、请求目标上游地址、请求标头等。 +--- + + + + + + + +## 描述 + +`proxy-rewrite` 插件支持重写 APISIX 转发到上游服务的请求。使用此插件,您可以修改 HTTP 方法、请求目标上游地址、请求标头等。 + +## 属性 + +| 名称 | 类型 | 必需 | 默认值 | 有效值 | 描述 | +|-----------------------------|-----------|----------|---------|------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| uri | string | 否 | | | 新的上游 URI 路径。值支持 [Nginx 变量](https://nginx.org/en/docs/http/ngx_http_core_module.html)。例如,`$arg_name`。 | +| method | string | 否 | | ["GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS","MKCOL", "COPY", "MOVE", "PROPFIND", "PROPFIND","LOCK", "UNLOCK", "PATCH", "TRACE"] | 要使用的重写请求的 HTTP 方法。 | +| regex_uri | array[string] | 否 | | | 用于匹配客户端请求的 URI 路径并组成新的上游 URI 路径的正则表达式。当同时配置 `uri` 和 `regex_uri` 时,`uri` 具有更高的优先级。该数组应包含一个或多个 **键值对**,其中键是用于匹配 URI 的正则表达式,值是新的上游 URI 路径。例如,对于 `["^/iresty/(. *)/(. 
*)", "/$1-$2", ^/theothers/*", "/theothers"]`,如果请求最初发送到 `/iresty/hello/world`,插件会将上游 URI 路径重写为 `/iresty/hello-world`;如果请求最初发送到 `/theothers/hello/world`,插件会将上游 URI 路径重写为 `/theothers`。| +| host | string | 否 | | | 设置 [`Host`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Host) 请求标头。| +| headers | object | 否 | | | 要执行的标头操作。可以设置为动作动词 `add`、`remove` 和/或 `set` 的对象;或由要 `set` 的标头组成的对象。当配置了多个动作动词时,动作将按照“添加”、“删除”和“设置”的顺序执行。| +| headers.add | object | 否 | | | 要附加到请求的标头。如果请求中已经存在标头,则会附加标头值。标头值可以设置为常量、一个或多个 [Nginx 变量](https://nginx.org/en/docs/http/ngx_http_core_module.html),或者 `regex_uri` 的匹配结果(使用变量,例如 `$1-$2-$3`)。| +| headers.set | object | 否 | | | 要设置请求的标头。如果请求中已经存在标头,则会覆盖标头值。标头值可以设置为常量、一个或多个 [Nginx 变量](https://nginx.org/en/docs/http/ngx_http_core_module.html),或者 `regex_uri` 的匹配结果(使用变量,例如 `$1-$2-$3`)。不应将其用于设置 `Host`。| +| headers.remove | array[string] | 否 | | | 从请求中删除的标头。 +| use_real_request_uri_unsafe | boolean | 否 | false | | 如果为 True,则绕过 URI 规范化并允许完整的原始请求 URI。启用此选项被视为不安全。| + +## 示例 + +下面的示例说明如何在不同场景中在路由上配置 `proxy-rewrite`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 重写主机标头 + +以下示例演示了如何修改请求中的 `Host` 标头。请注意,您不应使用 `headers.set` 来设置 `Host` 标头。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/headers", + "plugins": { + "proxy-rewrite": { + "host": "myapisix.demo" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向 `/headers` 发送请求以检查发送到上游的所有请求标头: + +```shell +curl "http://127.0.0.1:9080/headers" +``` + +您应该看到类似于以下内容的响应: + +```text +{ + "headers": { + "Accept": "*/*", + "Host": "myapisix.demo", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id": "Root=1-64fef198-29da0970383150175bd2d76d", + "X-Forwarded-Host": "127.0.0.1" + } +} +``` + +### 重写 URI 并设置标头 + 
+以下示例演示了如何重写请求上游 URI 并设置其他标头值。如果客户端请求中存在相同的标头,则插件中设置的相应标头值将覆盖客户端请求中存在的值。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/", + "plugins": { + "proxy-rewrite": { + "uri": "/anything", + "headers": { + "set": { + "X-Api-Version": "v1", + "X-Api-Engine": "apisix" + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送请求以验证: + +```shell +curl "http://127.0.0.1:9080/" -H '"X-Api-Version": "v2"' +``` + +您应该看到类似于以下内容的响应: + +```text +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id": "Root=1-64fed73a-59cd3bd640d76ab16c97f1f1", + "X-Api-Engine": "apisix", + "X-Api-Version": "v1", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "GET", + "origin": "::1, 103.248.35.179", + "url": "http://localhost/anything" +} +``` + +注意到其中两个标头都存在,以及插件中配置的 `X-Api-Version` 标头值覆盖了请求中传递的标头值。 + +### 重写 URI 并附加标头 + +以下示例演示了如何重写请求上游 URI 并附加其他标头值。如果客户端请求中存在相同的标头,则它们的标头值将附加到插件中配置的标头值。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/", + "plugins": { + "proxy-rewrite": { + "uri": "/headers", + "headers": { + "add": { + "X-Api-Version": "v1", + "X-Api-Engine": "apisix" + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送请求以验证: + +```shell +curl "http://127.0.0.1:9080/" -H '"X-Api-Version": "v2"' +``` + +您应该会看到类似以下内容的响应: + +```text +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id": "Root=1-64fed73a-59cd3bd640d76ab16c97f1f1", + "X-Api-Engine": "apisix", + "X-Api-Version": "v1,v2", + "X-Forwarded-Host": "127.0.0.1" + } +} +``` + 
+请注意,两个标头均存在,并且插件中配置的 `X-Api-Version` 标头值均附加在请求中传递的标头值上。 + +### 删除现有标头 + +以下示例演示了如何删除现有标头 `User-Agent`。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/headers", + "plugins": { + "proxy-rewrite": { + "headers": { + "remove":[ + "User-Agent" + ] + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送请求来验证指定的标头是否被删除: + +```shell +curl "http://127.0.0.1:9080/headers" +``` + +您应该看到类似以下的响应,其中 `User-Agent` 标头已被移除: + +```text +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "X-Amzn-Trace-Id": "Root=1-64fef302-07f2b13e0eb006ba776ad91d", + "X-Forwarded-Host": "127.0.0.1" + } +} +``` + +### 使用 RegEx 重写 URI + +以下示例演示了如何解析原始上游 URI 路径中的文本并使用它们组成新的上游 URI 路径。在此示例中,APISIX 配置为将所有请求从 `/test/user/agent` 转发到 `/user-agent`。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "uri": "/test/*", + "plugins": { + # highlight-start + "proxy-rewrite": { + "regex_uri": ["^/test/(.*)/(.*)", "/$1-$2"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送请求到 `/test/user/agent`,检查是否被重定向到 `/user-agent`: + +```shell +curl "http://127.0.0.1:9080/test/user/agent" +``` + +您应该会看到类似以下内容的响应: + +```text +{ + "user-agent": "curl/8.2.1" +} +``` + +### 添加 URL 参数 + +以下示例演示了如何向请求添加 URL 参数。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/get", + "plugins": { + "proxy-rewrite": { + "uri": "/get?arg1=apisix&arg2=plugin" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送请求来验证 URL 参数是否也转发给了上游: + +```shell +curl "http://127.0.0.1:9080/get" +``` + +您应该会看到类似以下内容的响应: + +```text +{ + "args": { + 
"arg1": "apisix", + "arg2": "plugin" + }, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id": "Root=1-64fef6dc-2b0e09591db7353a275cdae4", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "127.0.0.1, 103.248.35.148", + # highlight-next-line + "url": "http://127.0.0.1/get?arg1=apisix&arg2=plugin" +} +``` + +### 重写 HTTP 方法 + +以下示例演示如何将 GET 请求重写为 POST 请求。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "proxy-rewrite-route", + "methods": ["GET"], + "uri": "/get", + "plugins": { + "proxy-rewrite": { + "uri": "/anything", + "method":"POST" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向 `/get` 发送 GET 请求,以验证它是否转换为向 `/anything` 发送 POST 请求: + +```shell +curl "http://127.0.0.1:9080/get" +``` + +您应该会看到类似以下内容的响应: + +```text +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id": "Root=1-64fef7de-0c63387645353998196317f2", + "X-Forwarded-Host": "127.0.0.1" + }, + "json": null, + "method": "POST", + "origin": "::1, 103.248.35.179", + "url": "http://localhost/anything" +} +``` + +### 将消费者名称转发到上游 + +以下示例演示了如何将成功验证的消费者名称转发到上游服务。例如,您将使用 `key-auth` 作为身份验证方法。 + +创建消费者 `JohnDoe`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "JohnDoe" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/JohnDoe/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +接下来,创建一个启用密钥认证的路由,配置 `proxy-rewrite` 以将消费者名称添加到标头,并删除认证密钥,以使其对上游服务不可见: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": 
"consumer-restricted-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "proxy-rewrite": { + "headers": { + "set": { + "X-Apisix-Consumer": "$consumer_name" + }, + "remove": [ "Apikey" ] + } + } + }, + "upstream" : { + "nodes": { + "httpbin.org":1 + } + } + }' +``` + +以消费者 `JohnDoe` 的身份向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: john-key' +``` + +您应该收到一个包含以下主体的 `HTTP/1.1 200 OK` 响应: + +```text +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.4.0", + "X-Amzn-Trace-Id": "Root=1-664b01a6-2163c0156ed4bff51d87d877", + "X-Apisix-Consumer": "JohnDoe", + "X-Forwarded-Host": "127.0.0.1" + }, + "origin": "172.19.0.1, 203.12.12.12", + "url": "http://127.0.0.1/get" +} +``` + +向路由发送另一个请求,不带有有效凭证: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您应该收到 `HTTP/1.1 403 Forbidden` 响应。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/public-api.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/public-api.md new file mode 100644 index 0000000..b20f802 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/public-api.md @@ -0,0 +1,242 @@ +--- +title: public-api +keywords: + - APISIX + - API 网关 + - Public API +description: public-api 插件公开了一个内部 API 端点,使其可被公开访问。该插件的主要用途之一是公开由其他插件创建的内部端点。 +--- + + + + + + + +## 描述 + +`public-api` 插件公开了一个内部 API 端点,使其可被公开访问。该插件的主要用途之一是公开由其他插件创建的内部端点。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|------|--------|-------|-------|------|------| +| uri | string | 否 | | | 内部端点的 URI。如果未配置,则暴露路由的 URI。| + +## 示例 + +以下示例展示了如何在不同场景中配置 `public-api`。 + +### 在自定义端点暴露 Prometheus 指标 + +以下示例演示如何禁用默认在端口 `9091` 上暴露端点的 Prometheus 导出服务器,并在 APISIX 用于监听其他客户端请求的端口 `9080` 上,通过新的公共 API 端点暴露 APISIX 的 Prometheus 指标。 + +此外,还会配置路由,使内部端点 `/apisix/prometheus/metrics` 通过自定义端点对外公开。 + +:::caution + +如果收集了大量指标,插件可能会占用大量 CPU 资源用于计算,从而影响正常请求的处理。 + +为了解决这个问题,APISIX 使用 
[特权代理进程](https://github.com/openresty/lua-resty-core/blob/master/lib/ngx/process.md#enable_privileged_agent) ,并将指标计算卸载至独立进程。如果使用配置文件中 `plugin_attr.prometheus.export_addr` 设定的指标端点,该优化将自动生效。但如果通过 `public-api` 插件暴露指标端点,则不会受益于此优化。 + +::: + +在配置文件中禁用 Prometheus 导出服务器,并重新加载 APISIX 以使更改生效: + +```yaml +plugin_attr: + prometheus: + enable_export_server: false +``` + +接下来,创建一个带有 `public-api` 插件的路由,并为 APISIX 指标暴露一个公共 API 端点。你应将路由的 `uri` 设置为自定义端点路径,并将插件的 `uri` 设置为要暴露的内部端点。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H 'X-API-KEY: ${admin_key}' \ + -d '{ + "id": "prometheus-metrics", + "uri": "/prometheus_metrics", + "plugins": { + "public-api": { + "uri": "/apisix/prometheus/metrics" + } + } + }' +``` + +向自定义指标端点发送请求: + +```shell +curl http://127.0.0.1:9080/prometheus_metrics +``` + +你应看到类似以下的输出: + +```text +# HELP apisix_http_requests_total The total number of client requests since APISIX started +# TYPE apisix_http_requests_total gauge +apisix_http_requests_total 1 +# HELP apisix_nginx_http_current_connections Number of HTTP connections +# TYPE apisix_nginx_http_current_connections gauge +apisix_nginx_http_current_connections{state="accepted"} 1 +apisix_nginx_http_current_connections{state="active"} 1 +apisix_nginx_http_current_connections{state="handled"} 1 +apisix_nginx_http_current_connections{state="reading"} 0 +apisix_nginx_http_current_connections{state="waiting"} 0 +apisix_nginx_http_current_connections{state="writing"} 1 +... 
+``` + +### 暴露批量请求端点 + +以下示例展示了如何使用 `public-api` 插件来暴露 `batch-requests` 插件的端点,该插件用于将多个请求组合成一个请求,然后将它们发送到网关。 + +创建一个样本路由到 httpbin 的 `/anything` 端点,用于验证目的: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "httpbin-anything", + "uri": "/anything", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +创建一个带有 `public-api` 插件的路由,并将路由的 `uri` 设置为要暴露的内部端点: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "batch-requests", + "uri": "/apisix/batch-requests", + "plugins": { + "public-api": {} + } + }' +``` + +向暴露的批量请求端点发送一个包含 GET 和 POST 请求的流水线请求: + +```shell +curl "http://127.0.0.1:9080/apisix/batch-requests" -X POST -d ' +{ + "pipeline": [ + { + "method": "GET", + "path": "/anything" + }, + { + "method": "POST", + "path": "/anything", + "body": "a post request" + } + ] +}' +``` + +您应该会收到两个请求的响应,类似于以下内容: + +```json +[ + { + "reason": "OK", + "body": "{\n \"args\": {}, \n \"data\": \"\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Accept\": \"*/*\", \n \"Host\": \"127.0.0.1\", \n \"User-Agent\": \"curl/8.6.0\", \n \"X-Amzn-Trace-Id\": \"Root=1-67b6e33b-5a30174f5534287928c54ca9\", \n \"X-Forwarded-Host\": \"127.0.0.1\"\n }, \n \"json\": null, \n \"method\": \"GET\", \n \"origin\": \"192.168.107.1, 43.252.208.84\", \n \"url\": \"http://127.0.0.1/anything\"\n}\n", + "headers": { + ... 
+ }, + "status": 200 + }, + { + "reason": "OK", + "body": "{\n \"args\": {}, \n \"data\": \"a post request\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Accept\": \"*/*\", \n \"Content-Length\": \"14\", \n \"Host\": \"127.0.0.1\", \n \"User-Agent\": \"curl/8.6.0\", \n \"X-Amzn-Trace-Id\": \"Root=1-67b6e33b-0eddcec07f154dac0d77876f\", \n \"X-Forwarded-Host\": \"127.0.0.1\"\n }, \n \"json\": null, \n \"method\": \"POST\", \n \"origin\": \"192.168.107.1, 43.252.208.84\", \n \"url\": \"http://127.0.0.1/anything\"\n}\n", + "headers": { + ... + }, + "status": 200 + } +] +``` + +如果您希望在自定义端点处暴露批量请求端点,请创建一个带有 `public-api` 插件的路由。您应该将路由的 `uri` 设置为自定义端点路径,并将插件的 uri 设置为要暴露的内部端点。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "batch-requests", + "uri": "/batch-requests", + "plugins": { + "public-api": { + "uri": "/apisix/batch-requests" + } + } + }' +``` + +现在批量请求端点应该被暴露为 `/batch-requests`,而不是 `/apisix/batch-requests`。 +向暴露的批量请求端点发送一个包含 GET 和 POST 请求的流水线请求: + +```shell +curl "http://127.0.0.1:9080/batch-requests" -X POST -d ' +{ + "pipeline": [ + { + "method": "GET", + "path": "/anything" + }, + { + "method": "POST", + "path": "/anything", + "body": "a post request" + } + ] +}' +``` + +您应该会收到两个请求的响应,类似于以下内容: + +```json +[ + { + "reason": "OK", + "body": "{\n \"args\": {}, \n \"data\": \"\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Accept\": \"*/*\", \n \"Host\": \"127.0.0.1\", \n \"User-Agent\": \"curl/8.6.0\", \n \"X-Amzn-Trace-Id\": \"Root=1-67b6e33b-5a30174f5534287928c54ca9\", \n \"X-Forwarded-Host\": \"127.0.0.1\"\n }, \n \"json\": null, \n \"method\": \"GET\", \n \"origin\": \"192.168.107.1, 43.252.208.84\", \n \"url\": \"http://127.0.0.1/anything\"\n}\n", + "headers": { + ... 
+ }, + "status": 200 + }, + { + "reason": "OK", + "body": "{\n \"args\": {}, \n \"data\": \"a post request\", \n \"files\": {}, \n \"form\": {}, \n \"headers\": {\n \"Accept\": \"*/*\", \n \"Content-Length\": \"14\", \n \"Host\": \"127.0.0.1\", \n \"User-Agent\": \"curl/8.6.0\", \n \"X-Amzn-Trace-Id\": \"Root=1-67b6e33b-0eddcec07f154dac0d77876f\", \n \"X-Forwarded-Host\": \"127.0.0.1\"\n }, \n \"json\": null, \n \"method\": \"POST\", \n \"origin\": \"192.168.107.1, 43.252.208.84\", \n \"url\": \"http://127.0.0.1/anything\"\n}\n", + "headers": { + ... + }, + "status": 200 + } +] +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/real-ip.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/real-ip.md new file mode 100644 index 0000000..1b85b35 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/real-ip.md @@ -0,0 +1,202 @@ +--- +title: real-ip +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Real IP +description: real-ip 插件允许 Apache APISIX 通过 HTTP 请求头或 HTTP 查询字符串中传递的 IP 地址设置客户端的真实 IP。 +--- + + + + + + + +## 描述 + +`real-ip` 插件允许 APISIX 通过 HTTP 请求头或 HTTP 查询字符串中传递的 IP 地址设置客户端的真实 IP。当 APISIX 位于反向代理之后时,此功能尤其有用,因为在这种情况下,代理可能会被视为请求发起客户端。 + +该插件在功能上类似于 NGINX 的 [ngx_http_realip_module](https://nginx.org/en/docs/http/ngx_http_realip_module.html),但提供了更多的灵活性。 + +## 属性 + +| 名称 | 类型 | 是否必需 | 默认值 | 有效值 | 描述 | +|-------------------|---------------|----------|--------|----------------------------|----------------------------------------------------------------------| +| source | string | 是 | | | 内置变量,例如 `http_x_forwarded_for` 或 `arg_realip`。变量值应为一个有效的 IP 地址,表示客户端的真实 IP 地址,可选地包含端口。 | +| trusted_addresses | array[string] | 否 | | IPv4 或 IPv6 地址数组(接受 CIDR 表示法) | 已知会发送正确替代地址的可信地址。此配置设置 [`set_real_ip_from`](https://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from) 指令。 | +| recursive | boolean | 否 | false | | 如果为 false,则将匹配可信地址之一的原始客户端地址替换为配置的 `source` 中发送的最后一个地址。
如果为 true,则将匹配可信地址之一的原始客户端地址替换为配置的 `source` 中发送的最后一个非可信地址。 | + +:::note +如果 `source` 属性中设置的地址丢失或者无效,该插件将不会更改客户端地址。 +::: + +## 示例 + +以下示例展示了如何在不同场景中配置 `real-ip`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 从 URI 参数获取真实客户端地址 + +以下示例演示了如何使用 URI 参数更新客户端 IP 地址。 + +创建如下路由。您应配置 `source` 以使用 [APISIX 变量](https://apisix.apache.org/docs/apisix/apisix-variable/)或者 [NGINX 变量](https://nginx.org/en/docs/varindex.html)从 URL 参数 `realip` 获取值。使用 `response-rewrite` 插件设置响应头,以验证客户端 IP 和端口是否实际更新。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "real-ip-route", + "uri": "/get", + "plugins": { + "real-ip": { + "source": "arg_realip", + "trusted_addresses": ["127.0.0.0/24"] + }, + "response-rewrite": { + "headers": { + "remote_addr": "$remote_addr", + "remote_port": "$remote_port" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送带有 URL 参数中的真实 IP 和端口的请求: + +```shell +curl -i "http://127.0.0.1:9080/get?realip=1.2.3.4:9080" +``` + +您应看到响应包含以下头: + +```text +remote-addr: 1.2.3.4 +remote-port: 9080 +``` + +### 从请求头获取真实客户端地址 + +以下示例展示了当 APISIX 位于反向代理(例如负载均衡器)之后时,如何设置真实客户端 IP,此时代理在 [`X-Forwarded-For`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For) 请求头中暴露了真实客户端 IP。 + +创建如下路由。您应配置 `source` 以使用 [APISIX 变量](https://apisix.apache.org/docs/apisix/apisix-variable/)或者 [NGINX 变量](https://nginx.org/en/docs/varindex.html)从请求头 `X-Forwarded-For` 获取值。使用 response-rewrite 插件设置响应头,以验证客户端 IP 是否实际更新。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "real-ip-route", + "uri": "/get", + "plugins": { + "real-ip": { + "source": "http_x_forwarded_for", + "trusted_addresses": ["127.0.0.0/24"] + }, + "response-rewrite": { + "headers": { + "remote_addr": 
"$remote_addr" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您应看到响应包含以下头: + +```text +remote-addr: 10.26.3.19 +``` + +IP 地址应对应于请求发起客户端的 IP 地址。 + +### 在多个代理之后获取真实客户端地址 + +以下示例展示了当 APISIX 位于多个代理之后时,如何获取真实客户端 IP,此时 [`X-Forwarded-For`](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For) 请求头包含了一系列代理 IP 地址。 + +创建如下路由。您应配置 `source` 以使用 [APISIX 变量](https://apisix.apache.org/docs/apisix/apisix-variable/)或者 [NGINX 变量](https://nginx.org/en/docs/varindex.html)从请求头 `X-Forwarded-For` 获取值。将 `recursive` 设置为 `true`,以便将匹配可信地址之一的原始客户端地址替换为配置的 `source` 中发送的最后一个非可信地址。然后,使用 `response-rewrite` 插件设置响应头,以验证客户端 IP 是否实际更新。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "real-ip-route", + "uri": "/get", + "plugins": { + "real-ip": { + "source": "http_x_forwarded_for", + "recursive": true, + "trusted_addresses": ["192.128.0.0/16", "127.0.0.0/24"] + }, + "response-rewrite": { + "headers": { + "remote_addr": "$remote_addr" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get" \ + -H "X-Forwarded-For: 127.0.0.2, 192.128.1.1, 127.0.0.1" +``` + +您应看到响应包含以下头: + +```text +remote-addr: 127.0.0.2 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/redirect.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/redirect.md new file mode 100644 index 0000000..351cf5b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/redirect.md @@ -0,0 +1,177 @@ +--- +title: redirect +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Redirect +description: 本文介绍了关于 Apache APISIX `redirect` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`redirect` 插件可用于配置 URI 重定向。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | 
+|---------------------|---------------|-----|-------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| http_to_https | boolean | 否 | false | [true,false] | 当设置为 `true` 并且请求是 HTTP 时,它将被重定向具有相同 URI 和 301 状态码的 HTTPS,原 URI 的查询字符串也将包含在 Location 头中。 | +| uri | string | 否 | | | 要重定向到的 URI,可以包含 NGINX 变量。例如:`/test/index.htm`,`$uri/index.html`,`${uri}/index.html`,`https://example.com/foo/bar`。如果你引入了一个不存在的变量,它不会报错,而是将其视为一个空变量。 | +| regex_uri | array[string] | 否 | | | 将来自客户端的 URL 与正则表达式匹配并重定向。当匹配成功后使用模板替换发送重定向到客户端,如果未匹配成功会将客户端请求的 URI 转发至上游。和 `regex_uri` 不可以同时存在。例如:["^/iresty/(.)/(.)/(.*)","/$1-$2-$3"] 第一个元素代表匹配来自客户端请求的 URI 正则表达式,第二个元素代表匹配成功后发送重定向到客户端的 URI 模板。 | +| ret_code | integer | 否 | 302 | [200, ...] | HTTP 响应码 | +| encode_uri | boolean | 否 | false | [true,false] | 当设置为 `true` 时,对返回的 `Location` Header 按照 [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986) 的编码格式进行编码。 | +| append_query_string | boolean | 否 | false | [true,false] | 当设置为 `true` 时,将原始请求中的查询字符串添加到 `Location` Header。如果已配置 `uri` 或 `regex_uri` 已经包含查询字符串,则请求中的查询字符串将附加一个`&`。如果你已经处理过查询字符串(例如,使用 NGINX 变量 `$request_uri`),请不要再使用该参数以避免重复。 | + +:::note + +* `http_to_https`、`uri` 和 `regex_uri` 只能配置其中一个属性。 +* `http_to_https`、和 `append_query_string` 只能配置其中一个属性。 +* 当开启 `http_to_https` 时,重定向 URL 中的端口将按如下顺序选取一个值(按优先级从高到低排列) + * 从配置文件(`conf/config.yaml`)中读取 `plugin_attr.redirect.https_port`。 + * 如果 `apisix.ssl` 处于开启状态,读取 `apisix.ssl.listen` 并从中随机选一个 `port`。 + * 使用 443 作为默认 `https port`。 + +::: + +## 启用插件 + +以下示例展示了如何在指定路由中启用 `redirect` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/test/index.html", + "plugins": { + 
"redirect": { + "uri": "/test/default.html", + "ret_code": 301 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` + +你也可以在新的 URI 中使用 NGINX 内置的任意变量: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/test", + "plugins": { + "redirect": { + "uri": "$uri/index.html", + "ret_code": 301 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,可以使用如下命令测试插件是否启用成功: + +```shell +curl http://127.0.0.1:9080/test/index.html -i +``` + +``` +HTTP/1.1 301 Moved Permanently +Date: Wed, 23 Oct 2019 13:48:23 GMT +Content-Type: text/html +Content-Length: 166 +Connection: keep-alive +Location: /test/default.html +... +``` + +通过上述返回结果,可以看到响应码和响应头中的 `Location` 参数,它表示该插件已启用。 + +以下示例展示了如何将 HTTP 重定向到 HTTPS: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": { + "redirect": { + "http_to_https": true + } + } +}' +``` + +基于上述例子进行测试: + +```shell +curl http://127.0.0.1:9080/hello -i +``` + +``` +HTTP/1.1 301 Moved Permanently +... +Location: https://127.0.0.1:9443/hello +... 
+``` + +## 删除插件 + +当你需要禁用 `redirect` 插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/test/index.html", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/referer-restriction.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/referer-restriction.md new file mode 100644 index 0000000..a69d0dc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/referer-restriction.md @@ -0,0 +1,142 @@ +--- +title: referer-restriction +keywords: + - APISIX + - API 网关 + - Referer restriction +description: 本文介绍了 Apache APISIX referer-restriction 插件的使用方法,通过该插件可以将 referer 请求头中的域名加入黑名单或者白名单来限制其对服务或路由的访问。 +--- + + + +## 描述 + +`referer-restriction` 插件允许用户将 `Referer` 请求头中的域名列入白名单或黑名单来限制该域名对服务或路由的访问。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| --------- | ------------- | ------ | ------ | ------ | -------------------------------- | +| whitelist | array[string] | 否 | | | 白名单域名列表。域名开头可以用 `*` 作为通配符。 | +| blacklist | array[string] | 否 | | | 黑名单域名列表。域名开头可以用 `*` 作为通配符。 | +| message | string | 否 | "Your referer host is not allowed" | [1, 1024] | 在未允许访问的情况下返回的信息。 | +| bypass_missing | boolean | 否 | false | | 当设置为 `true` 时,如果 `Referer` 请求头不存在或格式有误,将绕过检查。 | + +:::info IMPORTANT + +`whitelist` 和 `blacklist` 属性无法同时在同一个服务或路由上使用,只能使用其中之一。 + +::: + +## 启用插件 + +以下示例展示了如何在特定路由上启用 `referer-restriction` 插件,并配置 `whitelist` 和 `bypass_missing` 属性: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + 
}, + "plugins": { + "referer-restriction": { + "bypass_missing": true, + "whitelist": [ + "xx.com", + "*.xx.com" + ] + } + } +}' +``` + +## 测试插件 + +通过上述命令启用插件后,你可以在请求中添加 `Referer: http://xx.com/x` 测试插件: + +```shell +curl http://127.0.0.1:9080/index.html -H 'Referer: http://xx.com/x' +``` + +返回的 HTTP 响应头中带有 `200` 状态码则表示访问成功: + +```shell +HTTP/1.1 200 OK +... +``` + +接下来,将请求设置为 `Referer: http://yy.com/x`: + +```shell +curl http://127.0.0.1:9080/index.html -H 'Referer: http://yy.com/x' +``` + +返回的 HTTP 响应头中带有 `403` 状态码,并在响应体中带有 `message` 属性值,代表访问被阻止: + +```shell +HTTP/1.1 403 Forbidden +... +{"message":"Your referer host is not allowed"} +``` + +因为启用插件时会将属性 `bypass_missing` 设置为 `true`,所以未指定 `Refer` 请求头的请求将跳过检查: + +```shell +curl http://127.0.0.1:9080/index.html +``` + +返回的 HTTP 响应头中带有 `200` 状态码,代表访问成功: + +```shell +HTTP/1.1 200 OK +... +``` + +## 删除插件 + +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/request-id.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/request-id.md new file mode 100644 index 0000000..7747fc7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/request-id.md @@ -0,0 +1,292 @@ +--- +title: request-id +keywords: + - APISIX + - API 网关 + - Request ID +description: request-id 插件为通过 APISIX 代理的每个请求添加一个唯一的 ID,可用于跟踪 API 请求。 +--- + + + +## 描述 + +`request-id` 插件为每个通过 APISIX 代理的请求添加一个唯一 ID,可用于跟踪 API 请求。如果请求在 `header_name` 对应的 header 中带有 ID,则插件将使用 header 值作为唯一 ID,而不会用自动生成的 ID 进行覆盖。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------------- | ------- | -------- | -------------- | ------ | ------------------------------ | +| header_name | string | 否 | "X-Request-Id" | | 携带请求唯一 ID 
的标头的名称。请注意,如果请求在 `header_name` 标头中携带 ID,则插件将使用标头值作为唯一 ID,并且不会用生成的 ID 覆盖它。| +| include_in_response | 布尔值 | 否 | true | | 如果为 true,则将生成的请求 ID 包含在响应标头中,其中标头的名称是 `header_name` 值。| +| algorithm | string | 否 | "uuid" | ["uuid","nanoid","range_id"] | 用于生成唯一 ID 的算法。设置为 `uuid` 时,插件会生成一个通用唯一标识符。设置为 `nanoid` 时,插件会生成一个紧凑的、URL 安全的 ID。设置为 `range_id` 时,插件会生成具有特定参数的连续 ID。| +| range_id | object | 否 | | |使用 `range_id` 算法生成请求 ID 的配置。| +| range_id.char_set | string | 否 | "abcdefghijklmnopqrstuvwxyzABCDEFGHIGKLMNOPQRSTUVWXYZ0123456789" | 最小长度 6 | 用于 `range_id` 算法的字符集。| +| range_id.length | integer | 否 | 16 | >=6 | 用于 `range_id` 算法的生成的 ID 的长度。| + +## 示例 + +以下示例演示了如何在不同场景中配置“request-id”。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 将请求 ID 附加到默认响应标头 + +以下示例演示了如何在路由上配置 `request-id`,如果请求中未传递标头值,则将生成的请求 ID 附加到默认的 `X-Request-Id` 响应标头。当在请求中设置 `X-Request-Id` 标头时,插件将把请求标头中的值作为请求 ID。 + +使用其默认配置(明确定义)创建带有 `request-id` 插件的路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "request-id-route", + "uri": "/anything", + "plugins": { + "request-id": { + "header_name": "X-Request-Id", + "include_in_response": true, + "algorithm": "uuid" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该会收到一个 `HTTP/1.1 200 OK` 响应,并且会看到响应包含 `X-Request-Id` 标头和生成的 ID: + +```text +X-Request-Id: b9b2c0d4-d058-46fa-bafc-dd91a0ccf441 +``` + +使用标头中的自定义请求 ID 向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H 'X-Request-Id: some-custom-request-id' +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应,并看到响应包含带有自定义请求 ID 的 `X-Request-Id` 标头: + +```text +X-Request-Id:some-custom-request-id +``` + +### 将请求 ID 附加到自定义响应标头 + +以下示例演示如何在路由上配置 `request-id`,将生成的请求 ID 附加到指定的标头。 + +使用 `request-id` 插件创建路由,以定义带有请求 ID 
的自定义标头,并将请求 ID 包含在响应标头中: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "request-id-route", + "uri": "/anything", + "plugins": { + "request-id": { + "header_name": "X-Req-Identifier", + "include_in_response": true + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该收到一个 `HTTP/1.1 200 OK` 响应,并看到响应包含带有生成 ID 的 `X-Req-Identifier` 标头: + +```text +X-Req-Identifier:1c42ff59-ee4c-4103-a980-8359f4135b21 +``` + +### 在响应标头中隐藏请求 ID + +以下示例演示如何在路由上配置 `request-id`,将生成的请求 ID 附加到指定的标头。包含请求 ID 的标头应转发到上游服务,但不会在响应标头中返回。 + +使用 `request-id` 插件创建路由,以定义带有请求 ID 的自定义标头,而不在响应标头中包含请求 ID: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "request-id-route", + "uri": "/anything", + "plugins": { + "request-id": { + "header_name": "X-Req-Identifier", + "include_in_response": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该收到 `HTTP/1.1 200 OK` 响应,并在响应标头中看到 `X-Req-Identifier` 标头。在响应主体中,您应该看到: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.6.0", + "X-Amzn-Trace-Id": "Root=1-6752748c-7d364f48564508db1e8c9ea8", + "X-Forwarded-Host": "127.0.0.1", + "X-Req-Identifier": "268092bc-15e1-4461-b277-bf7775f2856f" + }, + ... 
+} +``` + +这表明请求 ID 已转发到上游服务,但未在响应标头中返回。 + +### 使用 `nanoid` 算法 + +以下示例演示如何在路由上配置 `request-id` 并使用 `nanoid` 算法生成请求 ID。 + +使用 `request-id` 插件创建路由,如下所示: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "request-id-route", + "uri": "/anything", + "plugins": { + "request-id": { + "algorithm": "nanoid" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该收到一个 `HTTP/1.1 200 OK` 响应,并看到响应包含 `X-Req-Identifier` 标头,其中的 ID 使用 `nanoid` 算法生成: + +```text +X-Request-Id: kepgHWCH2ycQ6JknQKrX2 +``` + +### 全局和在路由上附加请求 ID + +以下示例演示如何将 `request-id` 配置为全局插件并在路由上附加两个 ID。 + +为 `request-id` 插件创建全局规则,将请求 ID 添加到自定义标头: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/global_rules" -X PUT -d '{ + "id": "rule-for-request-id", + "plugins": { + "request-id": { + "header_name": "Global-Request-ID" + } + } +}' +``` + +使用 `request-id` 插件创建路由,将请求 ID 添加到不同的自定义标头: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "id": "request-id-route", + "uri": "/anything", + "plugins": { + "request-id": { + "header_name": "Route-Request-ID" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应,并看到响应包含以下标头: + +```text +Global-Request-ID:2e9b99c1-08ed-4a74-b347-49c0891b07ad +Route-Request-ID:d755666b-732c-4f0e-a30e-a7a71ace4e26 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/request-validation.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/request-validation.md new file mode 100644 index 0000000..84bf70e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/request-validation.md @@ -0,0 +1,528 @@ +--- +title: 
request-validation +keywords: + - APISIX + - API 网关 + - Request Validation +description: request-validation 插件会在将请求转发到上游服务之前对其进行验证。此插件使用 JSON Schema 进行验证,并且可以验证请求的标头和正文。 +--- + + + + + + + +## 描述 + +`request-validation` 插件会在将请求转发到上游服务之前对其进行验证。此插件使用 [JSON Schema](https://github.com/api7/jsonschema) 进行验证,并且可以验证请求的标头和正文。 + +请参阅 [JSON Schema 规范](https://json-schema.org/specification) 了解有关语法的更多信息。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------ | ----------- | ------- | ----- | --------------------------------- | +| header_schema | object | 否 | | | `header` 数据的 `schema` 数据结构。 | +| body_schema | object | 否 | | | `body` 数据的 `schema` 数据结构。 | +| rejected_code | integer | 否 | 400 | [200,...,599] | 当请求被拒绝时要返回的状态码。 | +| rejected_msg | string | 否 | | | 当请求被拒绝时返回的信息。 | + +:::note + +`header_schema` 和 `body_schema` 属性至少需要配置其一。 + +::: + +## 示例 + +以下示例演示了如何针对不同场景配置 `request-validation`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 验证请求标头 + +下面的示例演示如何根据定义的 JSON Schema 验证请求标头,该模式需要两个特定的标头和标头值符合指定的要求。 + +使用 `request-validation` 插件创建路由,如下所示: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "request-validation-route", + "uri": "/get", + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "required": ["User-Agent", "Host"], + "properties": { + "User-Agent": { + "type": "string", + "pattern": "^curl\/" + }, + "Host": { + "type": "string", + "enum": ["httpbin.org", "httpbin"] + } + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +#### 使用符合架构的请求进行验证 + +发送带有标头 `Host: httpbin` 的请求,该请求符合架构: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Host: httpbin" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Host": 
"httpbin", + "User-Agent": "curl/7.74.0", + "X-Amzn-Trace-Id": "Root=1-6509ae35-63d1e0fd3934e3f221a95dd8", + "X-Forwarded-Host": "httpbin" + }, + "origin": "127.0.0.1, 183.17.233.107", + "url": "http://httpbin/get" +} +``` + +#### 验证请求是否符合架构 + +发送不带任何标头的请求: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您应该收到 `HTTP/1.1 400 Bad Request` 响应,表明请求未能通过验证: + +```text +property "Host" validation failed: matches none of the enum value +``` + +发送具有所需标头但标头值不符合的请求: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Host: httpbin" -H "User-Agent: cli-mock" +``` + +您应该收到一个 `HTTP/1.1 400 Bad Request` 响应,显示 `User-Agent` 标头值与预期模式不匹配: + +```text +property "User-Agent" validation failed: failed to match pattern "^curl/" with "cli-mock" +``` + +### 自定义拒绝消息和状态代码 + +以下示例演示了如何在验证失败时自定义响应状态和消息。 + +使用 `request-validation` 配置路由,如下所示: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "request-validation-route", + "uri": "/get", + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "required": ["Host"], + "properties": { + "Host": { + "type": "string", + "enum": ["httpbin.org", "httpbin"] + } + } + }, + "rejected_code": 403, + "rejected_msg": "Request header validation failed." + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送一个在标头中配置错误的 `Host` 的请求: + +```shell +curl -i "http://127.0.0.1:9080/get" -H "Host: httpbin2" +``` + +您应该收到带有自定义消息的 `HTTP/1.1 403 Forbidden` 响应: + +```text +Request header validation failed. 
+``` + +### 验证请求主体 + +以下示例演示如何根据定义的 JSON Schema 验证请求主体。 + +`request-validation` 插件支持两种媒体类型的验证: + +* `application/json` +* `application/x-www-form-urlencoded` + +#### 验证 JSON 请求主体 + +使用 `request-validation` 插件创建路由,如下所示: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "request-validation-route", + "uri": "/post", + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "required": ["Content-Type"], + "properties": { + "Content-Type": { + "type": "string", + "pattern": "^application\/json$" + } + } + }, + "body_schema": { + "type": "object", + "required": ["required_payload"], + "properties": { + "required_payload": {"type": "string"}, + "boolean_payload": {"type": "boolean"}, + "array_payload": { + "type": "array", + "minItems": 1, + "items": { + "type": "integer", + "minimum": 200, + "maximum": 599 + }, + "uniqueItems": true, + "default": [200] + } + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送符合架构的 JSON Schema 的请求以验证: + +```shell +curl -i "http://127.0.0.1:9080/post" -X POST \ + -H "Content-Type: application/json" \ + -d '{"required_payload":"hello", "array_payload":[301]}' +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "data": "{\"array_payload\":[301],\"required_payload\":\"hello\"}", + "files": {}, + "form": {}, + "headers": { + ... 
+ }, + "json": { + "array_payload": [ + 301 + ], + "required_payload": "hello" + }, + "origin": "127.0.0.1, 183.17.233.107", + "url": "http://127.0.0.1/post" +} +``` + +如果你发送请求时没有指定 `Content-Type:application/json`: + +```shell +curl -i "http://127.0.0.1:9080/post" -X POST \ + -d '{"required_payload":"hello,world"}' +``` + +您应该收到类似于以下内容的 `HTTP/1.1 400 Bad Request` 响应: + +```text +property "Content-Type" validation failed: failed to match pattern "^application/json$" with "application/x-www-form-urlencoded" +``` + +如果你发送的请求没有必需的 JSON 字段 `required_pa​​yload`: + +```shell +curl -i "http://127.0.0.1:9080/post" -X POST \ + -H "Content-Type: application/json" \ + -d '{}' +``` + +您应该收到 `HTTP/1.1 400 Bad Request` 响应: + +```text +property "required_payload" is required +``` + +#### 验证 URL 编码的表单主体 + +使用 `request-validation` 插件创建路由,如下所示: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "request-validation-route", + "uri": "/post", + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "required": ["Content-Type"], + "properties": { + "Content-Type": { + "type": "string", + "pattern": "^application\/x-www-form-urlencoded$" + } + } + }, + "body_schema": { + "type": "object", + "required": ["required_payload","enum_payload"], + "properties": { + "required_payload": {"type": "string"}, + "enum_payload": { + "type": "string", + "enum": ["enum_string_1", "enum_string_2"], + "default": "enum_string_1" + } + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送带有 URL 编码的表单数据的请求来验证: + +```shell +curl -i "http://127.0.0.1:9080/post" -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "required_payload=hello&enum_payload=enum_string_1" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 400 Bad Request` 响应: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": { + "enum_payload": "enum_string_1", + "required_payload": 
"hello" + }, + "headers": { + ... + }, + "json": null, + "origin": "127.0.0.1, 183.17.233.107", + "url": "http://127.0.0.1/post" +} +``` + +发送不带 URL 编码字段 `enum_payload` 的请求: + +```shell +curl -i "http://127.0.0.1:9080/post" -X POST \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "required_payload=hello" +``` + +您应该收到以下 `HTTP/1.1 400 Bad Request`: + +```text +property "enum_payload" is required +``` + +## 附录:JSON 模式 + +以下部分提供了样板 JSON 模式,供您调整、组合和使用此插件。有关完整参考,请参阅 [JSON 模式规范](https://json-schema.org/specification)。 + +### 枚举值 + +```json +{ + "body_schema": { + "type": "object", + "required": ["enum_payload"], + "properties": { + "enum_payload": { + "type": "string", + "enum": ["enum_string_1", "enum_string_2"], + "default": "enum_string_1" + } + } + } +} +``` + +### 布尔值 + +```json +{ + "body_schema": { + "type": "object", + "required": ["bool_payload"], + "properties": { + "bool_payload": { + "type": "boolean", + "default": true + } + } + } +} +``` + +### 数值 + +```json +{ + "body_schema": { + "type": "object", + "required": ["integer_payload"], + "properties": { + "integer_payload": { + "type": "integer", + "minimum": 1, + "maximum": 65535 + } + } + } +} +``` + +### 字符串 + +```json +{ + "body_schema": { + "type": "object", + "required": ["string_payload"], + "properties": { + "string_payload": { + "type": "string", + "minLength": 1, + "maxLength": 32 + } + } + } +} +``` + +### 字符串的正则表达式 + +```json +{ + "body_schema": { + "type": "object", + "required": ["regex_payload"], + "properties": { + "regex_payload": { + "type": "string", + "minLength": 1, + "maxLength": 32, + "pattern": "[[^[a-zA-Z0-9_]+$]]" + } + } + } +} +``` + +### 数组 + +```json +{ + "body_schema": { + "type": "object", + "required": ["array_payload"], + "properties": { + "array_payload": { + "type": "array", + "minItems": 1, + "items": { + "type": "integer", + "minimum": 200, + "maximum": 599 + }, + "uniqueItems": true, + "default": [200, 302] + } + } + } +} +``` diff --git 
a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/response-rewrite.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/response-rewrite.md new file mode 100644 index 0000000..3e0a08d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/response-rewrite.md @@ -0,0 +1,313 @@ +--- +title: response-rewrite +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Response Rewrite + - response-rewrite +description: response-rewrite 插件提供了重写 APISIX 及其上游服务返回给客户端的响应的选项。使用该插件,您可以修改 HTTP 状态代码、请求标头、响应正文等。 +--- + + + + + + + +## 描述 + +`response-rewrite` 插件提供了重写 APISIX 及其上游服务返回给客户端的响应的选项。使用此插件,您可以修改 HTTP 状态代码、请求标头、响应正文等。 + +例如,您可以使用此插件来: + +- 通过设置 `Access-Control-Allow-*` 标头来支持 [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS)。 +- 通过设置 HTTP 状态代码和 `Location` 标头来指示重定向。 + +:::tip + +如果你仅需要重定向功能,建议使用 [redirect](redirect.md) 插件。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +|-----------------|---------|--------|--------|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| status_code | integer | 否 | | [200, 598] | 修改上游返回状态码,默认保留原始响应代码。 | +| body | string | 否 | | | 修改上游返回的 `body` 内容,如果设置了新内容,header 里面的 `Content-Length` 字段也会被去掉。 | +| body_base64 | boolean | 否 | false | | 如果为 true,则在发送到客户端之前解码`body` 中配置的响应主体,这对于图像和 protobuf 解码很有用。请注意,此配置不能用于解码上游响应。 | +| headers | object | 否 | | | 按照 `add`、`remove` 和 `set` 的顺序执行的操作。 | +| headers.add | array[string] | 否 | | | 要附加到请求的标头。如果请求中已经存在标头,则会附加标头值。标头值可以设置为常量,也可以设置为一个或多个 [Nginx 变量](https://nginx.org/en/docs/http/ngx_http_core_module.html)。 | +| headers.set | object | 否 | | |要设置到请求的标头。如果请求中已经存在标头,则会覆盖标头值。标头值可以设置为常量,也可以设置为一个或多个[Nginx 变量](https://nginx.org/en/docs/http/ngx_http_core_module.html)。 | +| headers.remove | array[string] | 否 | | | 要从请求中删除的标头。 | +| vars | array[array] | 否 | | | 以 
[lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 的形式包含一个或多个匹配条件的数组。 | +| filters | array[object] | 否 | | | 通过将一个指定字符串替换为另一个指定字符串来修改响应主体的过滤器列表。不应与 `body` 一起配置。 | +| filters.regex | string | True | | | 用于匹配响应主体的 RegEx 模式。 | +| filters.scope | string | 否 | "once" | ["once","global"] | 替换范围。`once` 替换第一个匹配的实例,`global` 全局替换。 | +| filters.replace | string | True | | | 要替换的内容。 | +| filters.options | string | 否 | "jo" | | 用于控制如何执行匹配操作的 RegEx 选项。请参阅[Lua NGINX 模块](https://github.com/openresty/lua-nginx-module#ngxrematch)以了解可用选项。| + +## 示例 + +以下示例演示了如何在不同场景中在路由上配置 `response-rewrite`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 重写标头和正文 + +以下示例演示了如何添加响应正文和标头,仅适用于具有 `200` HTTP 状态代码的响应。 + +创建一个带有 `response-rewrite` 插件的路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "response-rewrite-route", + "methods": ["GET"], + "uri": "/headers", + "plugins": { + "response-rewrite": { + "body": "{\"code\":\"ok\",\"message\":\"new json body\"}", + "headers": { + "set": { + "X-Server-id": 3, + "X-Server-status": "on", + "X-Server-balancer-addr": "$balancer_ip:$balancer_port" + } + }, + "vars": [ + [ "status","==",200 ] + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送请求以验证: + +```shell +curl -i "http://127.0.0.1:9080/headers" +``` + +您应该收到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```text +... 
+X-Server-id: 3 +X-Server-status: on +X-Server-balancer-addr: 50.237.103.220:80 + +{"code":"ok","message":"new json body"} +``` + +### 使用 RegEx 过滤器重写标头 + +以下示例演示如何使用 RegEx 过滤器匹配替换响应中的 `X-Amzn-Trace-Id`。 + +创建一个带有 `response-rewrite` 插件的路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "response-rewrite-route", + "methods": ["GET"], + "uri": "/headers", + "plugins":{ + "response-rewrite":{ + "filters":[ + { + "regex":"X-Amzn-Trace-Id", + "scope":"global", + "replace":"X-Amzn-Trace-Id-Replace" + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送请求以验证: + +```shell +curl -i "http://127.0.0.1:9080/headers" +``` + +您应该会看到类似以下内容的响应: + +```text +{ + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/8.2.1", + "X-Amzn-Trace-Id-Replace": "Root=1-6500095d-1041b05e2ba9c6b37232dbc7", + "X-Forwarded-Host": "127.0.0.1" + } +} +``` + +### 从 Base64 解码正文 + +以下示例演示如何从 Base64 格式解码正文。 + +创建一个带有 `response-rewrite` 插件的路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "response-rewrite-route", + "methods": ["GET"], + "uri": "/get", + "plugins":{ + "response-rewrite": { + "body": "SGVsbG8gV29ybGQ=", + "body_base64": true + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +发送请求以验证: + +```shell +curl "http://127.0.0.1:9080/get" +``` + +您应该看到以下响应: + +```text +Hello World +``` + +### 重写响应及其与执行阶段的联系 + +以下示例通过使用 `key-auth` 插件配置插件,演示了 `response-rewrite` 插件与 [执行阶段](/apisix/key-concepts/plugins#plugins-execution-lifecycle) 之间的联系,并查看在未经身份验证的请求的情况下,响应仍如何重写为 `200 OK`。 + +创建消费者 `jack`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jack" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl 
"http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jack-key-auth", + "plugins": { + "key-auth": { + "key": "jack-key" + } + } + }' +``` + +创建一个带有 `key-auth` 的路由,并配置 `response-rewrite` 来重写响应状态码和主体: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "response-rewrite-route", + "uri": "/get", + "plugins": { + "key-auth": {}, + "response-rewrite": { + "status_code": 200, + "body": "{\"code\": 200, \"msg\": \"success\"}" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +使用有效密钥向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/get" -H 'apikey: jack-key' +``` + +您应该收到以下 `HTTP/1.1 200 OK` 响应: + +```text +{"code": 200, "msg": "success"} +``` + +向路由发送一个没有任何键的请求: + +```shell +curl -i "http://127.0.0.1:9080/get" +``` + +您仍应收到相同的 `HTTP/1.1 200 OK` 响应,而不是来自 `key-auth` 插件的 `HTTP/1.1 401 Unauthorized`。这表明 `response-rewrite` 插件仍在重写响应。 + +这是因为 `response-rewrite` 插件的 **header_filter** 和 **body_filter** 阶段逻辑将在 [`ngx.exit`](https://openresty-reference.readthedocs.io/en/latest/Lua_Nginx_API/#ngxexit) 之后在其他插件的 **access** 或 **rewrite** 阶段继续运行。 + +下表总结了 `ngx.exit` 对执行阶段的影响。 + +| 阶段 | rewrite | access | header_filter | body_filter | +|---------------|----------|----------|---------------|-------------| +| **rewrite** | ngx.exit | | | | +| **access** | × | ngx.exit | | | +| **header_filter** | ✓ | ✓ | ngx.exit | | +| **body_filter** | ✓ | ✓ | × | ngx.exit | + +例如,如果 `ngx.exit` 发生在 **rewrite** 阶段,它将中断 **access** 阶段的执行,但不会干扰 **header_filter** 和 **body_filter** 阶段。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/rocketmq-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/rocketmq-logger.md new file mode 100644 index 0000000..21d8e42 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/rocketmq-logger.md @@ -0,0 +1,225 @@ 
+--- +title: rocketmq-logger +keywords: + - APISIX + - API 网关 + - Plugin + - RocketMQ +description: API 网关 Apache APISIX 的 rocketmq-logger 插件用于将日志作为 JSON 对象推送到 Apache RocketMQ 集群中。 +--- + + + +## 描述 + +`rocketmq-logger` 插件可以将日志以 JSON 的形式推送给外部 RocketMQ 集群。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------------- | ------- | ------ | ---------------- | ------------- ------- | ------------------------------------------------ | +| nameserver_list | object | 是 | | | RocketMQ 的 nameserver 列表。 | +| topic | string | 是 | | | 要推送的 topic 名称。 | +| key | string | 否 | | | 发送消息的 keys。 | +| tag | string | 否 | | | 发送消息的 tags。 | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| timeout | integer | 否 | 3 | [1,...] | 发送数据的超时时间。 | +| use_tls | boolean | 否 | false | | 当设置为 `true` 时,开启 TLS 加密。 | +| access_key | string | 否 | "" | | ACL 认证的 Access key,空字符串表示不开启 ACL。 | +| secret_key | string | 否 | "" | | ACL 认证的 Secret key。 | +| name | string | 否 | "rocketmq logger" | | 标识 logger 的唯一标识符。如果您使用 Prometheus 监视 APISIX 指标,名称将以 `apisix_batch_process_entries` 导出。 | +| meta_format | enum | 否 | "default" | ["default","origin"] | `default`:获取请求信息以默认的 JSON 编码方式。`origin`:获取请求信息以 HTTP 原始请求方式。更多信息,请参考 [meta_format](#meta_format-示例)。| +| include_req_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,包含请求体。**注意**:如果请求体无法完全存放在内存中,由于 NGINX 的限制,APISIX 无法将它记录下来。| +| include_req_body_expr | array | 否 | | | 当 `include_req_body` 属性设置为 `true` 时进行过滤请求体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录请求体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | +| include_resp_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,包含响应体。 | +| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 属性设置为 `true` 时进行过滤响应体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | + +注意:schema 中还定义了 `encrypt_fields 
= {"secret_key"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +该插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +:::tip 提示 + +数据首先写入缓冲区。当缓冲区超过 `batch_max_size` 或 `buffer_duration` 设置的值时,则会将数据发送到 RocketMQ 服务器并刷新缓冲区。 + +如果发送成功,则返回 `true`。如果出现错误,则返回 `nil`,并带有描述错误的字符串 `buffer overflow`。 + +::: + +### meta_format 示例 + +- default: + +```json + { + "upstream": "127.0.0.1:1980", + "start_time": 1619414294760, + "client_ip": "127.0.0.1", + "service_id": "", + "route_id": "1", + "request": { + "querystring": { + "ab": "cd" + }, + "size": 90, + "uri": "/hello?ab=cd", + "url": "http://localhost:1984/hello?ab=cd", + "headers": { + "host": "localhost", + "content-length": "6", + "connection": "close" + }, + "method": "GET" + }, + "response": { + "headers": { + "connection": "close", + "content-type": "text/plain; charset=utf-8", + "date": "Mon, 26 Apr 2021 05:18:14 GMT", + "server": "APISIX/2.5", + "transfer-encoding": "chunked" + }, + "size": 190, + "status": 200 + }, + "server": { + "hostname": "localhost", + "version": "2.5" + }, + "latency": 0 + } +``` + +- origin: + +```http + GET /hello?ab=cd HTTP/1.1 + host: localhost + content-length: 6 + connection: close + + abcdef +``` + +## 插件元数据设置 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|------------|--------|-----|-------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| log_format | object | 否 | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::note 注意 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `rocketmq-logger` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 
`admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/rocketmq-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +在日志收集处,将得到类似下面的日志: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## 启用插件 + +你可以通过如下命令在指定路由上启用 `rocketmq-logger` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "batch_max_size": 1, + "name": "rocketmq logger" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +该插件还支持一次推送到多个 `nameserver`,示例如下: + +```json +[ + "127.0.0.1:9876", + "127.0.0.2:9876" +] +``` + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/server-info.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/server-info.md new file mode 100644 index 0000000..94c2aa0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/server-info.md @@ -0,0 +1,118 @@ +--- +title: server-info +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Server info + - 
server-info +description: 本文介绍了关于 Apache APISIX `server-info` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`server-info` 插件可以定期将服务基本信息上报至 etcd。 + +:::warning + +`server-info` 插件已弃用,将在未来的版本中被移除。更多关于弃用和移除计划的信息,请参考[这个讨论](https://github.com/apache/apisix/discussions/12298)。 + +::: + +服务信息中每一项的含义如下: + +| 名称 | 类型 | 描述 | +| ---------------- | ------- | --------------------------------------------------------------------------------------------------------------------- | +| boot_time | integer | APISIX 服务实例的启动时间(UNIX 时间戳),如果对 APISIX 进行热更新操作,该值将被重置。普通的 reload 操作不会影响该值。 | +| id | string | APISIX 服务实例 id。 | +| etcd_version | string | etcd 集群的版本信息,如果 APISIX 和 etcd 集群之间存在网络分区,该值将设置为 `"unknown"`。 | +| version | string | APISIX 版本信息。 | +| hostname | string | 部署 APISIX 的主机或 Pod 的主机名信息。 | + +## 属性 + +无。 + +## 插件接口 + +该插件在 [Control API](../control-api.md) 下暴露了一个 API 接口 `/v1/server_info`。 + +## 启用插件 + +该插件默认是禁用状态,你可以在配置文件(`./conf/config.yaml`)中添加如下配置启用 `server-info` 插件。 + +```yaml title="conf/config.yaml" +plugins: # plugin list + - ... + - server-info +``` + +## 自定义服务信息上报配置 + +我们可以在 `./conf/config.yaml` 文件的 `plugin_attr` 部分修改上报配置。 + +下表是可以自定义配置的参数: + +| 名称 | 类型 | 默认值 | 描述 | +| --------------- | ------- | ------ | --------------------------------------------------------------- | +| report_ttl | integer | 36 | etcd 中服务信息保存的 TTL(单位:秒,最大值:86400,最小值:3)。| + +以下是示例是通过修改配置文件(`conf/config.yaml`)中的 `plugin_attr` 部分将 `report_ttl` 设置为 1 分钟: + +```yaml title="conf/config.yaml" +plugin_attr: + server-info: + report_ttl: 60 +``` + +## 测试插件 + +在启用 `server-info` 插件后,可以通过插件的 Control API 来访问到这些数据: + +```shell +curl http://127.0.0.1:9090/v1/server_info -s | jq . 
+``` + +```JSON +{ + "etcd_version": "3.5.0", + "id": "b7ce1c5c-b1aa-4df7-888a-cbe403f3e948", + "hostname": "fedora32", + "version": "2.1", + "boot_time": 1608522102 +} +``` + +:::tip + +你可以通过 [APISIX Dashboard](/docs/dashboard/USER_GUIDE) 查看服务信息报告。 + +::: + +## 删除插件 + +如果你想禁用插件,可以将 `server-info` 从配置文件中的插件列表删除,重新加载 APISIX 后即可生效。 + +```yaml title="conf/config.yaml" +plugins: # plugin list + - ... +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/serverless.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/serverless.md new file mode 100644 index 0000000..6b9b847 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/serverless.md @@ -0,0 +1,147 @@ +--- +title: serverless +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Serverless +description: 本文介绍了关于 API 网关 Apache APISIX serverless-pre-function 和 serverless-post-function 插件的基本信息及使用方法。 +--- + + + +## 描述 + +APISIX 有两个 `serverless` 插件:`serverless-pre-function` 和 `serverless-post-function`。 + +`serverless-pre-function` 插件会在指定阶段开始时运行,`serverless-post-function` 插件会在指定阶段结束时运行。这两个插件使用相同的属性。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| --------- | ------------- | ------- | ---------- | ---------------------------------------------------------------------------- | ------------------------------------------------------------------------------ | +| phase | string | 否 | ["access"] | ["rewrite", "access", "header_filter", "body_filter", "log", "before_proxy"] | 执行 serverless 函数的阶段。 | +| functions | array[string] | 是 | | | 指定运行的函数列表。该属性可以包含一个函数,也可以是多个函数,按照先后顺序执行。 | + +:::info 重要 + +此处仅接受函数,不接受其他类型的 Lua 代码。 + +比如匿名函数是合法的: + +```lua +return function() + ngx.log(ngx.ERR, 'one') +end +``` + +闭包也是合法的: + +```lua +local count = 1 +return function() + count = count + 1 + ngx.say(count) +end +``` + +但不是函数类型的代码就是非法的: + +```lua +local count = 1 +ngx.say(count) +``` + +::: + +:::note 注意 + +从 `v2.6` 版本开始,`conf` 和 `ctx` 作为前两个参数传递给 `serverless` 函数。 + +在 `v2.12.0` 
版本之前,`before_proxy` 阶段曾被称作 `balancer`。考虑到这一方法是在 `access` 阶段之后、请求到上游之前运行,并且与 `balancer` 没有关联,因此已经更新为 `before_proxy`。 + +::: + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function() ngx.log(ngx.ERR, \"serverless pre function\"); end"] + }, + "serverless-post-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) ngx.log(ngx.ERR, \"match uri \", ctx.curr_req_matched and ctx.curr_req_matched._path); end"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求: + +```shell +curl -i http://127.0.0.1:9080/index.html +``` + +如果你在 `./logs/error.log` 中发现 `serverless pre function` 和 `match uri /index.html` 两个 error 级别的日志,表示指定的函数已经生效。 + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/index.html", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/skywalking-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/skywalking-logger.md new file mode 100644 index 0000000..79eab65 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/skywalking-logger.md @@ -0,0 +1,343 @@ +--- +title: skywalking-logger +keywords: + - Apache APISIX + - API 网关 + - Plugin + - SkyWalking +description: skywalking-logger 将请求和响应日志作为 JSON 对象批量推送到 SkyWalking OAP 服务器,并支持日志格式的自定义。 +--- + + + + + + + +## 描述 + 
+`skywalking-logger` 插件将请求和响应日志作为 JSON 对象批量推送到 SkyWalking OAP 服务器,并支持日志格式的自定义。 + +如果存在现有的跟踪上下文,它会自动设置跟踪日志关联并依赖于 [SkyWalking 跨进程传播标头协议](https://skywalking.apache.org/docs/main/next/en/api/x-process-propagation-headers-v3/)。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------------- | ------- | ------ | -------------------- | ------------- | ---------------------------------------------------------------- | +| endpoint_addr | string | 是 | | | SkyWalking OAP 服务器的 URI。 | +| service_name | string | 否 |"APISIX" | | SkyWalking 服务名称。 | +| service_instance_name | string | 否 |"APISIX Instance Name"| | SkyWalking 服务的实例名称。当设置为 `$hostname` 会直接获取本地主机名。 | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| timeout | integer | 否 | 3 | [1,...] | 发送请求后保持连接活动的时间。 | +| name | string | 否 | "skywalking logger" | | 标识 logger 的唯一标识符。如果您使用 Prometheus 监视 APISIX 指标,名称将以 `apisix_batch_process_entries` 导出。 | +| include_req_body | boolean | 否 | false |如果为 true,则将请求主体包含在日志中。请注意,如果请求主体太大而无法保存在内存中,则由于 NGINX 的限制而无法记录。| +| include_req_body_expr | array[array] | 否 | | 一个或多个条件的数组,形式为 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。在 `include_req_body` 为 true 时使用。仅当此处配置的表达式计算结果为 true 时,才会记录请求主体。| +| include_resp_body | boolean | 否 | false | 如果为 true,则将响应主体包含在日志中。| +| include_resp_body_expr | array[array] | 否 | | 一个或多个条件的数组,形式为 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。在 `include_resp_body` 为 true 时使用。仅当此处配置的表达式计算结果为 true 时,才会记录响应主体。| + +该插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +## 元数据 + +您还可以通过配置插件元数据来设置日志的格式。可用的配置如下: + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------------- | ------- | ------ | -------------------- | ------------- | ---------------------------------------------------------------- | +| log_format | 
object | 否 | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +## 示例 + +以下示例演示了如何为不同场景配置 `skywalking-logger` 插件。 + +要按照示例操作,请按照 [Skywalking 的文档](https://skywalking.apache.org/docs/main/next/en/setup/backend/backend-docker/) 使用 Docker Compose 启动存储、OAP 和 Booster UI。设置完成后,OAP 服务器应在 `12800` 上监听,并且您应该能够通过 [http://localhost:8080](http://localhost:8080) 访问 UI。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 以默认日志格式记录请求 + +以下示例演示了如何在路由上配置 `skywalking-logger` 插件,以记录到达路由的请求信息。 + +使用 `skywalking-logger` 插件创建路由,并使用 OAP 服务器 URI 配置插件: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + "skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +在 [Skywalking UI](http://localhost:8080) 中,导航至 __General Service__ > __Services__。您应该会看到一个名为 `APISIX` 的服务,其中包含与您的请求相对应的日志条目: + +```json +{ + "upstream_latency": 674, + "request": { + "method": "GET", + "headers": { + "user-agent": "curl/8.6.0", + "host": "127.0.0.1:9080", + "accept": "*/*" + }, + "url": "http://127.0.0.1:9080/anything", + "size": 85, + "querystring": {}, + "uri": "/anything" + }, + "client_ip": "192.168.65.1", + "route_id": "skywalking-logger-route", + "start_time": 1736945107345, + "upstream": "3.210.94.60:80", + "server": { + "version": "3.11.0", + "hostname": "7edbcebe8eb3" + }, + "service_id": "", + "response": { + "size": 619, + "status": 200, + "headers": { + "content-type": "application/json", + "date": "Thu, 16 Jan 2025 12:45:08 GMT", + 
"server": "APISIX/3.11.0", + "access-control-allow-origin": "*", + "connection": "close", + "access-control-allow-credentials": "true", + "content-length": "391" + } + }, + "latency": 764.9998664856, + "apisix_latency": 90.999866485596 +} +``` + +### 使用插件元数据记录请求和响应标头 + +以下示例演示了如何使用插件元数据和内置变量自定义日志格式,以记录来自请求和响应的特定标头。 + +在 APISIX 中,插件元数据用于配置同一插件的所有插件实例的通用元数据字段。当插件在多个资源中启用并需要对其元数据字段进行通用更新时,它很有用。 + +首先,使用 `skywalking-logger` 插件创建路由,并使用您的 OAP 服务器 URI 配置插件: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + "skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +接下来,配置 `skywalking-logger` 的插件元数据,以记录自定义请求头 `env` 和响应头 `Content-Type`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/plugin_metadata/skywalking-logger" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr", + "env": "$http_env", + "resp_content_type": "$sent_http_Content_Type" + } + }' +``` + +使用 `env` 标头向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H "env: dev" +``` + +您应该收到 `HTTP/1.1 200 OK` 响应。在 [Skywalking UI](http://localhost:8080) 中,导航至 __General Service__ > __Services__。您应该会看到一个名为 `APISIX` 的服务,其中包含与您的请求相对应的日志条目: + +```json +[ + { + "route_id": "skywalking-logger-route", + "client_ip": "192.168.65.1", + "@timestamp": "2025-01-16T12:51:53+00:00", + "host": "127.0.0.1", + "env": "dev", + "resp_content_type": "application/json" + } +] +``` + +### 有条件地记录请求主体 + +以下示例演示了如何有条件地记录请求主体。 + +使用 `skywalking-logger` 插件创建一个路由,仅当 URL 查询字符串 `log_body` 为 `yes` 时才包含请求主体: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + 
"skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800", + "include_req_body": true, + "include_req_body_expr": [["arg_log_body", "==", "yes"]] + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +使用满足以下条件的 URL 查询字符串向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything?log_body=yes" -X POST -d '{"env": "dev"}' +``` + +您应该收到 `HTTP/1.1 200 OK` 响应。在 [Skywalking UI](http://localhost:8080) 中,导航到 __General Service__ > __Services__。您应该看到一个名为 `APISIX` 的服务,其中包含与您的请求相对应的日志条目,并记录了请求正文: + +```json +[ + { + "request": { + "url": "http://127.0.0.1:9080/anything?log_body=yes", + "querystring": { + "log_body": "yes" + }, + "uri": "/anything?log_body=yes", + ..., + "body": "{\"env\": \"dev\"}", + }, + ... + } +] +``` + +向路由发送一个没有任何 URL 查询字符串的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" -X POST -d '{"env": "dev"}' +``` + +您不应该观察到没有请求正文的日志条目。 + +:::info + +如果您除了将 `include_req_body` 或 `include_resp_body` 设置为 `true` 之外还自定义了 `log_format`,则插件不会在日志中包含正文。 + +作为一种解决方法,您可以在日志格式中使用 NGINX 变量 `$request_body`,例如: + +```json +{ + "skywalking-logger": { + ..., + "log_format": {"body": "$request_body"} + } +} +``` + +::: + +### 将跟踪与日志关联 + +以下示例演示了如何在路由上配置 `skywalking-logger` 插件,以记录到达路由的请求信息。 + +使用 `skywalking-logger` 插件创建路由,并使用 OAP 服务器 URI 配置插件: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + "skywalking": { + "sample_ratio": 1 + }, + "skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +生成几个对路由的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +在 [Skywalking UI](http://localhost:8080) 中,导航到 __General Service__ > __Services__。您应该会看到一个名为 `APISIX` 的服务,其中包含与您的请求相对应的跟踪,您可以在其中查看相关日志: + +![trace 
context](https://static.apiseven.com/uploads/2025/01/16/soUpXm6b_trace-view-logs.png) + +![associated log](https://static.apiseven.com/uploads/2025/01/16/XD934LvU_associated-logs.png) diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/skywalking.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/skywalking.md new file mode 100644 index 0000000..7e459be --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/skywalking.md @@ -0,0 +1,180 @@ +--- +title: skywalking +keywords: + - Apache APISIX + - API 网关 + - Plugin + - SkyWalking +description: skywalking 插件支持与 Apache SkyWalking 集成以进行请求跟踪。 +--- + + + + + + + +## 描述 + +`skywalking` 插件支持与 [Apache SkyWalking](https://skywalking.apache.org) 集成以进行请求跟踪。 + +SkyWalking 使用其原生的 Nginx Lua 跟踪器从服务和 URI 角度提供跟踪、拓扑分析和指标。APISIX 支持 HTTP 协议与 SkyWalking 服务器交互。 + +服务端目前支持 HTTP 和 gRPC 两种协议,在 APISIX 中目前只支持 HTTP 协议。 + +## 静态配置 + +默认情况下,插件的服务名称和端点地址已在[默认配置](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua)中预先配置。 + +要自定义这些值,请将相应的配置添加到 `config.yaml`。例如: + +```yaml +plugin_attr: + skywalking: + report_interval: 3 # 上报间隔时间(秒)。 + service_name: APISIX # SkyWalking 记者的服务名称。 + service_instance_name: "APISIX Instance Name" # SkyWalking 记者的服务实例名称。 + # 设置为 $hostname 可获取本地主机名。 + endpoint_addr: http://127.0.0.1:12800 # SkyWalking HTTP 端点。 +``` + +重新加载 APISIX 以使更改生效。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------ | ------ | ------ | ------ | ------------ | ----------------------------------------------------- | +| sample_ratio | number | 是 | 1 | [0.00001, 1] | 请求采样频率。将采样率设置为 `1` 表示对所有请求进行采样。 | + +## 示例 + +要遵循示例,请按照 [Skywalking 的文档](https://skywalking.apache.org/docs/main/next/en/setup/backend/backend-docker/) 使用 Docker Compose 启动存储、OAP 和 Booster UI。设置完成后,OAP 服务器应监听 `12800`,您应该能够通过 [http://localhost:8080](http://localhost:8080) 访问 UI。 + +更新 APISIX 配置文件以启用 `skywalking` 插件(默认情况下处于禁用状态),并更新端点地址: + +```yaml title="config.yaml" +plugins: + - skywalking + - ... 
+ +plugin_attr: + skywalking: + report_interval: 3 + service_name: APISIX + service_instance_name: APISIX Instance + endpoint_addr: http://192.168.2.103:12800 +``` + +重新加载 APISIX 以使配置更改生效。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 跟踪所有请求 + +以下示例演示了如何跟踪通过路由的所有请求。 + +使用 `skywalking` 创建路由,并将采样率配置为 1 以跟踪所有请求: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-route", + "uri": "/anything", + "plugins": { + "skywalking": { + "sample_ratio": 1 + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +向路由发送几个请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该收到 `HTTP/1.1 200 OK` 响应。 + +在 [Skywalking UI](http://localhost:8080) 中,导航到 __General Service__ > __Services__。您应该看到一个名为 `APISIX` 的服务,其中包含与您的请求相对应的跟踪: + +![SkyWalking APISIX 跟踪](https://static.apiseven.com/uploads/2025/01/15/UdwiO8NJ_skywalking-traces.png) + +### 将跟踪与日志关联 + +以下示例演示了如何在路由上配置 `skywalking-logger` 插件,以记录到达路由的请求信息。 + +使用 `skywalking-logger` 插件创建路由,并使用你的 OAP 服务器 URI 配置该插件: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "skywalking-logger-route", + "uri": "/anything", + "plugins": { + "skywalking": { + "sample_ratio": 1 + }, + "skywalking-logger": { + "endpoint_addr": "http://192.168.2.103:12800" + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + } + }' +``` + +生成几个对路由的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该会收到 `HTTP/1.1 200 OK` 响应。 + +在 [Skywalking UI](http://localhost:8080) 中,导航到 __General Service__ > __Services__。您应该会看到一个名为 `APISIX` 的服务,其中包含与您的请求相对应的跟踪,您可以在其中查看相关日志: + +![trace 
context](https://static.apiseven.com/uploads/2025/01/16/soUpXm6b_trace-view-logs.png) + +![associated log](https://static.apiseven.com/uploads/2025/01/16/XD934LvU_associated-logs.png) diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/sls-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/sls-logger.md new file mode 100644 index 0000000..12f4653 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/sls-logger.md @@ -0,0 +1,180 @@ +--- +title: sls-logger +--- + + + +## 描述 + +`sls-logger` 是使用 [RF5424](https://tools.ietf.org/html/rfc5424) 标准将日志数据以 JSON 格式发送到 [阿里云日志服务](https://help.aliyun.com/document_detail/112903.html?spm=a2c4g.11186623.6.763.21321b47wcwt1u)。 + +该插件提供了将 Log Data 作为批处理推送到阿里云日志服务器的功能。如果您没有收到日志数据,请放心一些时间,它会在我们的批处理处理器中的计时器功能到期后自动发送日志。 + +有关 Apache APISIX 中 Batch-Processor 的更多信息,请参考: +[Batch-Processor](../batch-processor.md) + +## 属性 + +| 属性名称 | 必选项 | 描述 | +|--------- |--------|-----------| +| host | 必要的 | TCP 服务的 IP 地址或主机名,请参考:[阿里云日志服务列表](https://help.aliyun.com/document_detail/29008.html?spm=a2c4g.11186623.2.14.49301b4793uX0z#reference-wgx-pwq-zdb),建议配置 IP 取代配置域名。| +| port | 必要的 | 目标端口,阿里云日志服务默认端口为 10009。| +| timeout | 可选的 | 发送数据超时间。| +| log_format | 可选的 | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| project | 必要的 | 日志服务 Project 名称,请提前在阿里云日志服务中创建 Project。| +| logstore | 必须的 | 日志服务 Logstore 名称,请提前在阿里云日志服务中创建 Logstore。| +| access_key_id | 必须的 | AccessKey ID。建议使用阿里云子账号 AK,详情请参见 [授权](https://help.aliyun.com/document_detail/47664.html?spm=a2c4g.11186623.2.15.49301b47lfvxXP#task-xsk-ttc-ry)。| +| access_key_secret | 必须的 | AccessKey Secret。建议使用阿里云子账号 AK,详情请参见 [授权](https://help.aliyun.com/document_detail/47664.html?spm=a2c4g.11186623.2.15.49301b47lfvxXP#task-xsk-ttc-ry)。| +| include_req_body | 可选的 | 是否包含请求体。| +| include_req_body_expr | 可选的 | 当 `include_req_body` 属性设置为 `true` 
时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录请求体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 | +| include_resp_body | 可选的 | 当设置为 `true` 时,日志中将包含响应体。 | +| include_resp_body_expr | 可选的 | 当 `include_resp_body` 属性设置为 `true` 时进行过滤响应体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | +|name| 可选的 | 批处理名字。如果您使用 Prometheus 监视 APISIX 指标,名称将以 `apisix_batch_process_entries` 导出。| + +注意:schema 中还定义了 `encrypt_fields = {"access_key_secret"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +本插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认设置情况下批处理器会每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解或自定义批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置) 配置部分。 + +### 默认日志格式示例 + +```json +{ + "route_conf": { + "host": "100.100.99.135", + "buffer_duration": 60, + "timeout": 30000, + "include_req_body": false, + "logstore": "your_logstore", + "log_format": { + "vip": "$remote_addr" + }, + "project": "your_project", + "inactive_timeout": 5, + "access_key_id": "your_access_key_id", + "access_key_secret": "your_access_key_secret", + "batch_max_size": 1000, + "max_retry_count": 0, + "retry_delay": 1, + "port": 10009, + "name": "sls-logger" + }, + "data": "<46>1 2024-01-06T03:29:56.457Z localhost apisix 28063 - [logservice project=\"your_project\" logstore=\"your_logstore\" access-key-id=\"your_access_key_id\" access-key-secret=\"your_access_key_secret\"] {\"vip\":\"127.0.0.1\",\"route_id\":\"1\"}\n" +} +``` + +## 插件元数据设置 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ | +| log_format | object | 可选 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [Nginx 内置变量](http://nginx.org/en/docs/varindex.html)。特别的,**该设置是全局生效的**,意味着指定 log_format 后,将对所有绑定 sls-logger 的 Route 或 Service 生效。 | + +### 设置日志格式示例 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 
并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/sls-logger -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +在日志收集处,将得到类似下面的日志: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## 如何开启 + +1. 下面例子展示了如何为指定路由开启 `sls-logger` 插件的。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/5 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "sls-logger": { + "host": "100.100.99.135", + "port": 10009, + "project": "your_project", + "logstore": "your_logstore", + "access_key_id": "your_access_key_id", + "access_key_secret": "your_access_key_secret", + "timeout": 30000 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +``` +注释:这里的 100.100.99.135 是阿里云华北 3 内外地址。 +``` + +## 测试插件 + +* 成功的情况: + +```shell +$ curl -i http://127.0.0.1:9080/hello +HTTP/1.1 200 OK +... 
+hello, world +``` + +* 查看阿里云日志服务上传记录 +![sls logger view](../../../assets/images/plugin/sls-logger-1.png "阿里云日志服务预览") + +## 删除插件 + +想要禁用“sls-logger”插件,是非常简单的,将对应的插件配置从 json 配置删除,就会立即生效,不需要重新启动服务: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/splunk-hec-logging.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/splunk-hec-logging.md new file mode 100644 index 0000000..ef22594 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/splunk-hec-logging.md @@ -0,0 +1,218 @@ +--- +title: splunk-hec-logging +keywords: + - Apache APISIX + - API 网关 + - 插件 + - Splunk + - 日志 +description: API 网关 Apache APISIX 的 splunk-hec-logging 插件可用于将请求日志转发到 Splunk HTTP 事件收集器(HEC)中进行分析和存储。 +--- + + + +## 描述 + +`splunk-hec-logging` 插件可用于将请求日志转发到 Splunk HTTP 事件收集器(HEC)中进行分析和存储。 + +启用该插件后,APISIX 将在 `Log Phase` 获取请求上下文信息,并将其序列化为 [Splunk Event Data 格式](https://docs.splunk.com/Documentation/Splunk/latest/Data/FormateventsforHTTPEventCollector#Event_metadata) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 `Splunk HEC` 中。 + +## 属性 + +| 名称 | 必选项 | 默认值 | 描述 | +| ------------------ | ------ | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| endpoint | 是 | | Splunk HEC 端点配置信息。 | +| endpoint.uri | 是 | | Splunk HEC 事件收集 API。 | +| endpoint.token | 是 | | Splunk HEC 身份令牌。 | +| endpoint.channel | 否 | | Splunk HEC 发送渠道标识,更多信息请参考 [About HTTP Event Collector Indexer Acknowledgment](https://docs.splunk.com/Documentation/Splunk/8.2.3/Data/AboutHECIDXAck)。 | +| endpoint.timeout | 否 | 10 | Splunk HEC 数据提交超时时间(以秒为单位)。 | +| ssl_verify | 否 | true | 当设置为 `true` 时,启用 `SSL` 验证。 | +| log_format 
| 否 | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +本插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免该插件频繁地提交数据。默认情况下每 `5` 秒钟或队列中的数据达到 `1000` 条时,批处理器会自动提交数据,如需了解更多信息或自定义配置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +### 默认日志格式示例 + +```json +{ + "sourcetype": "_json", + "time": 1704513555.392, + "event": { + "upstream": "127.0.0.1:1980", + "request_url": "http://localhost:1984/hello", + "request_query": {}, + "request_size": 59, + "response_headers": { + "content-length": "12", + "server": "APISIX/3.7.0", + "content-type": "text/plain", + "connection": "close" + }, + "response_status": 200, + "response_size": 118, + "latency": 108.00004005432, + "request_method": "GET", + "request_headers": { + "connection": "close", + "host": "localhost" + } + }, + "source": "apache-apisix-splunk-hec-logging", + "host": "localhost" +} +``` + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头。则表明获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::info 注意 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `splunk-hec-logging` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/splunk-hec-logging \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你将在日志系统中看到如下类似日志: + +```json 
+[{"time":1673976669.269,"source":"apache-apisix-splunk-hec-logging","event":{"host":"localhost","client_ip":"127.0.0.1","@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1"},"host":"DESKTOP-2022Q8F-wsl","sourcetype":"_json"}] +``` + +## 启用插件 + +以下示例展示了如何在指定路由上启用该插件: + +**完整配置** + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins":{ + "splunk-hec-logging":{ + "endpoint":{ + "uri":"http://127.0.0.1:8088/services/collector", + "token":"BD274822-96AA-4DA6-90EC-18940FB2414C", + "channel":"FE0ECFAD-13D5-401B-847D-77833BD77131", + "timeout":60 + }, + "buffer_duration":60, + "max_retry_count":0, + "retry_delay":1, + "inactive_timeout":2, + "batch_max_size":10 + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/splunk.do" +}' +``` + +**最小化配置** + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins":{ + "splunk-hec-logging":{ + "endpoint":{ + "uri":"http://127.0.0.1:8088/services/collector", + "token":"BD274822-96AA-4DA6-90EC-18940FB2414C" + } + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + }, + "uri":"/splunk.do" +}' +``` + +## 测试插件 + +你可以通过以下命令向 APISIX 发出请求: + +```shell +curl -i http://127.0.0.1:9080/splunk.do?q=hello +``` + +``` +HTTP/1.1 200 OK +... 
+hello, world +``` + +访问成功后,你可以登录 Splunk 控制台检索查看日志: + +![splunk hec search view](../../../assets/images/plugin/splunk-hec-admin-cn.png) + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/syslog.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/syslog.md new file mode 100644 index 0000000..c4a351e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/syslog.md @@ -0,0 +1,144 @@ +--- +title: syslog +keywords: + - APISIX + - API 网关 + - Plugin + - syslog +description: API 网关 Apache APISIX syslog 插件可用于将日志推送到 Syslog 服务器。 +--- + + + +## 描述 + +`syslog` 插件可用于将日志推送到 Syslog 服务器。 + +该插件还实现了将日志数据以 JSON 格式发送到 Syslog 服务的能力。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------ | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | +| host | string | 是 | | | IP 地址或主机名。 | +| port | integer | 是 | | | 目标上游端口。 | +| name | string | 否 | "sys logger" | | 标识 logger 的唯一标识符。如果您使用 Prometheus 监视 APISIX 指标,名称将以 `apisix_batch_process_entries` 导出。 | +| timeout | integer | 否 | 3000 | [1, ...] | 上游发送数据超时(以毫秒为单位)。 | +| tls | boolean | 否 | false | | 当设置为 `true` 时执行 SSL 验证。 | +| flush_limit | integer | 否 | 4096 | [1, ...] | 如果缓冲的消息的大小加上当前消息的大小达到(> =)此限制(以字节为单位),则缓冲的日志消息将被写入日志服务器,默认为 4096(4KB)。 | +| drop_limit | integer | 否 | 1048576 | | 如果缓冲的消息的大小加上当前消息的大小大于此限制(以字节为单位),则由于缓冲区大小有限,当前的日志消息将被丢弃,默认为 1048576(1MB)。 | +| sock_type | string | 否 | "tcp" | ["tcp","udp"] | 用于传输层的 IP 协议类型。 | +| max_retry_count | integer | 否 | | [1, ...] 
| 连接到日志服务器失败或将日志消息发送到日志服务器失败后的最大重试次数。 | +| retry_delay | integer | 否 | | [0, ...] | 重试连接到日志服务器或重试向日志服务器发送日志消息之前的时间延迟(以毫秒为单位)。 | +| pool_size | integer | 否 | 5 | [5, ...] | `sock:keepalive` 使用的 Keepalive 池大小。 | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| include_req_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时包括请求体。 | +| include_req_body_expr | array | 否 | | | 当 `include_req_body` 属性设置为 `true` 时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录请求体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 | +| include_resp_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,包含响应体。 | +| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 属性设置为 `true` 时进行过滤响应体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | + +该插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认情况下批处理器每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +### 默认日志格式示例 + +```text +"<46>1 2024-01-06T02:30:59.145Z 127.0.0.1 apisix 82324 - - {\"response\":{\"status\":200,\"size\":141,\"headers\":{\"content-type\":\"text/plain\",\"server\":\"APISIX/3.7.0\",\"transfer-encoding\":\"chunked\",\"connection\":\"close\"}},\"route_id\":\"1\",\"server\":{\"hostname\":\"baiyundeMacBook-Pro.local\",\"version\":\"3.7.0\"},\"request\":{\"uri\":\"/opentracing\",\"url\":\"http://127.0.0.1:1984/opentracing\",\"querystring\":{},\"method\":\"GET\",\"size\":155,\"headers\":{\"content-type\":\"application/x-www-form-urlencoded\",\"host\":\"127.0.0.1:1984\",\"user-agent\":\"lua-resty-http/0.16.1 (Lua) ngx_lua/10025\"}},\"upstream\":\"127.0.0.1:1982\",\"apisix_latency\":100.99999809265,\"service_id\":\"\",\"upstream_latency\":1,\"start_time\":1704508259044,\"client_ip\":\"127.0.0.1\",\"latency\":101.99999809265}\n" +``` + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | 
+|------------|--------|-----|-----|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| log_format | object | 否 | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头。则表明获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::info 重要 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `syslog` 的路由或服务都将使用该日志格式。 + +::: + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044, + "flush_limit" : 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +现在你可以向 APISIX 发起请求: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... 
+hello, world +``` + +## 删除插件 + +当你需要删除该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/tcp-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/tcp-logger.md new file mode 100644 index 0000000..158bb7c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/tcp-logger.md @@ -0,0 +1,194 @@ +--- +title: tcp-logger +keywords: + - Apache APISIX + - API 网关 + - Plugin + - TCP Logger +description: 本文介绍了 API 网关 Apache APISIX 如何使用 tcp-logger 插件将日志数据发送到 TCP 服务器。 +--- + + + +## 描述 + +`tcp-logger` 插件可用于将日志数据发送到 TCP 服务器。 + +该插件还实现了将日志数据以 JSON 格式发送到监控工具或其它 TCP 服务的能力。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------ | ------- | ------------------------------------------------ | +| host | string | 是 | | | TCP 服务器的 IP 地址或主机名。 | +| port | integer | 是 | | [0,...] | 目标端口。 | +| timeout | integer | 否 | 1000 | [1,...] 
| 发送数据超时时间。 |
+| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 |
+| tls | boolean | 否 | false | | 用于控制是否执行 SSL 验证。 |
+| tls_options | string | 否 | | | TLS 选项。 |
+| include_req_body | boolean | 否 | | [false, true] | 当设置为 `true` 时,日志中将包含请求体。 |
+| include_req_body_expr | array | 否 | | | 当 `include_req_body` 属性设置为 `true` 时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录请求体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 |
+| include_resp_body | boolean | 否 | false | [false, true]| 当设置为 `true` 时,日志中将包含响应体。 |
+| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 属性设置为 `true` 时进行过滤响应体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 |
+
+该插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认情况下批处理器每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。
+
+### 默认日志格式示例
+
+```json
+{
+  "response": {
+    "status": 200,
+    "headers": {
+      "server": "APISIX/3.7.0",
+      "content-type": "text/plain",
+      "content-length": "12",
+      "connection": "close"
+    },
+    "size": 118
+  },
+  "server": {
+    "version": "3.7.0",
+    "hostname": "localhost"
+  },
+  "start_time": 1704527628474,
+  "client_ip": "127.0.0.1",
+  "service_id": "",
+  "latency": 102.9999256134,
+  "apisix_latency": 100.9999256134,
+  "upstream_latency": 2,
+  "request": {
+    "headers": {
+      "connection": "close",
+      "host": "localhost"
+    },
+    "size": 59,
+    "method": "GET",
+    "uri": "/hello",
+    "url": "http://localhost:1984/hello",
+    "querystring": {}
+  },
+  "upstream": "127.0.0.1:1980",
+  "route_id": "1"
+}
+```
+
+## 插件元数据
+
+| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 |
+| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ |
+| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX
内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::info 注意 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `tcp-logger` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/tcp-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你将在日志系统中看到如下类似日志: + +```json +{"@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1","host":"localhost","client_ip":"127.0.0.1"} +``` + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 5044, + "tls": false, + "batch_max_size": 1, + "name": "tcp logger" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +现在你可以向 APISIX 发起请求: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... 
+hello, world +``` + +## 删除插件 + +当你需要删除该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/tencent-cloud-cls.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/tencent-cloud-cls.md new file mode 100644 index 0000000..a5c41bd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/tencent-cloud-cls.md @@ -0,0 +1,202 @@ +--- +title: tencent-cloud-cls +keywords: + - Apache APISIX + - API 网关 + - Plugin + - CLS + - 腾讯云 +description: API 网关 Apache APISIX tencent-cloud-cls 插件可用于将日志推送到[腾讯云日志服务](https://cloud.tencent.com/document/product/614)。 +--- + + + +## 描述 + +`tencent-cloud-cls` 插件可用于将 APISIX 日志使用[腾讯云日志服务](https://cloud.tencent.com/document/product/614) API 推送到您的日志主题。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ----------------- | ------- | ------ |-------| ------------ |------------------------------------------------------------------------------| +| cls_host | string | 是 | | | CLS API 域名,参考[使用 API 上传日志](https://cloud.tencent.com/document/api/614/16873)。| +| cls_topic | string | 是 | | | CLS 日志主题 id。 | +| secret_id | string | 是 | | | 云 API 密钥的 id。 | +| secret_key | string | 是 | | | 云 API 密钥的 key。 | +| sample_ratio | number | 否 | 1 | [0.00001, 1] | 采样的比例。设置为 `1` 时,将对所有请求进行采样。 | +| include_req_body | boolean | 否 | false | [false, true]| 当设置为 `true` 时,日志中将包含请求体。 | +| include_req_body_expr | array | 否 | | | 当 `include_req_body` 属性设置为 `true` 时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录请求体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 | +| include_resp_body | boolean | 否 | false | [false, true]| 当设置为 `true` 时,日志中将包含响应体。 | +| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 
属性设置为 `true` 时进行过滤响应体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | +| global_tag | object | 否 | | | kv 形式的 JSON 数据,可以写入每一条日志,便于在 CLS 中检索。 | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +注意:schema 中还定义了 `encrypt_fields = {"secret_key"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 + +该插件支持使用批处理器来聚合并批量处理条目(日志/数据)。这样可以避免插件频繁地提交数据,默认情况下批处理器每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +### 默认日志格式示例 + +```json +{ + "response": { + "headers": { + "content-type": "text/plain", + "connection": "close", + "server": "APISIX/3.7.0", + "transfer-encoding": "chunked" + }, + "size": 136, + "status": 200 + }, + "route_id": "1", + "upstream": "127.0.0.1:1982", + "client_ip": "127.0.0.1", + "apisix_latency": 100.99985313416, + "service_id": "", + "latency": 103.99985313416, + "start_time": 1704525145772, + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "upstream_latency": 3, + "request": { + "headers": { + "connection": "close", + "host": "localhost" + }, + "url": "http://localhost:1984/opentracing", + "querystring": {}, + "method": "GET", + "size": 65, + "uri": "/opentracing" + } +} +``` + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头。则表明获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::info 重要 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `tencent-cloud-cls` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' 
conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/tencent-cloud-cls \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你将在日志系统中看到如下类似日志: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## 启用插件 + +你可以通过以下命令在指定路由中启用该插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "ap-guangzhou.cls.tencentyun.com", + "cls_topic": "${your CLS topic name}", + "global_tag": { + "module": "cls-logger", + "server_name": "YourApiGateWay" + }, + "include_req_body": true, + "include_resp_body": true, + "secret_id": "${your secret id}", + "secret_key": "${your secret key}" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +现在你可以向 APISIX 发起请求: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... 
+hello, world +``` + +## 删除插件 + +当你需要删除该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/traffic-split.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/traffic-split.md new file mode 100644 index 0000000..a460d4b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/traffic-split.md @@ -0,0 +1,637 @@ +--- +title: traffic-split +keywords: + - APISIX + - API 网关 + - Traffic Split + - 灰度发布 + - 蓝绿发布 +description: traffic-split 插件根据条件和/或权重将流量引导至各种上游服务。它提供了一种动态灵活的方法来实施发布策略和管理流量。 +--- + + + + + + + +## 描述 + +`traffic-split` 插件根据条件和/或权重将流量引导至各种上游服务。它提供了一种动态且灵活的方法来实施发布策略和管理流量。 + +:::note 注意 + +由于该插件使用了加权循环算法(特别是在重置 `wrr` 状态时),因此在使用该插件时,可能会存在上游服务之间的流量比例不精准现象。 + +::: + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------------- | --------------| ------ | ------ | ------ |-------------------------------------------------------- -------------------------------------------------- -------------------------------------------------- -------------------------------------------------- --------------------------------------------------| +| rules.match | array[object] | 否 | | | 要执行的一对或多对匹配条件和操作的数组。 | +| rules.match | array[object] | 否 | | | 条件流量分割的匹配规则。 | +| rules.match.vars | array[array] | 否 | | | 以 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 形式包含一个或多个匹配条件的数组,用于有条件地执行插件。 | +| rules.weighted_upstreams | array[object] | 否 | | | 上游配置列表。 | +| rules.weighted_upstreams.upstream_id | 字符串/整数 | 否 | | | 配置的上游对象的 ID。 | +| rules.weighted_upstreams.weight | 整数 | 否 | weight = 1 | | 每个上游的权重。 | +| rules.weighted_upstreams.upstream | object | 否 | | | 上游配置。此处不支持某些上游配置选项。这些字段为 
`service_name`、`discovery_type`、`checks`、`retries`、`retry_timeout`、`desc` 和 `labels`。作为解决方法,您可以创建一个上游对象并在 `upstream_id` 中配置它。|
+| rules.weighted_upstreams.upstream.type | array | 否 | roundrobin | [roundrobin, chash] | 流量分割算法。`roundrobin` 用于加权循环,`chash` 用于一致性哈希。|
+| rules.weighted_upstreams.upstream.hash_on | array | 否 | vars | | 当 `type` 为 `chash` 时使用。支持对 [NGINX 变量](https://nginx.org/en/docs/varindex.html)、headers、cookie、Consumer 或它们的组合进行哈希处理。 |
+| rules.weighted_upstreams.upstream.key | string | 否 | | | 当 `type` 为 `chash` 时使用。当 `hash_on` 设置为 `header` 或 `cookie` 时,需要 `key`。当 `hash_on` 设置为 `consumer` 时,不需要 `key`,因为消费者名称将自动用作密钥。 |
+| rules.weighted_upstreams.upstream.nodes | object | 否 | | | 上游节点的地址。 |
+| rules.weighted_upstreams.upstream.timeout | object | 否 | 15 | | 连接、发送和接收消息的超时时间(秒)。 |
+| rules.weighted_upstreams.upstream.pass_host | array | 否 | "pass" | ["pass", "node", "rewrite"] | 决定如何传递主机名的模式。`pass` 将客户端的主机名传递给上游。`node` 传递上游节点中配置的主机。`rewrite` 传递 `upstream_host` 中配置的值。|
+| rules.weighted_upstreams.upstream.name | string | 否 | | | 用于指定服务名称、使用场景等的上游标识符。|
+| rules.weighted_upstreams.upstream.upstream_host | string | 否 | | | 当 `pass_host` 为 `rewrite` 时使用。上游的主机名。|
+
+## 示例
+
+以下示例展示了使用 `traffic-split` 插件的不同用例。
+
+:::note
+
+您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量:
+
+```bash
+admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g')
+```
+
+:::
+
+### 实现 Canary 发布
+
+以下示例演示了如何使用此插件实现 Canary 发布。
+
+Canary 发布是一种逐步部署,其中越来越多的流量被定向到新版本,从而实现受控和受监控的发布。此方法可确保在完全重定向所有流量之前,尽早识别和解决新版本中的任何潜在问题或错误。
+
+创建路由并使用以下规则配置 `traffic-split` 插件:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${ADMIN_API_KEY}" \
+  -d '{
+    "uri": "/headers",
+    "id": "traffic-split-route",
+    "plugins": {
+      "traffic-split": {
+        "rules": [
+          {
+            "weighted_upstreams": [
+              {
+                "upstream": {
+                  "type": "roundrobin",
+                  "scheme": "https",
+                  "pass_host": "node",
+                  "nodes": {
"httpbin.org:443":1 + } + }, + "weight": 3 + }, + { + "weight": 2 + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +每个 Upstream 的流量比例由该 Upstream 的权重占所有 Upstream 总权重的比例决定,这里总权重计算为:3 + 2 = 5。 + +因此,60% 的流量要转发到 `httpbin.org`,另外 40% 的流量要转发到 `mock.api7.ai`。 + +向路由发送 10 个连续请求来验证: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers" -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + +您应该会看到类似以下内容的响应: + +```text +httpbin.org: 6, mock.api7.ai: 4 +``` + +相应地调整上游权重以完成金丝雀发布。 + +### 实现蓝绿部署 + +以下示例演示如何使用此插件实现蓝绿部署。 + +蓝绿部署是一种部署策略,涉及维护两个相同的环境:蓝色和绿色。蓝色环境指的是当前的生产部署,绿色环境指的是新的部署。一旦绿色环境经过测试可以投入生产,流量将被路由到绿色环境,使其成为新的生产部署。 + +创建路由并配置 `traffic-split` 插件,以便仅当请求包含标头 `release: new_release` 时才执行插件以重定向流量: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/headers", + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["http_release","==","new_release"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + } + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +向路由发送一个带有 `release` 标头的请求: + +```shell +curl "http://127.0.0.1:9080/headers" -H 'release: new_release' +``` + +您应该会看到类似以下内容的响应: + +```json +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + ... 
+ } +} +``` + +向路由发送一个不带任何附加标头的请求: + +```shell +curl "http://127.0.0.1:9080/headers" +``` + +您应该会看到类似以下内容的响应: + +```json +{ + "headers": { + "accept": "*/*", + "host": "mock.api7.ai", + ... + } +} +``` + +### 使用 APISIX 表达式定义 POST 请求的匹配条件 + +以下示例演示了如何在规则中使用 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list),在满足 POST 请求的某些条件时有条件地执行插件。 + +创建路由并使用以下规则配置 `traffic-split` 插件: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/post", + "methods": ["POST"], + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["post_arg_id", "==", "1"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + } + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +发送主体为 `id=1` 的 POST 请求: + +```shell +curl "http://127.0.0.1:9080/post" -X POST \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d 'id=1' +``` + +您应该会看到类似以下内容的响应: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": { + "id": "1" + }, + "headers": { + "Accept": "*/*", + "Content-Length": "4", + "Content-Type": "application/x-www-form-urlencoded", + "Host": "httpbin.org", + ... + }, + ... 
+} +``` + +发送主体中不包含 `id=1` 的 POST 请求: + +```shell +curl "http://127.0.0.1:9080/post" -X POST \ + -H 'Content-Type: application/x-www-form-urlencoded' \ + -d 'random=string' +``` + +您应该看到请求已转发到 `mock.api7.ai`。 + +### 使用 APISIX 表达式定义 AND 匹配条件 + +以下示例演示了如何在规则中使用 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list),在满足多个条件时有条件地执行插件。 + +创建路由并配置 `traffic-split` 插件,以便仅在满足所有三个条件时重定向流量: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/headers", + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["arg_name","==","jack"], + ["http_user-id",">","23"], + ["http_apisix-key","~~","[a-z]+"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + }, + "weight": 3 + }, + { + "weight": 2 + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +如果满足条件,则 60% 的流量应定向到 `httpbin.org`,另外 40% 的流量应定向到 `mock.api7.ai`。如果不满足条件,则所有流量都应定向到 `mock.api7.ai`。 + +发送 10 个满足所有条件的连续请求以验证: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers?name=jack" -H 'user-id: 30' -H 'apisix-key: helloapisix' -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + +您应该会看到类似以下内容的响应: + +```text +httpbin.org: 6, mock.api7.ai: 4 +``` + +连续发送 10 个不满足条件的请求进行验证: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers?name=random" -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + 
+您应该会看到类似以下内容的响应: + +```text +httpbin.org: 0, mock.api7.ai: 10 +``` + +### 使用 APISIX 表达式定义或匹配条件 + +以下示例演示了如何在规则中使用 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list),在满足任一条件集时有条件地执行插件。 + +创建路由并配置 `traffic-split` 插件,以在满足任一配置条件集时重定向流量: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/headers", + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["arg_name","==","jack"], + ["http_user-id",">","23"], + ["http_apisix-key","~~","[a-z]+"] + ] + }, + { + "vars": [ + ["arg_name2","==","rose"], + ["http_user-id2","!",">","33"], + ["http_apisix-key2","~~","[a-z]+"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + }, + "weight": 3 + }, + { + "weight": 2 + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + } + }' +``` + +或者,您也可以使用 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 中的 OR 运算符来实现这些条件。 + +如果满足条件,则 60% 的流量应定向到 `httpbin.org`,其余 40% 应定向到 `mock.api7.ai`。如果不满足条件,则所有流量都应定向到 `mock.api7.ai`。 + +发送 10 个满足第二组条件的连续请求以验证: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers?name2=rose" -H 'user-id:30' -H 'apisix-key2: helloapisix' -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) && \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + +您应该会看到类似以下内容的响应: + +```json +httpbin.org: 6, mock.api7.ai: 4 +``` + +发送 10 个连续的不满足任何一组条件的请求来验证: + +```shell +resp=$(seq 10 | xargs -I{} curl "http://127.0.0.1:9080/headers?name=random" -sL) && \ + count_httpbin=$(echo "$resp" | grep "httpbin.org" | wc -l) && \ + count_mockapi7=$(echo "$resp" | grep "mock.api7.ai" | wc -l) 
&& \ + echo httpbin.org: $count_httpbin, mock.api7.ai: $count_mockapi7 +``` + +您应该会看到类似以下内容的响应: + +```json +httpbin.org: 0, mock.api7.ai: 10 +``` + +### 为不同的上游配置不同的规则 + +以下示例演示了如何在规则集和上游之间设置一对一映射。 + +创建一个路由并使用以下匹配规则配置 `traffic-split` 插件,以便在请求包含标头 `x-api-id: 1` 或 `x-api-id: 2` 时将流量重定向到相应的上游服务: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${ADMIN_API_KEY}" \ + -d '{ + "uri": "/headers", + "id": "traffic-split-route", + "plugins": { + "traffic-split": { + "rules": [ + { + "match": [ + { + "vars": [ + ["http_x-api-id","==","1"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "httpbin.org:443":1 + } + }, + "weight": 1 + } + ] + }, + { + "match": [ + { + "vars": [ + ["http_x-api-id","==","2"] + ] + } + ], + "weighted_upstreams": [ + { + "upstream": { + "type": "roundrobin", + "scheme": "https", + "pass_host": "node", + "nodes": { + "mock.api7.ai:443":1 + } + }, + "weight": 1 + } + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "postman-echo.com:443": 1 + }, + "scheme": "https", + "pass_host": "node" + } + }' +``` + +发送带有标头 `x-api-id: 1` 的请求: + +```shell +curl "http://127.0.0.1:9080/headers" -H 'x-api-id: 1' +``` + +您应该会看到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + ... + } +} +``` + +发送带有标头 `x-api-id: 2` 的请求: + +```shell +curl "http://127.0.0.1:9080/headers" -H 'x-api-id: 2' +``` + +您应该会看到类似于以下内容的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "headers": { + "accept": "*/*", + "host": "mock.api7.ai", + ... + } +} +``` + +发送不带任何附加标头的请求: + +```shell +curl "http://127.0.0.1:9080/headers" +``` + +您应该会看到类似以下内容的响应: + +```json +{ + "headers": { + "accept": "*/*", + "host": "postman-echo.com", + ... 
+ } +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ua-restriction.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ua-restriction.md new file mode 100644 index 0000000..101e20a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/ua-restriction.md @@ -0,0 +1,159 @@ +--- +title: ua-restriction +keywords: + - Apache APISIX + - API 网关 + - UA restriction +description: ua-restriction 插件使用用户代理的允许列表或拒绝列表来限制对上游资源的访问,防止网络爬虫过载并增强 API 安全性。 +--- + + + + + + + +## 描述 + +`ua-restriction` 插件支持通过配置用户代理的允许列表或拒绝列表来限制对上游资源的访问。一个常见的用例是防止网络爬虫使上游资源过载并导致服务降级。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| --------- | ------------- | ------ | ------ | ------ | -------------------------------- | +| bypass_missing | boolean | 否 | false | | 如果为 true,则在缺少 `User-Agent` 标头时绕过用户代理限制检查。| +| allowlist | array[string] | 否 | | | 要允许的用户代理列表。支持正则表达式。应配置 `allowlist` 和 `denylist` 中至少一个,但不能同时配置。| +| denylist | array[string] | 否 | | | 要拒绝的用户代理列表。支持正则表达式。应配置 `allowlist` 和 `denylist` 中至少一个,但不能同时配置。| +| message | string | 否 | "Not allowed" | | 拒绝用户代理访问时返回的消息。| + +## 示例 + +以下示例演示了如何针对不同场景配置 `ua-restriction`。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 拒绝网络爬虫并自定义错误消息 + +以下示例演示了如何配置插件以抵御不需要的网络爬虫并自定义拒绝消息。 + +创建路由并配置插件以使用自定义消息阻止特定爬虫访问资源: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ua-restriction-route", + "uri": "/anything", + "plugins": { + "ua-restriction": { + "bypass_missing": false, + "denylist": [ + "(Baiduspider)/(\\d+)\\.(\\d+)", + "bad-bot-1" + ], + "message": "Access denied" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该收到 `HTTP/1.1 200 OK` 响应。 + +使用不允许的用户代理向路由发送另一个请求: + +```shell +curl 
-i "http://127.0.0.1:9080/anything" -H 'User-Agent: Baiduspider/5.0' +``` + +您应该收到 `HTTP/1.1 403 Forbidden` 响应,其中包含以下消息: + +```text +{"message":"Access denied"} +``` + +### 绕过 UA 限制检查 + +以下示例说明如何配置插件以允许特定用户代理的请求绕过 UA 限制。 + +创建如下路由: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "ua-restriction-route", + "uri": "/anything", + "plugins": { + "ua-restriction": { + "bypass_missing": true, + "allowlist": [ + "good-bot-1" + ], + "message": "Access denied" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }' +``` + +向路由发送一个请求而不修改用户代理: + +```shell +curl -i "http://127.0.0.1:9080/anything" +``` + +您应该收到一个 `HTTP/1.1 403 Forbidden` 响应,其中包含以下消息: + +```text +{"message":"Access denied"} +``` + +向路由发送另一个请求,用户代理为空: + +```shell +curl -i "http://127.0.0.1:9080/anything" -H 'User-Agent: ' +``` + +您应该收到一个 `HTTP/1.1 200 OK` 响应。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/udp-logger.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/udp-logger.md new file mode 100644 index 0000000..45d0983 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/udp-logger.md @@ -0,0 +1,191 @@ +--- +title: udp-logger +keywords: + - APISIX + - API 网关 + - Plugin + - UDP Logger +description: 本文介绍了 API 网关 Apache APISIX 如何使用 udp-logger 插件将日志数据发送到 UDP 服务器。 +--- + + + +## 描述 + +`udp-logger` 插件可用于将日志数据发送到 UDP 服务器。 + +该插件还实现了将日志数据以 JSON 格式发送到监控工具或其它 UDP 服务的能力。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------ | ------- | ------------------------------------------------ | +| host | string | 是 | | | UDP 服务的 IP 地址或主机名。 | +| port | integer | 是 | | [0,...] | 目标端口。 | +| timeout | integer | 否 | 1000 | [1,...] 
| 发送数据超时时间。 | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| name | string | 否 | "udp logger" | | 标识 logger 的唯一标识符。如果您使用 Prometheus 监视 APISIX 指标,名称将以 `apisix_batch_process_entries` 导出。 | +| include_req_body | boolean | 否 | | [false, true] | 当设置为 `true` 时,日志中将包含请求体。 | +| include_req_body_expr | array | 否 | | | 当 `include_req_body` 属性设置为 `true` 时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录请求体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 | +| include_resp_body | boolean | 否 | false | [false, true]| 当设置为 `true` 时,日志中将包含响应体。 | +| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 属性设置为 `true` 时进行过滤响应体,并且只有当此处设置的表达式计算结果为 `true` 时,才会记录响应体。更多信息,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr)。 | + +该插件支持使用批处理器来聚合并批量处理条目(日志和数据)。这样可以避免插件频繁地提交数据,默认情况下批处理器每 `5` 秒钟或队列中的数据达到 `1000` 条时提交数据,如需了解批处理器相关参数设置,请参考 [Batch-Processor](../batch-processor.md#配置)。 + +### 默认日志格式数据 + +```json +{ + "apisix_latency": 99.999988555908, + "service_id": "", + "server": { + "version": "3.7.0", + "hostname": "localhost" + }, + "request": { + "method": "GET", + "headers": { + "connection": "close", + "host": "localhost" + }, + "url": "http://localhost:1984/opentracing", + "size": 65, + "querystring": {}, + "uri": "/opentracing" + }, + "start_time": 1704527399740, + "client_ip": "127.0.0.1", + "response": { + "status": 200, + "size": 136, + "headers": { + "server": "APISIX/3.7.0", + "content-type": "text/plain", + "transfer-encoding": "chunked", + "connection": "close" + } + }, + "upstream": "127.0.0.1:1982", + "route_id": "1", + "upstream_latency": 12, + "latency": 111.99998855591 +} +``` + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ---------------- | ------- | ------ | ------------- | ------- | ------------------------------------------------ | +| log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 
开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | + +:::info 注意 + +该设置全局生效。如果指定了 `log_format`,则所有绑定 `udp-logger` 的路由或服务都将使用该日志格式。 + +::: + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/udp-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你将在日志系统中看到如下类似日志: + +```json +{"@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1","host":"localhost","client_ip":"127.0.0.1"} +``` + +## 如何开启 + +你可以通过如下命令在指定路由上启用 `udp-logger` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/5 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "udp-logger": { + "host": "127.0.0.1", + "port": 3000, + "batch_max_size": 1, + "name": "udp logger" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" +}' +``` + +## 测试插件 + +现在你可以向 APISIX 发起请求: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +``` +HTTP/1.1 200 OK +... 
+hello, world +``` + +## 删除插件 + +当你需要删除该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/uri-blocker.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/uri-blocker.md new file mode 100644 index 0000000..5595d3c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/uri-blocker.md @@ -0,0 +1,117 @@ +--- +title: uri-blocker +keywords: + - Apache APISIX + - API 网关 + - URI Blocker +description: 本文介绍了 Apache APISIX uri-blocker 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`uri-blocker` 插件通过指定一系列 `block_rules` 来拦截用户请求。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------------- | ------ | ------ | ---------- | ------------------------------------------------------------------- | +| block_rules | array[string] | 是 | | | 正则过滤数组。它们都是正则规则,如果当前请求 URI 命中其中任何一个,则将响应代码设置为 `rejected_code` 以退出当前用户请求。例如:`["root.exe", "root.m+"]`。 | +| rejected_code | integer | 否 | 403 | [200, ...] 
| 当请求 URI 命中 `block_rules` 中的任何一个时,将返回的 HTTP 状态代码。 | +| rejected_msg | string | 否 | | 非空 | 当请求 URI 命中 `block_rules` 中的任何一个时,将返回的 HTTP 响应体。 | +| case_insensitive | boolean | 否 | false | | 是否忽略大小写。当设置为 `true` 时,在匹配请求 URI 时将忽略大小写。 | + +## 启用插件 + +以下示例展示了如何在指定的路由上启用 `uri-blocker` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "plugins": { + "uri-blocker": { + "block_rules": ["root.exe", "root.m+"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## 测试插件 + +启用并配置插件后,使用 `curl` 命令尝试访问 `block_rules` 中指定文件的 URI: + +```shell +curl -i http://127.0.0.1:9080/root.exe?a=a +``` + +如果发现返回了带有 `403` 状态码的 HTTP 响应头,则代表插件生效: + +```shell +HTTP/1.1 403 Forbidden +Date: Wed, 17 Jun 2020 13:55:41 GMT +Content-Type: text/html; charset=utf-8 +Content-Length: 150 +Connection: keep-alive +Server: APISIX web server +... +``` + +通过设置属性 `rejected_msg` 的值为 `access is not allowed`,将会收到包含如下信息的响应体: + +```shell +... +{"error_msg":"access is not allowed"} +... 
+``` + +## 删除插件 + +当你需要禁用 `uri-blocker` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/wolf-rbac.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/wolf-rbac.md new file mode 100644 index 0000000..058029c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/wolf-rbac.md @@ -0,0 +1,301 @@ +--- +title: wolf-rbac +keywords: + - Apache APISIX + - API 网关 + - Plugin + - wolf RBAC + - wolf-rbac +description: 本文介绍了关于 Apache APISIX `wolf-rbac` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +`wolf-rbac` 插件为 [role-based access control](https://en.wikipedia.org/wiki/Role-based_access_control) 系统提供了添加 [wolf](https://github.com/iGeeky/wolf) 到 Route 或 Service 的功能。此插件需要与 [Consumer](../terminology/consumer.md) 一起使用。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +| ------------- | ------ | ------ | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| server | string | 否 | "http://127.0.0.1:12180" | `wolf-server` 的服务地址。 | +| appid | string | 否 | "unset" | 在 `wolf-console` 中已经添加的应用 id。该字段支持使用 [APISIX Secret](../terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | +| header_prefix | string | 否 | "X-" | 自定义 HTTP 头的前缀。`wolf-rbac` 在鉴权成功后,会在请求头 (用于传给后端) 及响应头 (用于传给前端) 中添加 3 个 header:`X-UserId`, `X-Username`, `X-Nickname`。| + +## 接口 + +该插件在启用时将会增加以下接口: + +* /apisix/plugin/wolf-rbac/login +* /apisix/plugin/wolf-rbac/change_pwd +* /apisix/plugin/wolf-rbac/user_info + +:::note + +以上接口需要通过 [public-api](../../../en/latest/plugins/public-api.md) 插件暴露。 + +::: + +## 前提条件 + +如果要使用这个插件,你必须要[安装 
wolf](https://github.com/iGeeky/wolf/blob/master/quick-start-with-docker/README.md) 并启动它。 + +完成后,你需要添加`application`、`admin`、`regular user`、`permission`、`resource` 等字段,并将用户授权到 [wolf-console](https://github.com/iGeeky/wolf/blob/master/docs/usage.md)。 + +## 启用插件 + +首先需要创建一个 Consumer 并配置该插件,如下所示: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username":"wolf_rbac", + "plugins":{ + "wolf-rbac":{ + "server":"http://127.0.0.1:12180", + "appid":"restful" + } + }, + "desc":"wolf-rbac" +}' +``` + +:::note + +示例中填写的 `appid`,必须是已经在 wolf 控制台中存在的。 + +::: + +然后你需要添加 `wolf-rbac` 插件到 Route 或 Service 中。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/*", + "plugins": { + "wolf-rbac": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "www.baidu.com:80": 1 + } + } +}' +``` + +你还可以通过 [APISIX Dashboard](https://github.com/apache/apisix-dashboard) 的 Web 界面完成上述操作。 + + + +## 测试插件 + +你可以使用 [public-api](../../../en/latest/plugins/public-api.md) 插件来暴露 API. 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/wal \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/apisix/plugin/wolf-rbac/login", + "plugins": { + "public-api": {} + } +}' +``` + +同样,你需要参考上述命令为 `change_pwd` 和 `user_info` 两个 API 配置路由。 + +现在你可以登录并获取 wolf `rbac_token`: + +```shell +curl http://127.0.0.1:9080/apisix/plugin/wolf-rbac/login -i \ +-H "Content-Type: application/json" \ +-d '{"appid": "restful", "username":"test", "password":"user-password", "authType":1}' +``` + +``` +HTTP/1.1 200 OK +Date: Wed, 24 Jul 2019 10:33:31 GMT +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server +{"rbac_token":"V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts","user_info":{"nickname":"test","username":"test","id":"749"}} +``` + +:::note + +上述示例中,`appid`、`username` 和 `password` 必须为 wolf 系统中真实存在的。 + +`authType` 为认证类型,`1` 为密码认证(默认),`2` 为 LDAP 认证。`wolf` 从 0.5.0 版本开始支持了 LDAP 认证。 + +::: + +也可以使用 x-www-form-urlencoded 方式登陆: + +```shell +curl http://127.0.0.1:9080/apisix/plugin/wolf-rbac/login -i \ +-H "Content-Type: application/x-www-form-urlencoded" \ +-d 'appid=restful&username=test&password=user-password' +``` + +现在开始测试 Route: + +- 缺少 token + +```shell +curl http://127.0.0.1:9080/ -H"Host: www.baidu.com" -i +``` + +```shell +HTTP/1.1 401 Unauthorized +... 
+{"message":"Missing rbac token in request"} +``` + +- token 放到请求头 (Authorization) 中: + +```shell +curl http://127.0.0.1:9080/ -H"Host: www.baidu.com" \ +-H 'Authorization: V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts' -i +``` + +```shell +HTTP/1.1 200 OK + + +``` + +- token 放到请求头 (x-rbac-token) 中: + +```shell +curl http://127.0.0.1:9080/ -H"Host: www.baidu.com" \ +-H 'x-rbac-token: V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts' -i +``` + +```shell +HTTP/1.1 200 OK + + +``` + +- token 放到请求参数中: + +```shell +curl 'http://127.0.0.1:9080?rbac_token=V1%23restful%23eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts' -H"Host: www.baidu.com" -i +``` + +```shell +HTTP/1.1 200 OK + + +``` + +- token 放到 `cookie` 中: + +```shell +curl http://127.0.0.1:9080 -H"Host: www.baidu.com" \ +--cookie x-rbac-token=V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts -i +``` + +```shell +HTTP/1.1 200 OK + + +``` + +- 获取用户信息: + +```shell +curl http://127.0.0.1:9080/apisix/plugin/wolf-rbac/user_info \ +--cookie x-rbac-token=V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts -i +``` + +```shell +HTTP/1.1 200 OK +{ + "user_info":{ + "nickname":"test", + "lastLogin":1582816780, + 
"id":749, + "username":"test", + "appIDs":["restful"], + "manager":"none", + "permissions":{"USER_LIST":true}, + "profile":null, + "roles":{}, + "createTime":1578820506, + "email":"" + } +} +``` + +- 更改用户的密码: + +```shell +curl http://127.0.0.1:9080/apisix/plugin/wolf-rbac/change_pwd \ +-H "Content-Type: application/json" \ +--cookie x-rbac-token=V1#restful#eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6NzQ5LCJ1c2VybmFtZSI6InRlc3QiLCJtYW5hZ2VyIjoiIiwiYXBwaWQiOiJyZXN0ZnVsIiwiaWF0IjoxNTc5NDQ5ODQxLCJleHAiOjE1ODAwNTQ2NDF9.n2-830zbhrEh6OAxn4K_yYtg5pqfmjpZAjoQXgtcuts -i \ +-X PUT -d '{"oldPassword": "old password", "newPassword": "new password"}' +``` + +```shell +HTTP/1.1 200 OK +{"message":"success to change password"} +``` + +## 删除插件 + +当你需要禁用 `wolf-rbac` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/*", + "plugins": { + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "www.baidu.com:80": 1 + } + } +}' +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/workflow.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/workflow.md new file mode 100644 index 0000000..5ea0529 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/workflow.md @@ -0,0 +1,386 @@ +--- +title: workflow +keywords: + - Apache APISIX + - API 网关 + - Plugin + - workflow + - 流量控制 +description: workflow 插件支持根据给定的一组规则有条件地执行对客户端流量的用户定义操作。这提供了一种实现复杂流量管理的细粒度方法。 +--- + + + + + + + +## 描述 + +`workflow` 插件支持根据给定的规则集有条件地执行对客户端流量的用户定义操作,这些规则集使用 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 定义。这为流量管理提供了一种细粒度的方法。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| ------------- | ------ | ------ | ------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------- | +| rules | array[object] | 是 | | | 一对或多对匹配条件和要执行的操作组成的数组。 | +| rules.case | array[array] | 否 | | | 一个或多个匹配条件的数组,形式为 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list),例如 `{"arg_name", "==", "json"}`。 | +| rules.actions | array[object] | 是 | | | 条件匹配成功后要执行的操作的数组。目前数组只支持一个操作,必须是 `return` 或者 `limit-count`。当操作配置为 `return` 时,可以配置条件匹配成功时返回给客户端的 HTTP 状态码。当操作配置为 `limit-count` 时,可以配置 [`limit-count`](./limit-count.md) 插件除 `group` 之外的所有选项。 | + +## 示例 + +以下示例演示了如何在不同场景中使用 `workflow` 插件。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +### 有条件地返回响应 HTTP 状态代码 + +以下示例演示了一个简单的规则,其中包含一个匹配条件和一个关联操作,用于有条件地返回 HTTP 状态代码。 + +使用 `workflow` 插件创建一个路由,当请求的 URI 路径为 `/anything/rejected` 时返回 HTTP 状态代码 403: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "workflow-route", + "uri": "/anything/*", + "plugins": { + "workflow":{ + "rules":[ + { + "case":[ + ["uri", "==", "/anything/rejected"] + ], + "actions":[ + [ + "return", + {"code": 403} + ] + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +发送与任何规则都不匹配的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/anything" +``` + +您应该收到 `HTTP/1.1 200 OK` 响应。 + +发送与配置的规则匹配的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/rejected" +``` + +您应该收到以下 `HTTP/1.1 403 Forbidden` 响应: + +```text +{"error_msg":"rejected by workflow"} +``` + +### 通过 URI 和查询参数有条件地应用速率限制 + +以下示例演示了一条具有两个匹配条件和一个关联操作的规则,用于有条件地限制请求速率。 + +使用 `workflow` 插件创建路由,以在 URI 路径为 `/anything/rate-limit` 且查询参数 `env` 值为 `v1` 时应用速率限制: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "workflow-route", + "uri": "/anything/*", + "plugins":{ + "workflow":{ + "rules":[ + { + "case":[ + ["uri", "==", "/anything/rate-limit"], + 
["arg_env", "==", "v1"] + ], + "actions":[ + [ + "limit-count", + { + "count":1, + "time_window":60, + "rejected_code":429 + } + ] + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +生成两个符合第二条规则的连续请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/rate-limit?env=v1" +``` + +您应该收到 `HTTP/1.1 200 OK` 响应和 `HTTP 429 Too Many Requests` 响应。 + +生成不符合条件的请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/anything?env=v1" +``` + +您应该收到所有请求的 `HTTP/1.1 200 OK` 响应,因为它们不受速率限制。 + +### 消费者有条件地应用速率限制 + +以下示例演示了如何配置插件以根据以下规范执行速率限制: + +* 消费者 `john` 在 30 秒内应有 5 个请求的配额 +* 消费者 `jane` 在 30 秒内应有 3 个请求的配额 +* 所有其他消费者在 30 秒内应有 2 个请求的配额 + +虽然此示例将使用 [`key-auth`](./key-auth.md),但您可以轻松地将其替换为其他身份验证插件。 + +创建消费者 `john`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "john" + }' +``` + +Create `key-auth` credential for the consumer: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/john/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-john-key-auth", + "plugins": { + "key-auth": { + "key": "john-key" + } + } + }' +``` + +创建第二个消费者 `jane`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jane" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jane/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jane-key-auth", + "plugins": { + "key-auth": { + "key": "jane-key" + } + } + }' +``` + +创建第三个消费者 `jimmy`: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "username": "jimmy" + }' +``` + +为消费者创建 `key-auth` 凭证: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/consumers/jimmy/credentials" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "cred-jimmy-key-auth", + "plugins": { + 
"key-auth": { + "key": "jimmy-key" + } + } + }' +``` + +使用 `workflow` 和 `key-auth` 插件创建路由,并设置所需的速率限制规则: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "workflow-route", + "uri": "/anything", + "plugins":{ + "key-auth": {}, + "workflow":{ + "rules":[ + { + "actions": [ + [ + "limit-count", + { + "count": 5, + "key": "consumer_john", + "key_type": "constant", + "rejected_code": 429, + "time_window": 30 + } + ] + ], + "case": [ + [ + "consumer_name", + "==", + "john" + ] + ] + }, + { + "actions": [ + [ + "limit-count", + { + "count": 3, + "key": "consumer_jane", + "key_type": "constant", + "rejected_code": 429, + "time_window": 30 + } + ] + ], + "case": [ + [ + "consumer_name", + "==", + "jane" + ] + ] + }, + { + "actions": [ + [ + "limit-count", + { + "count": 2, + "key": "$consumer_name", + "key_type": "var", + "rejected_code": 429, + "time_window": 30 + } + ] + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +为了验证,请使用 `john` 的密钥发送 6 个连续的请求: + +```shell +resp=$(seq 6 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H 'apikey: john-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,显示在 6 个请求中,5 个请求成功(状态代码 200),而其他请求被拒绝(状态代码 429)。 + +```text +200: 5,429: 1 +``` + +使用 `jane` 的密钥连续发送 6 个请求: + +```shell +resp=$(seq 6 | xargs -I{} curl "http://127.0.0.1:9080/anything" -H 'apikey: jane-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,显示在 6 个请求中,3 个请求成功(状态代码 200),而其他请求被拒绝(状态代码 429)。 + +```text +200: 3,429: 3 +``` + +使用 `jimmy` 的密钥发送 3 个连续请求: + +```shell +resp=$(seq 3 | xargs -I{} curl 
"http://127.0.0.1:9080/anything" -H 'apikey: jimmy-key' -o /dev/null -s -w "%{http_code}\n") && \ + count_200=$(echo "$resp" | grep "200" | wc -l) && \ + count_429=$(echo "$resp" | grep "429" | wc -l) && \ + echo "200": $count_200, "429": $count_429 +``` + +您应该看到以下响应,显示在 3 个请求中,2 个请求成功(状态代码 200),而其他请求被拒绝(状态代码 429)。 + +```text +200: 2,429: 1 +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/zipkin.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/zipkin.md new file mode 100644 index 0000000..5e6488e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/plugins/zipkin.md @@ -0,0 +1,265 @@ +--- +title: zipkin +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Zipkin +description: Zipkin 是一个开源的分布式链路追踪系统。`zipkin` 插件为 APISIX 提供了追踪功能,并根据 Zipkin API 规范将追踪数据上报给 Zipkin。 +--- + + + + + + + +## 描述 + +[Zipkin](https://github.com/openzipkin/zipkin) 是一个开源的分布式链路追踪系统。`zipkin` 插件为 APISIX 提供了追踪功能,并根据 [Zipkin API 规范](https://zipkin.io/pages/instrumenting.html) 将追踪数据上报给 Zipkin。 + +该插件还支持将追踪数据发送到其他兼容的收集器,例如 [Jaeger](https://www.jaegertracing.io/docs/1.51/getting-started/#migrating-from-zipkin) 和 [Apache SkyWalking](https://skywalking.apache.org/docs/main/latest/en/setup/backend/zipkin-trace/#zipkin-receiver),这两者都支持 Zipkin [v1](https://zipkin.io/zipkin-api/zipkin-api.yaml) 和 [v2](https://zipkin.io/zipkin-api/zipkin2-api.yaml) API。 + +## 静态配置 + +默认情况下,`zipkin` 插件的 NGINX 变量配置在 [默认配置](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua) 中设置为 `false`: + +要修改此值,请将更新后的配置添加到 `config.yaml` 中。例如: + +```yaml +plugin_attr: + zipkin: + set_ngx_var: true +``` + +重新加载 APISIX 以使更改生效。 + +## 属性 + +查看配置文件以获取所有插件可用的配置选项。 + +| 名称 | 类型 | 是否必需 | 默认值 | 有效值 | 描述 | +|--------------|---------|----------|----------------|-------------|------------------| +| endpoint | string | 是 | | | 要 POST 的 Zipkin span 端点,例如 `http://127.0.0.1:9411/api/v2/spans`。 | +|sample_ratio| number | 是 | | [0.00001, 1] | 请求采样频率。设置为 `1` 表示对每个请求进行采样。 | 
+|service_name| string | 否 | "APISIX" | | 在 Zipkin 中显示的服务名称。 | +|server_addr | string | 否 | `$server_addr` 的值 | IPv4 地址 | Zipkin 报告器的 IPv4 地址。例如,可以将其设置为你的外部 IP 地址。 | +|span_version| integer | 否 | `2` | [1, 2] | span 类型的版本。 | + +## 示例 + +以下示例展示了使用 `zipkin` 插件的不同用例。 + +### 将追踪数据发送到 Zipkin + +以下示例演示了如何追踪对路由的请求,并将追踪数据发送到使用 [Zipkin API v2](https://zipkin.io/zipkin-api/zipkin2-api.yaml) 的 Zipkin。还将介绍 span 版本 2 和 版本 1 之间的区别。 + +在 Docker 中启动一个 Zipkin 实例: + +```shell +docker run -d --name zipkin -p 9411:9411 openzipkin/zipkin +``` + +创建一条路由,开启 `zipkin` 插件,并使用其默认的 `span_version`,即 `2`。同时请根据需要调整 Zipkin HTTP 端点的 IP 地址,将采样比率配置为 `1` 以追踪每个请求。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "zipkin-tracing-route", + "uri": "/anything", + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9411/api/v2/spans", + "sample_ratio": 1, + "span_version": 2 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl "http://127.0.0.1:9080/anything" +``` + +你应该收到一个类似于以下的 `HTTP/1.1 200 OK` 响应: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "127.0.0.1", + "User-Agent": "curl/7.64.1", + "X-Amzn-Trace-Id": "Root=1-65af2926-497590027bcdb09e34752b78", + "X-B3-Parentspanid": "347dddedf73ec176", + "X-B3-Sampled": "1", + "X-B3-Spanid": "429afa01d0b0067c", + "X-B3-Traceid": "aea58f4b490766eccb08275acd52a13a", + "X-Forwarded-Host": "127.0.0.1" + }, + ... 
+} +``` + +导航到 Zipkin Web UI [http://127.0.0.1:9411/zipkin](http://127.0.0.1:9411/zipkin) 并点击 __Run Query__,你应该看到一个与请求对应的 trace: + +![来自请求的追踪](https://static.api7.ai/uploads/2024/01/23/MaXhacYO_zipkin-run-query.png) + +点击 __Show__ 查看更多 trace 细节: + +![v2 trace span](https://static.api7.ai/uploads/2024/01/23/3SmfFq9f_trace-details.png) + +请注意,使用 span 版本 2 时,每个被 trace 的请求会创建以下 span: + +```text +request +├── proxy +└── response +``` + +其中 `proxy` 表示从请求开始到 `header_filter` 开始的时间,而 `response` 表示从 `header_filter` 开始到 `log` 开始的时间。 + +现在,更新路由上的插件以使用 span 版本 1: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/zipkin-tracing-route" -X PATCH \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "plugins": { + "zipkin": { + "span_version": 1 + } + } + }' +``` + +向路由发送另一个请求: + +```shell +curl "http://127.0.0.1:9080/anything" +``` + +在 Zipkin Web UI 中,你应该看到一个具有以下细节的新 trace: + +![v1 trace span](https://static.api7.ai/uploads/2024/01/23/OPw2sTPa_v1-trace-spans.png) + +请注意,使用较旧的 span 版本 1 时,每个被追踪的请求会创建以下 span: + +```text +request +├── rewrite +├── access +└── proxy + └── body_filter +``` + +### 将追踪数据发送到 Jaeger + +以下示例演示了如何追踪对路由的请求并将追踪数据发送到 Jaeger。 + +在 Docker 中启动一个 Jaeger 实例: + +```shell +docker run -d --name jaeger \ + -e COLLECTOR_ZIPKIN_HOST_PORT=9411 \ + -p 16686:16686 \ + -p 9411:9411 \ + jaegertracing/all-in-one +``` + +创建一条路由并开启 `zipkin` 插件。请根据需要调整 Zipkin HTTP 端点的 IP 地址,并将采样比率配置为 `1` 以追踪每个请求。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \ + -H "X-API-KEY: ${admin_key}" \ + -d '{ + "id": "zipkin-tracing-route", + "uri": "/anything", + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9411/api/v2/spans", + "sample_ratio": 1 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }' +``` + +向路由发送请求: + +```shell +curl "http://127.0.0.1:9080/anything" +``` + +你应该收到一个 `HTTP/1.1 200 OK` 响应。 + +导航到 Jaeger Web UI [http://127.0.0.1:16686](http://127.0.0.1:16686),选择 APISIX 作为服务,并点击 __Find Traces__,您应该看到一个与请求对应的 trace: 
+ +![jaeger trace](https://static.api7.ai/uploads/2024/01/23/X6QdLN3l_jaeger.png) + +同样地,一旦点击进入一个追踪,你应该会找到更多 span 细节: + +![jaeger 细节](https://static.api7.ai/uploads/2024/01/23/iP9fXI2A_jaeger-details.png) + +### 在日志中使用追踪变量 + +以下示例演示了如何配置 `zipkin` 插件以设置以下内置变量,这些变量可以在日志插件或访问日志中使用: + +- `zipkin_context_traceparent`: [W3C trace context](https://www.w3.org/TR/trace-context/#trace-context-http-headers-format) +- `zipkin_trace_id`: 当前 span 的 trace_id +- `zipkin_span_id`: 当前 span 的 span_id + +按照以下方式更新配置文件。你可以自定义访问日志格式以使用 `zipkin` 插件变量,并在 `set_ngx_var` 字段中设置 `zipkin` 变量。 + +```yaml title="conf/config.yaml" +nginx_config: + http: + enable_access_log: true + access_log_format: '{"time": "$time_iso8601","zipkin_context_traceparent": "$zipkin_context_traceparent","zipkin_trace_id": "$zipkin_trace_id","zipkin_span_id": "$zipkin_span_id","remote_addr": "$remote_addr"}' + access_log_format_escape: json +plugin_attr: + zipkin: + set_ngx_var: true +``` + +重新加载 APISIX 以使配置更改生效。 + +当生成请求时,你应该看到类似的访问日志: + +```text +{"time": "23/Jan/2024:06:28:00 +0000","zipkin_context_traceparent": "00-61bce33055c56f5b9bec75227befd142-13ff3c7370b29925-01","zipkin_trace_id": "61bce33055c56f5b9bec75227befd142","zipkin_span_id": "13ff3c7370b29925","remote_addr": "172.28.0.1"} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/profile.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/profile.md new file mode 100644 index 0000000..abd2022 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/profile.md @@ -0,0 +1,42 @@ +--- +title: 基于环境变量进行配置文件切换 +--- + + + +配置之所以从代码中提取出来,就是为了更好适应变化。通常我们的应用都有开发环境、生产环境等不同运行环境,这些环境下应用的一些配置肯定会有不同,比如:配置中心的地址等。 + +如果把所有环境的配置都放在同一个文件里,非常不好管理,我们接到新需求后,在开发环境进行开发时,需要将配置文件中的参数都改成开发环境的,提交代码时还要改回去,这样改来改去非常容易出错。 + +上述问题的解决办法就是通过环境变量来区分当前运行环境,并通过环境变量来切换不同配置文件。APISIX 中对应的环境变量就是:`APISIX_PROFILE`。 + +在没有设置 `APISIX_PROFILE` 时,默认使用以下三个配置文件: + +* conf/config.yaml +* conf/apisix.yaml +* conf/debug.yaml + +如果设置了 `APISIX_PROFILE` 的值为 
`prod`,则使用以下三个配置文件: + +* conf/config-prod.yaml +* conf/apisix-prod.yaml +* conf/debug-prod.yaml + +通过这种方式虽然会增加配置文件的数量,但可以独立管理,再配置 git 等版本管理工具,还能更好实现版本管理。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/router-radixtree.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/router-radixtree.md new file mode 100644 index 0000000..0631be2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/router-radixtree.md @@ -0,0 +1,353 @@ +--- +title: 路由 RadixTree +--- + + + +### 什么是 libradixtree? + +[libradixtree](https://github.com/api7/lua-resty-radixtree), 是在 `Lua` 中为 `OpenResty` 实现的自适应 +[基数树](https://zh.wikipedia.org/wiki/%E5%9F%BA%E6%95%B0%E6%A0%91) 。 + +`Apache APISIX` 使用 `libradixtree` 作为路由调度库。 + +### 如何在 Apache APISIX 中使用 libradixtree? + +`libradixtree` 是基于 [rax](https://github.com/antirez/rax) 的 `lua-resty-*` 实现。 + +我们通过下面的示例可以有一个直观的理解。 + +#### 1. 完全匹配 + +```text +/blog/foo +``` + +此时只能匹配 `/blog/foo` 。 + +#### 2. 前缀匹配 + +```text +/blog/bar* +``` + +它将匹配带有前缀 `/blog/bar` 的路径, +例如: `/blog/bar/a` 、 `/blog/bar/b` 、 `/blog/bar/c/d/e` 、 `/blog/bar` 等。 + +#### 3. 匹配优先级 + +完全匹配 -> 深度前缀匹配 + +以下是规则: + +```text +/blog/foo/* +/blog/foo/a/* +/blog/foo/c/* +/blog/foo/bar +``` + +| 路径 | 匹配结果 | +| --------------- | --------------- | +| /blog/foo/bar | `/blog/foo/bar` | +| /blog/foo/a/b/c | `/blog/foo/a/*` | +| /blog/foo/c/d | `/blog/foo/c/*` | +| /blog/foo/gloo | `/blog/foo/*` | +| /blog/bar | not match | + +#### 4. 
不同的路由具有相同 `uri` + +当不同的路由有相同的 `uri` 时,可以通过设置路由的 `priority` 字段来决定先匹配哪条路由,或者添加其他匹配规则来区分不同的路由。 + +注意:在匹配规则中, `priority` 字段优先于除 `uri` 之外的其他规则。 + +1、不同的路由有相同的 `uri` 并设置 `priority` 字段 + +创建两条 `priority` 值不同的路由(值越大,优先级越高)。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "priority": 3, + "uri": "/hello" +}' +``` + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "priority": 2, + "uri": "/hello" +}' +``` + +测试: + +```shell +curl http://127.0.0.1:9080/hello +1980 +``` + +所有请求只到达端口 `1980` 的路由。 + +2、不同的路由有相同的 `uri` 并设置不同的匹配条件 + +以下是设置主机匹配规则的示例: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "hosts": ["localhost.com"], + "uri": "/hello" +}' +``` + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "hosts": ["test.com"], + "uri": "/hello" +}' +``` + +测试: + +```shell +$ curl http://127.0.0.1:9080/hello -H 'host: localhost.com' +1980 +``` + +```shell +$ curl http://127.0.0.1:9080/hello -H 'host: test.com' +1981 +``` + +```shell +$ curl http://127.0.0.1:9080/hello +{"error_msg":"404 Route Not Found"} +``` + +`host` 规则匹配,请求命中对应的上游,`host` 不匹配,请求返回 404 消息。 + +#### 5. 
参数匹配 + +当使用 `radixtree_uri_with_parameter` 时,我们可以用参数匹配路由。 + +例如,使用配置: + +```yaml +apisix: + router: + http: 'radixtree_uri_with_parameter' +``` + +示例: + +```bash +/blog/:name +``` + +此时将匹配 `/blog/dog` 和 `/blog/cat`。 + +更多使用方式请参考:[lua-resty-radixtree#parameters-in-path](https://github.com/api7/lua-resty-radixtree/#parameters-in-path) + +### 如何通过 Nginx 内置变量过滤路由 + +具体参数及使用方式请查看 [radixtree#new](https://github.com/api7/lua-resty-radixtree#new) 文档,下面是一个简单的示例: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/index.html", + "vars": [ + ["http_host", "==", "iresty.com"], + ["cookie_device_id", "==", "a66f0cdc4ba2df8c096f74c9110163a9"], + ["arg_name", "==", "json"], + ["arg_age", ">", "18"], + ["arg_address", "~~", "China.*"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +这个路由需要请求头 `host` 等于 `iresty.com`, +请求 cookie `device_id` 等于 `a66f0cdc4ba2df8c096f74c9110163a9` 等。 + +### 如何通过 POST 表单属性过滤路由 + +APISIX 支持通过 POST 表单属性过滤路由,其中需要您使用 `Content-Type` = `application/x-www-form-urlencoded` 的 POST 请求。 + +我们可以定义这样的路由: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "methods": ["POST"], + "uri": "/_post", + "vars": [ + ["post_arg_name", "==", "json"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +当 POST 表单中包含 `name=json` 的属性时,将匹配到路由。 + +### 如何通过 GraphQL 属性过滤路由 + +目前,APISIX 可以处理 HTTP GET 和 POST 方法。请求体正文可以是 GraphQL 查询字符串,也可以是 JSON 格式的内容。 + +APISIX 支持通过 GraphQL 的一些属性过滤路由。目前我们支持: + +* graphql_operation +* graphql_name +* graphql_root_fields + +例如,像这样的 GraphQL: + +```graphql +query getRepo { + owner { + name + } + repo { + created + } +} +``` + +* `graphql_operation` 是 `query` +* `graphql_name` 是 `getRepo`, +* `graphql_root_fields` 是 `["owner", "repo"]` + +我们可以用以下方法过滤掉这样的路由: + +```shell +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: 
$admin_key" -X PUT -i -d ' +{ + "methods": ["POST", "GET"], + "uri": "/graphql", + "vars": [ + ["graphql_operation", "==", "query"], + ["graphql_name", "==", "getRepo"], + ["graphql_root_fields", "has", "owner"] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +我们可以通过以下三种方式分别去验证 GraphQL 匹配: + +1. 使用 GraphQL 查询字符串 + +```shell +$ curl -H 'content-type: application/graphql' -X POST http://127.0.0.1:9080/graphql -d ' +query getRepo { + owner { + name + } + repo { + created + } +}' +``` + +2. 使用 JSON 格式 + +```shell +$ curl -H 'content-type: application/json' -X POST \ +http://127.0.0.1:9080/graphql --data '{"query": "query getRepo { owner {name } repo {created}}"}' +``` + +3. 尝试 `GET` 请求 + +```shell +$ curl -H 'content-type: application/graphql' -X GET \ +"http://127.0.0.1:9080/graphql?query=query getRepo { owner {name } repo {created}}" -g +``` + +为了防止花费太多时间读取无效的 `GraphQL` 请求正文,我们只读取前 `1 MiB` +来自请求体的数据。此限制是通过以下方式配置的: + +```yaml +graphql: + max_size: 1048576 +``` + +如果你需要传递一个大于限制的 GraphQL 查询语句,你可以增加 `conf/config.yaml` 中的值。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/ssl-protocol.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/ssl-protocol.md new file mode 100644 index 0000000..d776e7e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/ssl-protocol.md @@ -0,0 +1,353 @@ +--- +title: SSL 协议 +--- + + + +`APISIX` 支持 TLS 协议,还支持动态的为每一个 SNI 指定不同的 TLS 协议版本。 + +**为了安全考虑,APISIX 默认使用的加密套件不支持 TLSv1.1 以及更低的版本。** +**如果你需要启用 TLSv1.1 协议,请在 config.yaml 的配置项 apisix.ssl.ssl_ciphers 增加 TLSv1.1 协议所支持的加密套件。** + +## ssl_protocols 配置 + +### 静态配置 + +静态配置中 config.yaml 的 ssl_protocols 参数会作用于 APISIX 全局,但是不能动态修改,仅当匹配的 SSL 资源未设置 `ssl_protocols`,静态配置才会生效。 + +```yaml +apisix: + ssl: + ssl_protocols: TLSv1.2 TLSv1.3 # default TLSv1.2 TLSv1.3 +``` + +### 动态配置 + +使用 ssl 资源中 ssl_protocols 字段动态的为每一个 SNI 指定不同的 TLS 协议版本。 + +指定 test.com 域名使用 TLSv1.2 TLSv1.3 协议版本: + +```bash +{ + "cert": "$cert", + "key": 
"$key", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.2", + "TLSv1.3" + ] +} +``` + +### 注意事项 + +- 动态配置优先级比静态配置更高,当 ssl 资源配置项 ssl_protocols 不为空时 静态配置将会被覆盖。 +- 静态配置作用于全局需要重启 apisix 才能生效。 +- 动态配置可细粒度的控制每个 SNI 的 TLS 协议版本,并且能够动态修改,相比于静态配置更加灵活。 + +## 使用示例 + +### 如何指定 TLSv1.1 协议 + +存在一些老旧的客户端,仍然采用较低级别的 TLSv1.1 协议版本,而新的产品则使用较高安全级别的 TLS 协议版本。如果让新产品支持 TLSv1.1 可能会带来一些安全隐患。为了保证 API 的安全性,我们需要在协议版本之间进行灵活转换。 +例如:test.com 是老旧客户端所使用的域名,需要将其配置为 TLSv1.1 而 test2.com 属于新产品,同时支持了 TLSv1.2,TLSv1.3 协议。 + +1. config.yaml 配置。 + +```yaml +apisix: + ssl: + ssl_protocols: TLSv1.3 + # ssl_ciphers is for reference only + ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA +``` + +2. 为 test.com 域名指定 TLSv1.1 协议版本。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.1" + ] +}' +``` + +3. 为 test.com 创建 SSL 对象,未指定 TLS 协议版本,将默认使用静态配置。 + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server2.crt)"'", + "key": "'"$(cat server2.key)"'", + "snis": ["test2.com"] +}' +``` + +4. 访问验证 + +使用 TLSv1.3 访问 test.com 失败: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` + +使用 TLSv1.1 访问 test.com 成功: + +```shell +$ curl --tls-max 1.1 --tlsv1.1 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.1 (OUT), TLS handshake, Client hello (1): +* TLSv1.1 (IN), TLS handshake, Server hello (2): +* TLSv1.1 (IN), TLS handshake, Certificate (11): +* TLSv1.1 (IN), TLS handshake, Server key exchange (12): +* TLSv1.1 (IN), TLS handshake, Server finished (14): +* TLSv1.1 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.1 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.1 (OUT), TLS handshake, Finished (20): +* TLSv1.1 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.1 / ECDHE-RSA-AES256-SHA +``` + +使用 TLSv1.3 访问 test2.com 成功: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +使用 TLSv1.1 访问 test2.com 失败: + +```shell +curl --tls-max 1.1 --tlsv1.1 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.1 (OUT), TLS handshake, Client hello (1): +* TLSv1.1 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` + +### 证书关联多个域名,但域名之间使用不同的 TLS 协议 + +有时候,我们可能会遇到这样一种情况,即一个证书关联了多个域名,但是它们需要使用不同的 TLS 协议来保证安全性。例如 test.com 域名需要使用 TlSv1.2 协议,而 test2.com 域名则需要使用 TLSv1.3 协议。在这种情况下,我们不能简单地为所有的域名创建一个 SSL 对象,而是需要为每个域名单独创建一个 SSL 对象,并指定相应的协议版本。这样,我们就可以根据不同的域名和协议版本来进行正确的 SSL 握手和加密通信。示例如下: + +1. 使用证书为 test.com 创建 ssl 对象,并指定 TLSv1.2 协议。 + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.2" + ] +}' +``` + +2. 
使用与 test.com 同一证书,为 test2.com 创建 ssl 对象,并指定 TLSv1.3 协议。 + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/2 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test2.com"], + "ssl_protocols": [ + "TLSv1.3" + ] +}' +``` + +3. 访问验证 + +使用 TLSv1.2 访问 test.com 成功: + +```shell +$ curl --tls-max 1.2 --tlsv1.2 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS handshake, Server hello (2): +* TLSv1.2 (IN), TLS handshake, Certificate (11): +* TLSv1.2 (IN), TLS handshake, Server key exchange (12): +* TLSv1.2 (IN), TLS handshake, Server finished (14): +* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.2 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.2 (OUT), TLS handshake, Finished (20): +* TLSv1.2 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test.com +* start date: Jul 20 15:50:08 2023 GMT +* expire date: Jul 17 15:50:08 2033 GMT +* issuer: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test.com +* SSL certificate verify result: EE certificate key too weak (66), continuing anyway. 
+* Using HTTP2, server supports multi-use +* Connection state changed (HTTP/2 confirmed) +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* Using Stream ID: 1 (easy handle 0x5608905ee2e0) +> HEAD / HTTP/2 +> Host: test.com:9443 +> user-agent: curl/7.74.0 +> accept: */* + +``` + +使用 TLSv1.3 协议访问 test.com 失败: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version + +``` + +使用 TLSv1.3 协议访问 test2.com 成功: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test2.com +* start date: Jul 20 16:05:47 2023 GMT +* expire date: Jul 17 16:05:47 2033 GMT +* issuer: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test2.com +* SSL certificate verify result: EE certificate key too weak (66), continuing anyway. +* Using HTTP2, server supports multi-use +* Connection state changed (HTTP/2 confirmed) +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* Using Stream ID: 1 (easy handle 0x55569cbe42e0) +> HEAD / HTTP/2 +> Host: test2.com:9443 +> user-agent: curl/7.74.0 +> accept: */* +> +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* old SSL session ID is stale, removing +``` + +使用 TLSv1.2 协议访问 test2.com 失败: + +```shell +$ curl --tls-max 1.2 --tlsv1.2 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/status-api.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/status-api.md new file mode 100644 index 0000000..7eae237 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/status-api.md @@ -0,0 +1,78 @@ +--- +title: Status API +--- + + + +在 Apache APISIX 中,Status API 用于: + +* 检查 APISIX 是否已成功启动并正确运行 +* 检查所有 workers 是否已收到配置并加载 + +要更改 Status API 服务器的默认端点(`127.0.0.1:7085`),请更改配置文件(`conf/config.yaml`)中 `status` 部分中的 `ip` 和 `port`: + +```yaml +apisix: + status: + ip: "127.0.0.1" + port: 7085 +``` + +此 API 可用于在 APISIX 开始接收用户请求之前对 APISIX 执行就绪探测。 + +### GET /status + +返回报告 APISIX 工作人员状态的 JSON。如果 APISIX 未运行,建立 TCP 连接时请求将报错。否则,如果请求到达正在运行的 worker,此端点将始终返回 ok。 + +```json +{ + "status": "ok" +} +``` + +### GET /status/ready + +当所有 worker 都已加载配置时,返回 `ok`;否则,返回特定错误,错误代码为 `503`。以下是具体示例。 + +当所有 worker 都已加载配置时: + +```json +{ + "status": "ok" +} +``` + +当 1 个 workers 尚未初始化时: + +```json +{ + "status": "error", + "error": "worker count: 16 but status report count: 15" +} +``` + +当特定 worker 尚未加载配置时: + +```json +{ + "error": "worker id: 9 has not received configuration", + "status": "error" +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/stream-proxy.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/stream-proxy.md new file mode 100644 index 0000000..eb0cd36 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/stream-proxy.md @@ -0,0 
+1,234 @@ +--- +title: TCP/UDP 动态代理 +--- + + + +众多的闻名的应用和服务,像 LDAP、MYSQL 和 RTMP,选择 TCP 作为通信协议。但是像 DNS、syslog 和 RADIUS 这类非事务性的应用,他们选择了 UDP 协议。 + +APISIX 可以对 TCP/UDP 协议进行代理并实现动态负载均衡。在 nginx 世界,称 TCP/UDP 代理为 stream 代理,在 APISIX 这里我们也遵循了这个声明。 + +## 如何开启 Stream 代理 + +要启用该选项,请将 `apisix.proxy_mode` 设置为 `stream` 或 `http&stream`,具体取决于您是只需要 stream 代理还是需要 http 和 stream。然后在 `conf/config.yaml` 中添加 `apisix.stream_proxy` 选项并指定 APISIX 应充当 stream 代理并侦听传入请求的地址列表。 + +```yaml +apisix: + proxy_mode: http&stream # enable both http and stream proxies + stream_proxy: # TCP/UDP proxy + tcp: # TCP proxy address list + - 9100 + - "127.0.0.1:9101" + udp: # UDP proxy address list + - 9200 + - "127.0.0.1:9211" +``` + +## 如何设置 route + +简例如下: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } +}' +``` + +例子中 APISIX 对客户端 IP 为 `127.0.0.1` 的请求代理转发到上游主机 `127.0.0.1:1995`。 +更多用例,请参照 [test case](https://github.com/apache/apisix/blob/master/t/stream-node/sanity.t)。 + +## 更多 route 匹配选项 + +我们可以添加更多的选项来匹配 route。目前 Stream Route 配置支持 3 个字段进行过滤: + +- server_addr: 接受 Stream Route 连接的 APISIX 服务器的地址。 +- server_port: 接受 Stream Route 连接的 APISIX 服务器的端口。 +- remote_addr: 发出请求的客户端地址。 + +例如 + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "server_addr": "127.0.0.1", + "server_port": 2000, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } +}' +``` + +例子中 APISIX 会把服务器地址为 `127.0.0.1`, 端口为 `2000` 代理到上游地址 `127.0.0.1:1995`。 + +让我们再举一个实际场景的例子: + +1. 
将此配置放在 `config.yaml` 中 + + ```yaml + apisix: + proxy_mode: http&stream # enable both http and stream proxies + stream_proxy: # TCP/UDP proxy + tcp: # TCP proxy address list + - 9100 # by default uses 0.0.0.0 + - "127.0.0.10:9101" + ``` + +2. 现在运行一个 mysql docker 容器并将端口 3306 暴露给主机 + + ```shell + $ docker run --name mysql -e MYSQL_ROOT_PASSWORD=toor -p 3306:3306 -d mysql mysqld --default-authentication-plugin=mysql_native_password + # check it using a mysql client that it works + $ mysql --host=127.0.0.1 --port=3306 -u root -p + Enter password: + Welcome to the MySQL monitor. Commands end with ; or \g. + Your MySQL connection id is 25 + ... + mysql> + ``` + +3. 现在我们将创建一个带有服务器过滤的 stream 路由: + + ```shell + curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "server_addr": "127.0.0.10", + "server_port": 9101, + "upstream": { + "nodes": { + "127.0.0.1:3306": 1 + }, + "type": "roundrobin" + } + }' + ``` + + 每当 APISIX 服务器 `127.0.0.10` 和端口 `9101` 收到连接时,它只会将请求转发到 mysql 上游。让我们测试一下: + +4. 向 `9100` 发出请求(在 config.yaml 中启用 stream 代理端口),过滤器匹配失败。 + + ```shell + $ mysql --host=127.0.0.1 --port=9100 -u root -p + Enter password: + ERROR 2013 (HY000): Lost connection to MySQL server at 'reading initial communication packet', system error: 2 + ``` + + 下面的请求匹配到了 stream 路由,所以它可以正常代理到 mysql。 + + ```shell + mysql --host=127.0.0.10 --port=9101 -u root -p + Enter password: + Welcome to the MySQL monitor. Commands end with ; or \g. + Your MySQL connection id is 26 + ... 
+ mysql> + ``` + +完整的匹配选项列表参见 [Admin API 的 Stream Route](./admin-api.md#stream-route)。 + +## 接收基于 TCP 的 TLS 连接 + +APISIX 支持接收基于 TCP 的 TLS 连接。 + +首先,我们需要给对应的 TCP 地址启用 TLS: + +```yaml +apisix: + proxy_mode: http&stream # enable both http and stream proxies + stream_proxy: # TCP/UDP proxy + tcp: # TCP proxy address list + - addr: 9100 + tls: true +``` + +接着,我们需要为给定的 SNI 配置证书。 +具体步骤参考 [Admin API 的 SSL](./admin-api.md#ssl)。 +mTLS 也是支持的,参考 [保护路由](./mtls.md#保护路由)。 + +然后,我们需要配置一个 route,匹配连接并代理到上游: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } +}' +``` + +当连接为基于 TCP 的 TLS 时,我们可以通过 SNI 来匹配路由,比如: + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "sni": "a.test.com", + "upstream": { + "nodes": { + "127.0.0.1:5991": 1 + }, + "type": "roundrobin" + } +}' +``` + +在这里,握手时发送 SNI `a.test.com` 的连接会被代理到 `127.0.0.1:5991`。 + +## 代理到基于 TCP 的 TLS 上游 + +APISIX 还支持代理到基于 TCP 的 TLS 上游。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "scheme": "tls", + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } +}' +``` + +通过设置 `scheme` 为 `tls`,APISIX 将与上游进行 TLS 握手。 + +当客户端也使用基于 TCP 的 TLS 上游时,客户端发送的 SNI 将传递给上游。否则,将使用一个假的 SNI `apisix_backend`。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/support-fips-in-apisix.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/support-fips-in-apisix.md new file mode 100644 index 0000000..6321a40 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/support-fips-in-apisix.md @@ -0,0 +1,60 @@ +--- +id: support-fips-in-apisix +title: 通过 OpenSSL 3.0 使 APISIX 支持 FIPS 模式 +keywords: + - API 网关 + - Apache APISIX + - 贡献代码 + - 构建 APISIX + - OpenSSL 3.0 FIPS +description: 本文将介绍如何在 Apache APISIX 中使用 OpenSSL 3.0 来编译 apisix-runtime,即可启用 
FIPS 模式。 +--- + + + +目前,OpenSSL 3.0 [支持了](https://www.openssl.org/blog/blog/2022/08/24/FIPS-validation-certificate-issued/) [FIPS](https://en.wikipedia.org/wiki/FIPS_140-2) 模式。为了在 APISIX 中支持 FIPS 模式,你应该使用 OpenSSL 3.0 来编译 apisix-runtime。 + +## 编译 + +如果你需要使用 OpenSSL 3.0 来编译 apisix-runtime,请以 root 用户角色来执行以下命令: + +```bash +cd $(mktemp -d) +OPENSSL3_PREFIX=${OPENSSL3_PREFIX-/usr/local} +apt install -y build-essential +git clone https://github.com/openssl/openssl +cd openssl +./Configure --prefix=$OPENSSL3_PREFIX/openssl-3.0 enable-fips +make install +echo $OPENSSL3_PREFIX/openssl-3.0/lib64 > /etc/ld.so.conf.d/openssl3.conf +ldconfig +$OPENSSL3_PREFIX/openssl-3.0/bin/openssl fipsinstall -out $OPENSSL3_PREFIX/openssl-3.0/ssl/fipsmodule.cnf -module $OPENSSL3_PREFIX/openssl-3.0/lib64/ossl-modules/fips.so +sed -i 's@# .include fipsmodule.cnf@.include '"$OPENSSL3_PREFIX"'/openssl-3.0/ssl/fipsmodule.cnf@g; s/# \(fips = fips_sect\)/\1\nbase = base_sect\n\n[base_sect]\nactivate=1\n/g' $OPENSSL3_PREFIX/openssl-3.0/ssl/openssl.cnf +cd .. 
+ +export cc_opt="-I$OPENSSL3_PREFIX/openssl-3.0/include" +export ld_opt="-L$OPENSSL3_PREFIX/openssl-3.0/lib64 -Wl,-rpath,$OPENSSL3_PREFIX/openssl-3.0/lib64" + +wget --no-check-certificate https://raw.githubusercontent.com/api7/apisix-build-tools/master/build-apisix-runtime.sh +chmod +x build-apisix-runtime.sh +./build-apisix-runtime.sh +``` + +apisix-runtime 将安装在 `/usr/local/openresty`。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/api-gateway.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/api-gateway.md new file mode 100644 index 0000000..d56d831 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/api-gateway.md @@ -0,0 +1,42 @@ +--- +title: API Gateway +keywords: + - Apache APISIX + - API 网关 + - 网关 +description: 本文主要介绍了 API 网关的作用以及为什么需要 API 网关。 +--- + + + +## 描述 + +API 网关是位于客户端与后端服务集之间的 API 管理工具。API 网关相当于反向代理,用于接受所有 API 的调用、整合处理这些调用所需的各种服务,并返回相应的结果。API 网关通常会处理**跨 API 服务系统使用**的常见任务,并统一接入进行管理。通过 API 网关的统一拦截,可以实现对 API 接口的安全、日志等共性需求,如用户身份验证、速率限制和统计信息。 + +## 为什么需要 API 网关? 
+ +与传统的 API 微服务相比,API 网关有很多好处。比如: + +- 它是所有 API 请求的唯一入口。 +- 可用于将请求转发到不同的后端,或根据请求头将请求转发到不同的服务。 +- 可用于执行身份验证、授权和限速。 +- 它可用于支持分析,例如监控、日志记录和跟踪。 +- 可以保护 API 免受 SQL 注入、DDOS 攻击和 XSS 等恶意攻击媒介的攻击。 +- 它可以降低 API 和微服务的复杂性。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/consumer-group.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/consumer-group.md new file mode 100644 index 0000000..eaf4514 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/consumer-group.md @@ -0,0 +1,124 @@ +--- +title: Consumer Groups +keywords: + - API 网关 + - Apache APISIX + - Consumer Groups +description: 本文介绍了 Apache APISIX Consumer Group 对象的概念及使用方法。 +--- + + + +## 描述 + +通过 Consumer Groups,你可以在同一个消费者组中启用任意数量的[插件](./plugin.md),并在一个或者多个[消费者](./consumer.md)中引用该消费者组。 + +## 配置示例 + +以下示例展示了如何创建消费者组并将其绑定到消费者中。 + +创建一个共享相同限流配额的消费者组: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "limit-count": { + "count": 200, + "time_window": 60, + "rejected_code": 503, + "group": "grp_company_a" + } + } +}' +``` + +在消费者组中创建消费者: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + } + }, + "group_id": "company_a" +}' +``` + +当 APISIX 无法找到 `group_id` 中定义的消费者组时,创建或者更新消费者的请求将会终止,并返回错误码 `404`。 + +如果消费者已经配置了 `plugins` 字段,那么消费者组中配置的插件将与之合并。 + +:::tip + +此处需要注意两点: + +1. 当在同一个插件分别配置在[消费者](./consumer.md)、[路由](./route.md)、[插件配置](./plugin-config.md)和[服务](./service.md)中时,只有一份配置是生效的,并且消费者的优先级最高。更多信息,请参考 [Plugin](./plugin.md)。 +2. 
如果消费者和消费者组配置了相同的插件,则消费者中的插件配置优先级更高。对于第一点,因为消费者组需要配置在消费者中,因此你只需关心消费者中插件的优先级。 + +::: + +如下示例,假如你配置了一个消费者组: + +```json title="Consumer Group" +{ + "id": "bar", + "plugins": { + "response-rewrite": { + "body": "hello" + } + } +} +``` + +并配置了消费者: + +```json title="Consumer" +{ + "username": "foo", + "group_id": "bar", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + }, + "response-rewrite": { + "body": "world" + } + } +} +``` + +那么 `response-rewrite` 中的 `body` 将保留 `world`。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/consumer.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/consumer.md new file mode 100644 index 0000000..22f4a0f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/consumer.md @@ -0,0 +1,177 @@ +--- +title: Consumer +keywords: + - APISIX + - API 网关 + - 消费者 + - Consumer +description: 本文介绍了 Apache APISIX Consumer 对象的作用以及如何使用 Consumer。 +--- + + + +## 描述 + +Consumer 是某类服务的消费者,需要与用户认证配合才可以使用。当不同的消费者请求同一个 API 时,APISIX 会根据当前请求的用户信息,对应不同的 Plugin 或 Upstream 配置。如果 [Route](./route.md)、[Service](./service.md)、[Consumer](./consumer.md) 和 [Plugin Config](./plugin-config.md) 都绑定了相同的插件,只有消费者的插件配置会生效。插件配置的优先级由高到低的顺序是:Consumer > Route > Plugin Config > Service。 + +对于 API 网关而言,一般情况可以通过请求域名、客户端 IP 地址等字段识别到某类请求方,然后进行插件过滤并转发请求到指定上游。但有时候该方式达不到用户需求,因此 APISIX 支持了 Consumer 对象。 + +![Consumer](../../../assets/images/consumer-who.png) + +如上图所示,作为 API 网关,需要知道 API Consumer(消费方)具体是谁,这样就可以对不同 API Consumer 配置不同规则。 + +## 配置选项 + +定义 Consumer 的字段如下: + +| 名称 | 必选项 | 描述 | +| -------- | ---- | ------------------------------------------------------------------------------| +| username | 是 | Consumer 名称。 | +| plugins | 否 | Consumer 对应的插件配置。详细信息,请参考 [Plugins](./plugin.md)。 | + +## 识别消费者 + +在 APISIX 中,识别 Consumer 的过程如下图: + +![Consumer Internal](../../../assets/images/consumer-internal.png) + +1. 授权认证:比如有 [key-auth](../plugins/key-auth.md)、[JWT](../plugins/jwt-auth.md) 等; +2. 
获取 consumer_name:通过授权认证,即可自然获取到对应的 Consumer name,它是 Consumer 对象的唯一识别标识; +3. 获取 Consumer 上绑定的 Plugin 或 Upstream 信息:完成对不同 Consumer 做不同配置的效果。 + +当有不同的使用者请求相同的 API,并且需要根据使用者执行不同的插件和上游配置时,使用 Consumer 是非常合适的。需要与用户身份验证系统结合使用。 + +目前,可以与 Consumer 配置的身份验证插件包括 `basic-auth` 、`hmac-auth`、`jwt-auth`、`key-auth`、`ldap-auth` 和 `wolf-rbac`。 + +你可以参考 [key-auth](../plugins/key-auth.md) 认证授权插件的调用逻辑,进一步理解 Consumer 概念和使用。 + +:::note 注意 + +如需了解更多关于 Consumer 对象的信息,你可以参考 [Admin API Consumer](../admin-api.md#consumer) 资源介绍。 + +::: + +## 使用示例 + +以下示例介绍了如何对某个 Consumer 开启指定插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. 创建 Consumer,指定认证插件 `key-auth`,并开启特定插件 `limit-count`。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }' + ``` + +2. 创建路由,设置路由规则和启用插件配置。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }' + ``` + +3. 测试插件。 + + 连续发送三次测试请求,前两次返回正常,没达到限速阈值。 + + ```shell + curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I + ``` + + 第三次测试返回 `503`,请求被限制: + + ```shell + HTTP/1.1 503 Service Temporarily Unavailable + ... + ``` + +通过 [consumer-restriction](../plugins/consumer-restriction.md) 插件,限制用户 `jack` 对该 Route 的访问。 + +1. 
设置黑名单,禁止 jack 访问该 API。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": {}, + "consumer-restriction": { + "blacklist": [ + "jack" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }' + ``` + +2. 通过以下命令访问该路由,均返回 `403`,`jack` 被禁止访问。 + + ```shell + curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I + ``` + + 返回结果: + + ``` + HTTP/1.1 403 + ... + ``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/credential.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/credential.md new file mode 100644 index 0000000..e7b39ec --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/credential.md @@ -0,0 +1,152 @@ +--- +title: Credential +keywords: + - APISIX + - API 网关 + - 凭证 + - Credential +description: 本文介绍了 Apache APISIX Credential 对象的作用以及如何使用 Credential。 +--- + + + +## 描述 + +Credential 是存放 [Consumer](./consumer.md) 凭证配置的对象。 +一个 Consumer 可以使用不同类型的多个凭证。 +当你需要为一个 Consumer 配置不同类型的多个凭证时,就会用到 Credential。 + +目前,Credential 可以配置的身份认证插件包括 `basic-auth`、`hmac-auth`、`jwt-auth` 以及 `key-auth`。 + +## 配置选项 + + 定义 Credential 的字段如下: + +| 名称 | 必选项 | 描述 | +|---------|-----|-----------------------------------------------------| +| desc | 否 | Credential 描述。 | +| labels | 否 | Credential 标签。 | +| plugins | 否 | Credential 对应的插件配置。详细信息,请参考 [Plugins](./plugin.md)。 | + +:::note + +如需了解更多关于 Credential 对象的信息,你可以参考 [Admin API Credential](../admin-api.md#credential) 资源介绍。 + +::: + +## 使用示例 + +[Consumer 使用示例](./consumer.md#使用示例) 介绍了如何对 Consumer 配置认证插件,并介绍了如何配合其他插件使用。 +在该示例中,该 Consumer 只有一个 key-auth 类型的凭证。 +现在假设用户需要为该 Consumer 配置多个凭证,你可以使用 Credential 来支持这一点。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. 
创建 Consumer。不指定认证插件,而是稍后使用 Credential 来配置认证插件。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "username": "jack" + }' + ``` + +2. 为 Consumer 配置 2 个 启用 `key-auth` 的 Credential。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/key-auth-one \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }' + ``` + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/key-auth-two \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": { + "key": "auth-two" + } + } + }' + ``` + +3. 创建路由,设置路由规则和启用插件配置。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }' + ``` + +4. 测试插件 + + 分别使用 `auth-one` 和 `auth-two` 两个 key 来测试请求,都响应正常。 + + ```shell + curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I + curl http://127.0.0.1:9080/hello -H 'apikey: auth-two' -I + ``` + + 为该 Consumer 启用 `limit-count` 插件。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "username": "jack", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }' + ``` + + 分别使用这两个 key 连续 3 次以上请求该路由,测试返回 `503`,请求被限制。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/global-rule.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/global-rule.md new file mode 100644 index 0000000..8b3dc7a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/global-rule.md @@ -0,0 +1,73 @@ +--- +title: Global rules +keywords: + - API 网关 + - Apache APISIX + - Global Rules + - 全局规则 +description: 本文介绍了全局规则的概念以及如何启用全局规则。 +--- + + + +## 描述 + 
+[Plugin](plugin.md) 配置可直接绑定在 [Route](route.md) 上,也可以被绑定在 [Service](service.md) 或 [Consumer](consumer.md) 上。 + +如果你需要一个能作用于所有请求的 Plugin,可以通过 Global Rules 启用一个全局的插件配置。 + +全局规则相对于 Route、Service、Plugin Config、Consumer 中的插件配置,Global Rules 中的插件总是优先执行。 + +## 使用示例 + +以下示例展示了如何为所有请求启用 `limit-count` 插件: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/global_rules/1 -X PUT \ + -H 'Content-Type: application/json' \ + -H "X-API-KEY: $admin_key" \ + -d '{ + "plugins": { + "limit-count": { + "time_window": 60, + "policy": "local", + "count": 2, + "key": "remote_addr", + "rejected_code": 503 + } + } + }' +``` + +你也可以通过以下命令查看所有的全局规则: + +```shell +curl http://127.0.0.1:9180/apisix/admin/global_rules -H "X-API-KEY: $admin_key" +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/plugin-config.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/plugin-config.md new file mode 100644 index 0000000..7253890 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/plugin-config.md @@ -0,0 +1,178 @@ +--- +title: Plugin Config +keywords: + - API 网关 + - Apache APISIX + - 插件配置 + - Plugin Config +description: Plugin Config 对象,可以用于创建一组通用的插件配置,并在路由中使用这组配置。 +--- + + + +## 描述 + +在很多情况下,我们在不同的路由中会使用相同的插件规则,此时就可以通过 Plugin Config 来设置这些规则。Plugin Config 属于一组通用插件配置的抽象。 + +`plugins` 的配置可以通过 [Admin API](../admin-api.md#plugin-config) `/apisix/admin/plugin_configs` 进行单独配置,在路由中使用 `plugin_config_id` 与之进行关联。 + +对于同一个插件的配置,只能有一个是有效的,优先级为 Consumer > Route > Plugin Config > Service。 + +## 使用示例 + +你可以参考如下步骤将 Plugin Config 绑定在路由上。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. 
创建 Plugin config。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/plugin_configs/1 \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "desc": "enable limit-count plugin", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503 + } + } + }' + ``` + +2. 创建路由并绑定 `Plugin Config 1`。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "uris": ["/index.html"], + "plugin_config_id": 1, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }' + ``` + +如果找不到对应的 Plugin Config,该路由上的请求会报 `503` 错误。 + +## 注意事项 + +如果路由中已经配置了 `plugins`,那么 Plugin Config 里面的插件配置将会与 `plugins` 合并。 + +相同的插件不会覆盖掉 `plugins` 原有的插件配置。详细信息,请参考 [Plugin](./plugin.md)。 + +1. 假设你创建了一个 Plugin Config。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/plugin_configs/1 \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "desc": "enable ip-restruction and limit-count plugin", + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24", + "113.74.26.106" + ] + }, + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503 + } + } + }' + ``` + +2. 并在路由中引入 Plugin Config。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "uris": ["/index.html"], + "plugin_config_id": 1, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + "plugins": { + "proxy-rewrite": { + "uri": "/test/add", + "host": "apisix.iresty.com" + }, + "limit-count": { + "count": 20, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }' + ``` + +3. 
最后实现的效果如下。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -i -d ' + { + "uris": ["/index.html"], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24", + "113.74.26.106" + ] + }, + "proxy-rewrite": { + "uri": "/test/add", + "host": "apisix.iresty.com" + }, + "limit-count": { + "count": 20, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }' + ``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/plugin-metadata.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/plugin-metadata.md new file mode 100644 index 0000000..3af77a0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/plugin-metadata.md @@ -0,0 +1,85 @@ +--- +title: Plugin Metadata +keywords: + - API 网关 + - Apache APISIX + - 插件元数据配置 + - Plugin Metadata +description: APISIX 的插件元数据 +--- + + + +## 摘要 + +在本文档中,您将了解到 APISIX 中,插件元数据的基本概念和您可能使用到的场景。 + +浏览文档末尾的相关资源,获取与此相关的更多信息。 + +## 描述 + +在 APISIX 中,配置通用的元数据属性,可以作用于包含该元数据插件的所有路由及服务中。例如为`rocketmq-logger`指定了 `log_format`,则所有绑定 rocketmq-logger 的路由或服务都将使用该日志格式。 + +下图说明了插件元数据的概念,使用两个不同路由上的 [syslog](https://apisix.apache.org/zh/docs/apisix/plugins/syslog/) 插件的实例,以及为 [syslog](https://apisix.apache.org/zh/docs/apisix/plugins/syslog/) 插件设置全局`log_format`的插件元数据对象: + +![plugin_metadata](https://static.apiseven.com/uploads/2023/04/17/Z0OFRQhV_plugin%20metadata.svg) + +如果没有另外指定,插件元数据对象上的`log_format`应将相同的日志格式统一应用于两个`syslog`插件。但是,由于`/orders`路由上的`syslog`插件具有不同的`log_format`,因此访问该路由的请求将按照路由中插件指定的`log_format`生成日志。 + +在插件级别设置的元数据属性更加精细,并且比`全局`元数据对象具有更高的优先级。 + +插件元数据对象只能用于具有元数据属性的插件。有关哪些插件具有元数据属性的更多详细信息,请查看插件配置属性及相关信息。 + +## 配置示例 + +以下示例展示了如何通过 Admin API 配置插件元数据: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: 
+ +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/http-logger \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +配置完成后,你将在日志系统中看到如下类似日志: + +```json +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## 相关资源 + +核心概念 - [插件](https://apisix.apache.org/docs/apisix/terminology/plugin/) diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/plugin.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/plugin.md new file mode 100644 index 0000000..ba42e81 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/plugin.md @@ -0,0 +1,348 @@ +--- +title: Plugin +keywords: + - API 网关 + - Apache APISIX + - 插件 + - 插件优先级 +description: 本文介绍了 APISIX Plugin 对象的相关信息及其使用方法,并且介绍了如何自定义插件优先级、自定义错误响应、动态控制插件执行状态等。 +--- + + + +## 描述 + +APISIX 插件可以扩展 APISIX 的功能,以满足组织或用户特定的流量管理、可观测性、安全、请求/响应转换、无服务器计算等需求。 + +APISIX 提供了许多现有的插件,可以定制和编排以满足你的需求。这些插件可以全局启用,以在每个传入请求上触发,也可以局部绑定到其他对象,例如在 [Route](./route.md)、[Service](./service.md)、[Consumer](./consumer.md) 或 [Plugin Config](./plugin-config.md) 上。你可以参考 [Admin API](../admin-api.md#plugin) 了解如何使用该资源。 + +如果现有的 APISIX 插件不满足需求,你还可以使用 Lua 或其他语言(如 Java、Python、Go 和 Wasm)编写自定义插件。 + +## 插件安装 + +默认情况下,大多数 APISIX 插件都已[安装](https://github.com/apache/apisix/blob/master/apisix/cli/config.lua): + +```lua title="apisix/cli/config.lua" +local _M = { + ... + plugins = { + "real-ip", + "ai", + "client-control", + "proxy-control", + "request-id", + "zipkin", + "ext-plugin-pre-req", + "fault-injection", + "mocking", + "serverless-pre-function", + ... + }, + ... 
+} +``` + +如果您想调整插件安装,请将自定义的 `plugins` 配置添加到 `config.yaml` 中。例如: + +```yaml +plugins: + - real-ip # 安装 + - ai + - real-ip + - ai + - client-control + - proxy-control + - request-id + - zipkin + - ext-plugin-pre-req + - fault-injection + # - mocking # 不安装 + - serverless-pre-function + ... # 其它插件 +``` + +完整配置参考请参见 [`config.yaml.example`](https://github.com/apache/apisix/blob/master/conf/config.yaml.example)。 + +重新加载 APISIX 以使配置更改生效。 + +## 插件执行生命周期 + +安装的插件首先会被初始化。然后会检查插件的配置,以确保插件配置遵循定义的[JSON Schema](https://json-schema.org)。 + +当一个请求通过 APISIX 时,插件的相应方法会在以下一个或多个阶段中执行: `rewrite`, `access`, `before_proxy`, `header_filter`, `body_filter`, and `log`。这些阶段在很大程度上受到[OpenResty 指令](https://openresty-reference.readthedocs.io/en/latest/Directives/)的影响。 + +
+
+Routes Diagram +
+
+ +## 插件执行顺序 + +通常情况下,插件按照以下顺序执行: + +1. [全局规则](./global-rule.md) 插件 + 1. rewrite 阶段的插件 + 2. access 阶段的插件 + +2. 绑定到其他对象的插件 + 1. rewrite 阶段的插件 + 2. access 阶段的插件 + +在每个阶段内,你可以在插件的 `_meta.priority` 字段中可选地定义一个新的优先级数,该优先级数优先于默认插件优先级在执行期间。具有更高优先级数的插件首先执行。 + +例如,如果你想在请求到达路由时,让 `limit-count`(优先级 1002)先于 `ip-restriction`(优先级 3000)运行,可以通过将更高的优先级数传递给 `limit-count` 的 `_meta.priority` 字段来实现: + +```json +{ + ..., + "plugins": { + "limit-count": { + ..., + "_meta": { + "priority": 3010 + } + } + } +} +``` + +若要将此插件实例的优先级重置为默认值,只需从插件配置中删除`_meta.priority`字段即可。 + +## 插件合并优先顺序 + +当同一个插件在全局规则中和局部规则(例如路由)中同时配置时,两个插件将顺序执行。 + +然而,如果相同的插件在多个对象上本地配置,例如在[`Route`](route.md), [`Service`](service.md), [`Consumer`](consumer.md) 或[`Plugin Config`](plugin-config.md) 上,每个非全局插件只会执行一次,因为在执行期间,针对特定的优先顺序,这些对象中配置的插件会被合并: + +`Consumer` > `Consumer Group` > `Route` > `Plugin Config` > `Service` + +因此,如果相同的插件在不同的对象中具有不同的配置,则合并期间具有最高优先顺序的插件配置将被使用。 + +## 通用配置 + +通过 `_meta` 配置项可以将一些通用的配置应用于插件,你可以参考下文使用这些通用配置。通用配置如下: + +| 名称 | 类型 | 描述 | +|--------------- |-------------- |----------------| +| disable | boolean | 当设置为 `true` 时,则禁用该插件。可选值为 `true` 和 `false`。 | +| error_response | string/object | 自定义错误响应。 | +| priority | integer | 自定义插件优先级。 | +| filter | array | 根据请求的参数,在运行时控制插件是否执行。此配置由一个或多个 {var, operator, val} 元素组成列表,类似:`{{var, operator, val}, {var, operator, val}, ...}}`。例如 `{"arg_version", "==", "v2"}`,表示当前请求参数 `version` 是 `v2`。这里的 `var` 与 NGINX 内部自身变量命名是保持一致。操作符的使用方法,请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list)。| + +### 禁用指定插件 + +通过 `disable` 参数,你可以将某个插件调整为“禁用状态”,即请求不会经过该插件。 + +```json +{ + "proxy-rewrite": { + "_meta": { + "disable": true + } + } +} +``` + +### 自定义错误响应 + +通过 `error_response` 配置,可以将任意插件的错误响应配置成一个固定的值,避免因为插件内置的错误响应信息而带来不必要的麻烦。 + +如下配置表示将 `jwt-auth` 插件的错误响应自定义为 `Missing credential in request`。 + +```json +{ + "jwt-auth": { + "_meta": { + "error_response": { + "message": "Missing credential in request" + } + } + } +} +``` + +### 自定义插件优先级 + +所有插件都有默认优先级,但是你仍然可以通过 
`priority` 配置项来自定义插件优先级,从而改变插件执行顺序。 + +```json + { + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + }, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } +} +``` + +`serverless-pre-function` 的默认优先级是 `10000`,`serverless-post-function` 的默认优先级是 `-2000`。默认情况下会先执行 `serverless-pre-function` 插件,再执行 `serverless-post-function` 插件。 + +上面的配置则将 `serverless-pre-function` 插件的优先级设置为 `-2000`,`serverless-post-function` 插件的优先级设置为 `10000`,因此 `serverless-post-function` 插件会优先执行。 + +:::note 注意 + +- 自定义插件优先级只会影响插件实例绑定的主体,不会影响该插件的所有实例。比如上面的插件配置属于路由 A,路由 B 上的插件 `serverless-pre-function` 和 `serverless-post-function` 插件执行顺序不会受到影响,会使用默认优先级。 +- 自定义插件优先级不适用于 Consumer 上配置的插件的 `rewrite` 阶段。路由上配置的插件的 `rewrite` 阶段将会优先运行,然后才会运行 Consumer 上除 `auth` 类插件之外的其他插件的 `rewrite` 阶段。 + +::: + +### 动态控制插件执行状态 + +默认情况下,在路由中指定的插件都会被执行。但是你可以通过 `filter` 配置项为插件添加一个过滤器,通过过滤器的执行结果控制插件是否执行。 + +1. 如下配置表示,只有当请求查询参数中 `version` 值为 `v2` 时,`proxy-rewrite` 插件才会执行。 + + ```json + { + "proxy-rewrite": { + "_meta": { + "filter": [ + ["arg_version", "==", "v2"] + ] + }, + "uri": "/anything" + } + } + ``` + +2. 使用下述配置创建一条完整的路由。 + + ```json + { + "uri": "/get", + "plugins": { + "proxy-rewrite": { + "_meta": { + "filter": [ + ["arg_version", "==", "v2"] + ] + }, + "uri": "/anything" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + } + ``` + +3. 当请求中不带任何参数时,`proxy-rewrite` 插件不会执行,请求将被转发到上游的 `/get`。 + + ```shell + curl -v http://127.0.0.1:9080/get -H"host:httpbin.org" + ``` + + ```shell + < HTTP/1.1 200 OK + ......
< Server: APISIX/2.15.0 + < + { + "args": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1", + "X-Amzn-Trace-Id": "Root=1-62eb6eec-46c97e8a5d95141e621e07fe", + "X-Forwarded-Host": "httpbin.org" + }, + "origin": "127.0.0.1, 117.152.66.200", + "url": "http://httpbin.org/get" + } + ``` + +4. 当请求中携带参数 `version=v2` 时,`proxy-rewrite` 插件执行,请求将被转发到上游的 `/anything`: + + ```shell + curl -v http://127.0.0.1:9080/get?version=v2 -H"host:httpbin.org" + ``` + + ```shell + < HTTP/1.1 200 OK + ...... + < Server: APISIX/2.15.0 + < + { + "args": { + "version": "v2" + }, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.79.1", + "X-Amzn-Trace-Id": "Root=1-62eb6f02-24a613b57b6587a076ef18b4", + "X-Forwarded-Host": "httpbin.org" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 117.152.66.200", + "url": "http://httpbin.org/anything?version=v2" + } + ``` + +## 热加载 + +APISIX 的插件是热加载的,不管你是新增、删除还是修改插件,都不需要重启服务。 + +只需要通过 Admin API 发送一个 HTTP 请求即可: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H "X-API-KEY: $admin_key" -X PUT +``` + +:::note 注意 + +如果你已经在路由规则里配置了某个插件(比如在 Route 的 `plugins` 字段里面添加了它),然后在配置文件中禁用了该插件,在执行路由规则时则会跳过该插件。 + +::: + +## Standalone 模式下的热加载 + +关于 Standalone 模式下的热加载的信息,请参考 [stand alone 模式](../../../en/latest/deployment-modes.md#standalone)。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/route.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/route.md new file mode 100644 index 0000000..5c2270a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/route.md @@ -0,0 +1,136 @@ +--- +title: Route +keywords: + - API 网关 + - Apache APISIX + - Route + - 路由 +description: 
本文讲述了路由的概念以及使用方法。 +--- + + + +## 描述 + +Route(也称为路由)是 APISIX 中最基础和最核心的资源对象,APISIX 可以通过路由定义规则来匹配客户端请求,根据匹配结果加载并执行相应的插件,最后将请求转发给到指定的上游服务。 + +## 配置简介 + +路由中主要包含三部分内容: + +- 匹配规则:比如 `uri`、`host`、`remote_addr` 等等,你也可以自定义匹配规则,详细信息请参考 [Route body 请求参数](../admin-api.md#route-request-body-parameters)。 +- 插件配置:你可以根据业务需求,在路由中配置相应的插件来实现功能。详细信息请参考 [Plugin](./plugin.md) 和 [plugin-config](./plugin-config.md)。 +- 上游信息:路由会根据配置的负载均衡信息,将请求按照规则转发至相应的上游。详细信息请参考 [Upstream](./upstream.md)。 + +下图示例是一些 Route 规则的实例,当某些属性值相同时,图中用相同颜色标识。 + +![路由示例](../../../assets/images/routes-example.png) + +你可以在路由中完成所有参数的配置,该方式设置容易设置,每个路由的相对独立自由度比较高。示例如下: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +当你的路由中有比较多的重复配置(比如启用相同的插件配置或上游信息),你也可以通过配置 [Service](service.md) 和 [Upstream](upstream.md) 的 ID 或者其他对象的 ID 来完成路由配置。示例如下: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugin_config_id": "123456789apacheapisix", + "upstream_id": "1" +}' +``` + +:::tip 提示 + +APISIX 所有的资源对象的 ID,均使用字符串格式,如果使用的上游 ID、服务 ID 或其他资源对象的 ID 大于 14 个字符时,请务必使用字符串形式表示该资源对象。例如: + +```json + "plugin_config_id": "1234a67891234apisix", + "service_id": "434199918991639234", + "upstream_id": "123456789123456789" +``` + +::: + +## 配置示例 + +以下示例创建的路由,是把 URI 为 `/index.html` 的请求代理到地址为 `127.0.0.1:1980` 的上游服务。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "uri": "/index.html", + "upstream": { + "type": 
"roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +```shell +HTTP/1.1 201 Created +Date: Sat, 31 Aug 2019 01:17:15 GMT +Content-Type: text/plain +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX web server + +{"node":{"value":{"uri":"\/index.html","upstream":{"nodes":{"127.0.0.1:1980":1},"type":"roundrobin"}},"createdIndex":61925,"key":"\/apisix\/routes\/1","modifiedIndex":61925}} +``` + +当接收到成功应答后,表示该路由已成功创建。 + +更多信息,请参考 [Admin API 的 Route 对象](../admin-api.md#route)。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/router.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/router.md new file mode 100644 index 0000000..f9d6615 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/router.md @@ -0,0 +1,56 @@ +--- +title: Router +keywords: + - API 网关 + - Apache APISIX + - Router +description: 本文介绍了如何选择 Apache APISIX 的 Router。 +--- + + + +## 描述 + +APISIX 区别于其他 API 网关的一大特点是允许用户选择不同 Router 来更好匹配自由业务,在性能、自由之间做最适合选择。 + +你可以通过配置 `conf/config.yaml` 文件,来设置符合自身业务需求的路由。 + +## 配置简介 + +Router 具有以下配置: + +- `apisix.router.http`: HTTP 请求路由。 + + - `radixtree_uri`:只使用 `uri` 作为主索引。基于 `radixtree` 引擎,支持全量和深前缀匹配,更多信息请参考[如何使用 router-radixtree](../../../en/latest/router-radixtree.md)。 + - `绝对匹配`:完整匹配给定的 `uri` ,比如 `/foo/bar`,`/foo/glo`。 + - `前缀匹配`:末尾使用 `*` 代表给定的 `uri` 是前缀匹配。比如 `/foo*`,则允许匹配 `/foo/`、`/foo/a`和`/foo/b`等。 + - `匹配优先级`:优先尝试绝对匹配,若无法命中绝对匹配,再尝试前缀匹配。 + - `任意过滤属性`:允许指定任何 Nginx 内置变量作为过滤条件,比如 URL 请求参数、请求头、cookie 等。 + - `radixtree_uri_with_parameter`:同 `radixtree_uri` 但额外有参数匹配的功能。 + - `radixtree_host_uri`:(默认)使用 `host + uri` 作为主索引(基于 `radixtree` 引擎),对当前请求会同时匹配 `host` 和 `uri`,支持的匹配条件与 `radixtree_uri` 基本一致。 + +::: 注意 + +在 3.2 及之前版本,APISIX 使用 `radixtree_uri` 作为默认路由,`radixtree_uri` 比 `radixtree_host_uri` 拥有更好的性能,如果你对性能有更高的要求,并且能够接受 `radixtree_uri` 只使用 `uri` 作为主索引的特点,可以考虑继续使用 `radixtree_uri` 作为默认路由 + +::: + +- `apisix.router.ssl`:SSL 加载匹配路由。 + - `radixtree_sni`:(默认)使用 `SNI` (Server 
Name Indication) 作为主索引(基于 radixtree 引擎)。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/script.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/script.md new file mode 100644 index 0000000..3e1820e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/script.md @@ -0,0 +1,46 @@ +--- +title: Script +keywords: + - API 网关 + - Apache APISIX + - Router +description: 本文介绍了 Apache APISIX Script 的使用方法及注意事项。 +--- + + + +## 描述 + +Script 表示将在 `HTTP` 请求/响应生命周期期间执行的脚本。 + +Script 配置需要绑定在路由上。 + +Script 与 Plugin 不兼容,并且 Script 优先执行 Script,这意味着配置 Script 后,Route 上配置的 Plugin 将**不被执行**。 + +理论上,在 Script 中可以编写任意 Lua 代码,你也可以直接调用已有的插件以复用已有的代码。 + +Script 也有执行阶段概念,支持 `access`、`header_filter`、`body_filter` 和 `log` 阶段。系统会在相应阶段中自动执行 `Script` 脚本中对应阶段的代码。 + +```json +{ + ... + "script": "local _M = {} \n function _M.access(api_ctx) \n ngx.log(ngx.INFO,\"hit access phase\") \n end \nreturn _M" +} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/secret.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/secret.md new file mode 100644 index 0000000..22a3f49 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/secret.md @@ -0,0 +1,351 @@ +--- +title: Secret +--- + + + +## 描述 + +密钥是指 APISIX 运行过程中所需的任何敏感信息,它可能是核心配置的一部分(如 etcd 的密码),也可能是插件中的一些敏感信息。APISIX 中常见的密钥类型包括: + +- 一些组件(etcd、Redis、Kafka 等)的用户名、密码 +- 证书的私钥 +- API 密钥 +- 敏感的插件配置字段,通常用于身份验证、hash、签名或加密 + +APISIX Secret 允许用户在 APISIX 中通过一些密钥管理服务(Vault 等)来存储密钥,在使用的时候根据 key 进行读取,确保密钥在整个平台中不以明文的形式存在。 + +其工作原理如图所示: +![secret](../../../assets/images/secret.png) + +APISIX 目前支持通过以下方式存储密钥: + +- [环境变量](#使用环境变量管理密钥) +- [HashiCorp Vault](#使用-vault-管理密钥) +- [AWS Secrets Manager](#使用-aws-secrets-manager-管理密钥) +- [GCP Secrets Manager](#使用-gcp-secrets-manager-管理密钥) + +你可以在以下插件的 consumer 配置中通过指定格式的变量来使用 APISIX Secret 功能,比如 `key-auth` 插件。 + +:::note + +如果某个配置项为:`key: "$ENV://ABC"`,当 APISIX Secret 中没有检索到 
$ENV://ABC 对应的真实值,那么 key 的值将是 "$ENV://ABC" 而不是 `nil`。 + +::: + +## 使用环境变量管理密钥 + +使用环境变量来管理密钥意味着你可以将密钥信息保存在环境变量中,在配置插件时通过特定格式的变量来引用环境变量。APISIX 支持引用系统环境变量和通过 Nginx `env` 指令配置的环境变量。 + +### 引用方式 + +``` +$ENV://$env_name/$sub_key +``` + +- env_name: 环境变量名称 +- sub_key: 当环境变量的值是 JSON 字符串时,获取某个属性的值 + +如果环境变量的值是字符串类型,如: + +``` +export JACK_AUTH_KEY=abc +``` + +则可以通过如下方式引用: + +``` +$ENV://JACK_AUTH_KEY +``` + +如果环境变量的值是一个 JSON 字符串,例如: + +``` +export JACK={"auth-key":"abc","openid-key": "def"} +``` + +则可以通过如下方式引用: + +``` +# 获取环境变量 JACK 的 auth-key +$ENV://JACK/auth-key + +# 获取环境变量 JACK 的 openid-key +$ENV://JACK/openid-key +``` + +### 示例:在 key-auth 插件中使用 + +第一步:APISIX 实例启动前创建环境变量 + +``` +export JACK_AUTH_KEY=abc +``` + +第二步:在 `key-auth` 插件中引用环境变量 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$ENV://JACK_AUTH_KEY" + } + } +}' +``` + +通过以上步骤,可以将 `key-auth` 插件中的 key 配置保存在环境变量中,而不是在配置插件时明文显示。 + +## 使用 Vault 管理密钥 + +使用 Vault 来管理密钥意味着你可以将密钥信息保存在 Vault 服务中,在配置插件时通过特定格式的变量来引用。APISIX 目前支持对接 [Vault KV 引擎的 V1 版本](https://developer.hashicorp.com/vault/docs/secrets/kv/kv-v1)。 + +### 引用方式 + +``` +$secret://$manager/$id/$secret_name/$key +``` + +- manager: 密钥管理服务,可以是 Vault、AWS、GCP 等 +- APISIX Secret 资源 ID,需要与添加 APISIX Secret 资源时指定的 ID 保持一致 +- secret_name: 密钥管理服务中的密钥名称 +- key:密钥管理服务中密钥对应的 key + +### 示例:在 key-auth 插件中使用 + +第一步:在 Vault 中创建对应的密钥,可以使用如下命令: + +```shell +vault kv put apisix/jack auth-key=value +``` + +第二步:通过 Admin API 添加 Secret 资源,配置 Vault 的地址等连接信息: + +```shell +curl http://127.0.0.1:9180/apisix/admin/secrets/vault/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "https://127.0.0.1:8200", + "prefix": "apisix", + "token": "root" +}' +``` + +如果使用 APISIX Standalone 版本,则可以在 `apisix.yaml` 
文件中添加如下配置: + +```yaml +secrets: + - id: vault/1 + prefix: apisix + token: root + uri: 127.0.0.1:8200 +``` + +:::tip + +它现在支持使用 [`namespace` 字段](../admin-api.md#secret-config-body-requset-parameters) 设置 [HashiCorp Vault Enterprise](https://developer.hashicorp.com/vault/docs/enterprise/namespaces#vault-api-and-namespaces) 和 HCP Vault 所支持的多租户命名空间概念。 + +::: + +第三步:在 `key-auth` 插件中引用 APISIX Secret 资源,填充秘钥信息: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$secret://vault/1/jack/auth-key" + } + } +}' +``` + +通过上面两步操作,当用户请求命中 `key-auth` 插件时,会通过 APISIX Secret 组件获取到 key 在 Vault 中的真实值。 + +## 使用 AWS Secrets Manager 管理密钥 + +使用 AWS Secrets Manager 管理密钥是一种安全且便捷的方式来存储和管理敏感信息。通过这种方式,你可以将密钥信息保存在 AWS Secret Manager 中,并在配置 APISIX 插件时通过特定的格式引用这些密钥。 + +APISIX 目前支持两种访问方式: [长期凭证的访问方式](https://docs.aws.amazon.com/zh_cn/sdkref/latest/guide/access-iam-users.html) 和 [短期凭证的访问方式](https://docs.aws.amazon.com/zh_cn/sdkref/latest/guide/access-temp-idc.html)。 + +### 引用方式 + +在 APISIX 中引用密钥时,可以使用以下格式: + +``` +$secret://$manager/$id/$secret_name/$key +``` + +- manager: 密钥管理服务,可以是 Vault、AWS 等 +- APISIX Secret 资源 ID,需要与添加 APISIX Secret 资源时指定的 ID 保持一致 +- secret_name: 密钥管理服务中的密钥名称 +- key:当密钥的值是 JSON 字符串时,获取某个属性的值 + +### 相关参数 + +| 名称 | 必选项 | 默认值 | 描述 | +| --- | --- | --- | --- | +| access_key_id | 是 | | AWS 访问密钥 ID | +| secret_access_key | 是 | | AWS 访问密钥 | +| session_token | 否 | | 临时访问凭证信息 | +| region | 否 | us-east-1 | AWS 区域 | +| endpoint_url | 否 | https://secretsmanager.{region}.amazonaws.com | AWS Secret Manager 地址 | + +### 示例:在 key-auth 插件中使用 + +这里以 key-auth 插件的使用为例,展示如何通过 AWS Secret Manager 管理密钥: + +第一步:在 AWS Secret Manager 中创建对应的密钥,这里使用 [localstack](https://www.localstack.cloud/) 模拟,可以使用如下命令: + +```shell +docker exec -i localstack sh -c "awslocal secretsmanager create-secret --name jack --description 'APISIX Secret' --secret-string '{\"auth-key\":\"value\"}'" +``` + +第二步:通过 
Admin API 添加 Secret 资源,配置 AWS Secret Manager 的地址等连接信息: + +你可以在环境变量中存储关键密钥信息,保证配置信息是安全的,在使用到的地方进行引用: + +```shell +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +export AWS_SESSION_TOKEN= +``` + +当然,你也可以通过直接在配置中指定所有信息内容: + +```shell +curl http://127.0.0.1:9180/apisix/admin/secrets/aws/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "endpoint_url": "http://127.0.0.1:4566", + "region": "us-east-1", + "access_key_id": "access", + "secret_access_key": "secret", + "session_token": "token" +}' +``` + +如果使用 APISIX Standalone 版本,则可以在 `apisix.yaml` 文件中添加如下配置: + +```yaml +secrets: + - id: aws/1 + endpoint_url: http://127.0.0.1:4566 + region: us-east-1 + access_key_id: access + secret_access_key: secret + session_token: token +``` + +第三步:在 `key-auth` 插件中引用 APISIX Secret 资源,填充密钥信息: + +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$secret://aws/1/jack/auth-key" + } + } +}' +``` + +通过上面两步操作,当用户请求命中 `key-auth` 插件时,会通过 APISIX Secret 组件获取到 key 在 AWS Secret Manager 中的真实值。 + +### 验证 + +你可以通过如下指令进行验证: + +```shell +# 示例:将下面的 your_route 替换为实际的路由路径 +curl -i http://127.0.0.1:9080/your_route -H 'apikey: value' +``` + +这将验证 key-auth 插件是否正确地使用 AWS Secret Manager 中的密钥。 + +## 使用 GCP Secrets Manager 管理密钥 + +使用 GCP Secret Manager 来管理密钥意味着你可以将密钥信息保存在 GCP 服务中,在配置插件时通过特定格式的变量来引用。APISIX 目前支持对接 GCP Secret Manager,所支持的验证方式是 [OAuth 2.0](https://developers.google.com/identity/protocols/oauth2?hl=zh-cn)。 + +### 引用方式 + +``` +$secret://$manager/$id/$secret_name/$key +``` + +引用方式和之前保持一致: + +- manager: 密钥管理服务,可以是 Vault、AWS、GCP 等 +- id: APISIX Secret 资源 ID,需要与添加 APISIX Secret 资源时指定的 ID 保持一致 +- secret_name: 密钥管理服务中的密钥名称 +- key:当密钥的值是 JSON 字符串时,获取某个属性的值 + +### 必要参数 + +| 名称 | 必选项 | 默认值 | 描述 | +| ----------------------- | -------- | ------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------- | +| auth_config | 是 | | `auth_config` 和 `auth_file` 必须配置一个。 | +| auth_config.client_email | 是 | | 谷歌服务帐号的 email 参数。 | +| auth_config.private_key | 是 | | 谷歌服务帐号的私钥参数。 | +| auth_config.project_id | 是 | | 谷歌服务帐号的项目 ID。 | +| auth_config.token_uri | 否 | https://oauth2.googleapis.com/token | 请求谷歌服务帐户的令牌的 URI。 | +| auth_config.entries_uri | 否 | https://secretmanager.googleapis.com/v1 | 谷歌密钥服务访问端点 API。 | +| auth_config.scope | 否 | https://www.googleapis.com/auth/cloud-platform | 谷歌服务账号的访问范围,可参考 [OAuth 2.0 Scopes for Google APIs](https://developers.google.com/identity/protocols/oauth2/scopes)| +| auth_file | 是 | | `auth_config` 和 `auth_file` 必须配置一个。 | +| ssl_verify | 否 | true | 当设置为 `true` 时,启用 `SSL` 验证。 | + +你需要配置相应的认证参数,或者通过 auth_file 来指定认证文件,其中 auth_file 的内容为认证参数的 json 格式。 + +### 示例 + +以下一种正确的配置实例: + +``` +curl http://127.0.0.1:9180/apisix/admin/secrets/gcp/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "auth_config" : { + "client_email": "email@apisix.iam.gserviceaccount.com", + "private_key": "private_key", + "project_id": "apisix-project", + "token_uri": "https://oauth2.googleapis.com/token", + "entries_uri": "https://secretmanager.googleapis.com/v1", + "scope": ["https://www.googleapis.com/auth/cloud-platform"] + } +}' + +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/service.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/service.md new file mode 100644 index 0000000..aad15ce --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/service.md @@ -0,0 +1,124 @@ +--- +title: Service +keywords: + - API 网关 + - Apache APISIX + - Router +description: 本文介绍了 Apache APISIX Service 对象的概念及其使用方法。 +--- + + + +## 描述 + +Service(也称之为服务)是某类 API 的抽象(也可以理解为一组 Route 的抽象)。它通常与上游服务抽象是一一对应的,但与路由之间,通常是 1:N 即一对多的关系。参看下图。 + +![服务示例](../../../assets/images/service-example.png) + 
+不同路由规则同时绑定到一个服务上,这些路由将具有相同的上游和插件配置,减少冗余配置。当路由和服务都开启同一个插件时,路由中的插件优先级高于服务中的插件。关于插件优先级的更多信息,请参考 [Plugin](./plugin.md)。 + +更多关于 Service 的信息,请参考 [Admin API 的 Service 对象](../admin-api.md#service)。 + +## 配置示例 + +以下示例创建了一个启用限流插件的服务,并且将该服务绑定到 ID 为 `100` 和 `101` 的路由上。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. 创建服务。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/services/200 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }' + ``` + +2. 创建 ID 为 `100` 的路由,并绑定 ID 为 `200` 的服务。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/100 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "methods": ["GET"], + "uri": "/index.html", + "service_id": "200" + }' + ``` + +3. 
创建 ID 为 `101` 的路由,并绑定 ID 为 `200` 的服务。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/101 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "methods": ["GET"], + "uri": "/foo/index.html", + "service_id": "200" + }' + ``` + +当然你也可以为路由指定不同的插件配置或上游。比如在以下示例中,我们设置了不同的限流参数,其他部分(比如上游)则继续使用上述服务中的配置参数。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/102 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/bar/index.html", + "id": "102", + "service_id": "200", + "plugins": { + "limit-count": { + "count": 2000, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } +}' +``` + +:::tip 提示 + +当路由和服务都启用同一个插件时,路由中的插件配置会优先于服务。更多信息,请参考[Plugin](./plugin.md)。 + +::: diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/upstream.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/upstream.md new file mode 100644 index 0000000..5e7b15b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/terminology/upstream.md @@ -0,0 +1,255 @@ +--- +title: Upstream +keywords: + - APISIX + - API 网关 + - 上游 + - Upstream +description: 本文介绍了 Apache APISIX Upstream 对象的作用以及如何使用 Upstream。 +--- + + + +## 描述 + +Upstream(也称之为上游)是对虚拟主机抽象,即应用层服务或节点的抽象。你可以通过 Upstream 对象对多个服务节点按照配置规则进行负载均衡。 + +上游的地址信息可以直接配置到[路由](./route.md)(或[服务](./service.md))中。 + +![Upstream 示例](../../../assets/images/upstream-example.png) + +如上图所示,当多个路由(或服务)使用该上游时,你可以单独创建上游对象,在路由中通过使用 `upstream_id` 的方式引用资源,减轻维护压力。 + +你也可以将上游的信息直接配置在指定路由或服务中,不过路由中的配置优先级更高,优先级行为与[插件](./plugin.md) 非常相似。 + +## 配置参数 + +APISIX 的 Upstream 对象除了基本的负载均衡算法外,还支持对上游做主被动健康检查、重试等逻辑。更多信息,请参考 [Admin API 中的 Upstream 资源](../admin-api.md#upstream)。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +1. 
创建上游对象用例。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/upstreams/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "type": "chash", + "key": "remote_addr", + "nodes": { + "127.0.0.1:80": 1, + "httpbin.org:80": 2 + } + }' + ``` + + 上游对象创建后,可以被路由或服务引用。 + +2. 在路由中使用创建的上游对象。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "uri": "/index.html", + "upstream_id": 1 + }' + ``` + +3. 为方便使用,你也可以直接把上游信息直接配置在某个路由或服务。 + +以下示例是将上游信息直接配置在路由中: + +```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }' +``` + +## 使用示例 + +- 配置健康检查的示例。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + } + "type": "roundrobin", + "retries": 2, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 2, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }' + ``` + + 更多信息,请参考[健康检查的文档](../tutorials/health-check.md)。 + +以下是使用不同 [`hash_on`](../admin-api.md#upstream-body-request-methods) 类型的配置示例: + +### Consumer + +1. 创建一个 Consumer 对象。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/consumers \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + } + } + }' + ``` + +2. 
创建路由,启用 `key-auth` 插件,配置 `upstream.hash_on` 的类型为 `consumer`。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "type": "chash", + "hash_on": "consumer" + }, + "uri": "/server_port" + }' + ``` + +3. 测试请求,认证通过后的 `consumer_name` 将作为负载均衡哈希算法的哈希值。 + + ```shell + curl http://127.0.0.1:9080/server_port -H "apikey: auth-jack" + ``` + +### Cookie + +1. 创建路由并配置 `upstream.hash_on` 的类型为 `cookie`。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "uri": "/hash_on_cookie", + "upstream": { + "key": "sid", + "type": "chash", + "hash_on": "cookie", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } + }' + ``` + +2. 客户端请求携带 `Cookie`。 + + ```shell + curl http://127.0.0.1:9080/hash_on_cookie \ + -H "X-API-KEY: $admin_key" \ + -H "Cookie: sid=3c183a30cffcda1408daf1c61d47b274" + ``` + +### Header + +1. 创建路由并配置 `upstream.hash_on` 的类型为 `header`,`key` 为 `content-type`。 + + ```shell + curl http://127.0.0.1:9180/apisix/admin/routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d ' + { + "uri": "/hash_on_header", + "upstream": { + "key": "content-type", + "type": "chash", + "hash_on": "header", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } + }' + ``` + +2. 
客户端请求携带 `content-type` 的 `header`。 + +```shell + curl http://127.0.0.1:9080/hash_on_header \ + -H "X-API-KEY: $admin_key" \ + -H "Content-Type: application/json" +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/client-to-apisix-mtls.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/client-to-apisix-mtls.md new file mode 100644 index 0000000..a697c6d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/client-to-apisix-mtls.md @@ -0,0 +1,328 @@ +--- +title: 配置客户端与 APISIX 之间的双向认证(mTLS) +keywords: + - mTLS + - API 网关 + - APISIX +description: 本文介绍了如何在客户端和 Apache APISIX 之间配置双向认证(mTLS)。 +--- + + + +mTLS 是一种双向身份认证的方式。如果在你的网络环境中,要求只有受信任的客户端才可以访问服务端,那么可以启用 mTLS 来验证客户端的身份,保证服务端 API 的安全。本文主要介绍了如何配置客户端与 Apache APISIX 之间的双向认证(mTLS)。 + +## 配置 + +本示例包含以下过程: + +1. 生成证书; +2. 在 APISIX 中配置证书; +3. 在 APISIX 中创建并配置路由; +4. 测试验证。 + +为了使测试结果更加清晰,本文提到的示例会向上游传递一些有关客户端证书的信息,其中包括:`serial`,`fingerprint` 和 `common name`。 + +### 生成证书 + +我们需要生成三个测试证书,分别是根证书、服务器证书、客户端证书。只需通过以下命令,就可以通过 `OpenSSL` 生成我们需要的测试证书。 + +```shell +# 根证书 +openssl genrsa -out ca.key 2048 +openssl req -new -sha256 -key ca.key -out ca.csr -subj "/CN=ROOTCA" +openssl x509 -req -days 36500 -sha256 -extensions v3_ca -signkey ca.key -in ca.csr -out ca.cer + +# 服务器证书 +openssl genrsa -out server.key 2048 +# 注意:CN 值中的 `test.com` 为我们要测试的域名/主机名。 +openssl req -new -sha256 -key server.key -out server.csr -subj "/CN=test.com" +openssl x509 -req -days 36500 -sha256 -extensions v3_req -CA ca.cer -CAkey ca.key -CAserial ca.srl -CAcreateserial -in server.csr -out server.cer + +# 客户端证书 +openssl genrsa -out client.key 2048 +openssl req -new -sha256 -key client.key -out client.csr -subj "/CN=CLIENT" +openssl x509 -req -days 36500 -sha256 -extensions v3_req -CA ca.cer -CAkey ca.key -CAserial ca.srl -CAcreateserial -in client.csr -out client.cer + +# 将客户端证书转换为 pkcs12 供 Windows 使用(可选) +openssl pkcs12 -export -clcerts -in client.cer -inkey client.key -out client.p12 +``` + 
+### 在 APISIX 中配置证书 + +使用 `curl` 命令请求 APISIX Admin API 创建一个 SSL 资源并指定 SNI。 + +:::note 注意 + +证书中的换行需要替换为其转义字符 `\n` + +::: + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/ssls/1' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "sni": "test.com", + "cert": "<服务器证书>", + "key": "<服务器证书私钥>", + "client": { + "ca": "<客户端证书公钥>" + } +}' +``` + +- `sni`:指定证书的域名(CN),当客户端尝试通过 TLS 与 APISIX 握手时,APISIX 会将 `ClientHello` 中的 SNI 数据与该字段进行匹配,找到对应的服务器证书进行握手。 +- `cert`:服务器证书。 +- `key`:服务器证书的私钥。 +- `client.ca`:用来验证客户端证书的 CA 文件。为了演示方便,这里使用了同一个 `CA`。 + +### 配置测试路由 + +使用 `curl` 命令请求 APISIX Admin API 创建一个路由。 + +```shell +curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/1' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "uri": "/anything", + "plugins": { + "proxy-rewrite": { + "headers": { + "X-Ssl-Client-Fingerprint": "$ssl_client_fingerprint", + "X-Ssl-Client-Serial": "$ssl_client_serial", + "X-Ssl-Client-S-DN": "$ssl_client_s_dn" + } + } + }, + "upstream": { + "nodes": { + "httpbin.org":1 + }, + "type":"roundrobin" + } +}' +``` + +APISIX 会根据 SNI 和上一步创建的 SSL 资源自动处理 TLS 握手,所以我们不需要在路由中指定主机名(但也可以显式地指定主机名)。 + +另外,上面 `curl` 命令中,我们启用了 `proxy-rewrite` 插件,它将动态地更新请求头的信息,示例中变量值的来源是 `NGINX` 变量,你可以在这里找到它们:http://nginx.org/en/docs/http/ngx_http_ssl_module.html#variables。 + +### 测试验证 + +由于我们使用域名 `test.com` 作为测试域名,在开始验证之前,我们必须先将测试域名添加到你的 DNS 或者本地的 `hosts` 文件中。 + +1. 如果我们不使用 `hosts`,只是想测试一下结果,那么你可以使用下面的命令直接进行测试: + +``` +curl --resolve "test.com:9443:127.0.0.1" https://test.com:9443/anything -k --cert ./client.cer --key ./client.key +``` + +2. 如果你需要修改 `hosts`,请阅读下面示例(以 Ubuntu 为例): + +- 修改 /etc/hosts 文件 + + ```shell + # 127.0.0.1 localhost + 127.0.0.1 test.com + ``` + +- 验证测试域名是否生效 + + ```shell + ping test.com + + PING test.com (127.0.0.1) 56(84) bytes of data. 
+ 64 bytes from localhost.localdomain (127.0.0.1): icmp_seq=1 ttl=64 time=0.028 ms + 64 bytes from localhost.localdomain (127.0.0.1): icmp_seq=2 ttl=64 time=0.037 ms + 64 bytes from localhost.localdomain (127.0.0.1): icmp_seq=3 ttl=64 time=0.036 ms + 64 bytes from localhost.localdomain (127.0.0.1): icmp_seq=4 ttl=64 time=0.031 ms + ^C + --- test.com ping statistics --- + 4 packets transmitted, 4 received, 0% packet loss, time 3080ms + rtt min/avg/max/mdev = 0.028/0.033/0.037/0.003 ms + ``` + +- 测试 + + ```shell + curl https://test.com:9443/anything -k --cert ./client.cer --key ./client.key + ``` + + 然后你将收到下面的响应体: + + ```shell + { + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "*/*", + "Host": "test.com", + "User-Agent": "curl/7.81.0", + "X-Amzn-Trace-Id": "Root=1-63256343-17e870ca1d8f72dc40b2c5a9", + "X-Forwarded-Host": "test.com", + "X-Ssl-Client-Fingerprint": "c1626ce3bca723f187d04e3757f1d000ca62d651", + "X-Ssl-Client-S-Dn": "CN=CLIENT", + "X-Ssl-Client-Serial": "5141CC6F5E2B4BA31746D7DBFE9BA81F069CF970" + }, + "json": null, + "method": "GET", + "origin": "127.0.0.1", + "url": "http://test.com/anything" + } + ``` + +由于我们在示例中配置了 `proxy-rewrite` 插件,我们可以看到响应体中包含上游收到的请求体,包含了正确数据。 + +## 基于对 URI 正则表达式匹配,绕过 MTLS + +APISIX 允许配置 URI 白名单以便绕过 MTLS。如果请求的 URI 在白名单内,客户端证书将不被检查。注意,如果针对白名单外的 URI 发请求,而该请求缺乏客户端证书或者提供了非法客户端证书,会得到 HTTP 400 响应,而不是在 SSL 握手阶段被拒绝。 + +### 时序图 + +![skip mtls](../../../assets/images/skip-mtls.png) + +### 例子 + +1. 
配置路由和证书 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "nodes": { + "httpbin.org": 1 + } + } +}' + +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert": "'"$( GET /uuid HTTP/2 +> Host: admin.apisix.dev:9443 +> user-agent: curl/7.68.0 +> accept: */* +> +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* old SSL session ID is stale, removing +* Connection state changed (MAX_CONCURRENT_STREAMS == 128)! +< HTTP/2 400 +< date: Fri, 21 Apr 2023 07:53:23 GMT +< content-type: text/html; charset=utf-8 +< content-length: 229 +< server: APISIX/3.2.0 +< + +400 Bad Request + +
<center><h1>400 Bad Request</h1></center>
<hr><center>openresty</center>
<p><em>Powered by APISIX.</em></p>
</body>
</html>
+ +* Connection #0 to host admin.apisix.dev left intact +``` + +3. 虽然没提供客户端证书,但是 URI 在白名单内,请求会被成功处理和响应。 + +```bash +curl https://admin.apisix.dev:9443/anything/foobar -i \ +--resolve 'admin.apisix.dev:9443:127.0.0.1' --cacert t/certs/mtls_ca.crt +HTTP/2 200 +content-type: application/json +content-length: 416 +date: Fri, 21 Apr 2023 07:58:28 GMT +access-control-allow-origin: * +access-control-allow-credentials: true +server: APISIX/3.2.0 +... +``` + +## 总结 + +想了解更多有关 Apache APISIX 的 mTLS 功能介绍,可以阅读:[TLS 双向认证](../mtls.md)。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/expose-api.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/expose-api.md new file mode 100644 index 0000000..9561717 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/expose-api.md @@ -0,0 +1,126 @@ +--- +title: 发布 API +keywords: + - API 网关 + - Apache APISIX + - 发布路由 + - 创建服务 +description: 本文介绍了如何通过 Apache APISIX 发布服务和路由。 +--- + + + +## 描述 + +本文将引导你了解 APISIX 的上游、路由以及服务的概念,并介绍如何通过 APISIX 发布你的 API。 + +## 概念介绍 + +### 上游 + +[Upstream](../terminology/upstream.md) 也称为上游,上游是对虚拟主机的抽象,即应用层服务或节点的抽象。 + +上游的作用是按照配置规则对服务节点进行负载均衡,它的地址信息可以直接配置到路由或服务上。当多个路由或服务引用同一个上游时,可以通过创建上游对象,在路由或服务中使用上游 ID 的方式引用上游,减轻维护压力。 + +### 路由 + +[Route](../terminology/route.md) 也称为路由,是 APISIX 中最基础和最核心的资源对象。 + +APISIX 可以通过路由定义规则来匹配客户端请求,根据匹配结果加载并执行相应的[插件](../terminology/plugin.md),最后把请求转发给到指定的上游服务。路由中主要包含三部分内容:匹配规则、插件配置和上游信息。 + +### 服务 + +[Service](../terminology/service.md) 也称为服务,是某类 API 的抽象(也可以理解为一组 Route 的抽象)。它通常与上游服务抽象是一一对应的,Route 与 Service 之间,通常是 N:1 的关系。 + +## 前提条件 + +在进行如下操作前,请确保你已经通过 Docker [启动 APISIX](../installation-guide.md)。 + +## 公开你的服务 + +1. 
创建上游。 + +创建一个包含 `httpbin.org` 的上游服务,你可以使用它进行测试。这是一个返回服务,它将返回我们在请求中传递的参数。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } +}' +``` + +在该命令中,我们指定了 Apache APISIX 的 Admin API Key 为 `edd1c9f034335f136f87ad84b625c8f1`,并且使用 `roundrobin` 作为负载均衡机制,并设置了 `httpbin.org:80` 为上游服务。为了将该上游绑定到路由,此处需要把 `upstream_id` 设置为 `1`。此处你可以在 `nodes` 下指定多个上游,以达到负载均衡的效果。 + +如需了解更多信息,请参考[上游](../terminology/upstream.md)。 + +2. 创建路由。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "methods": ["GET"], + "host": "example.com", + "uri": "/anything/*", + "upstream_id": "1" +}' +``` + +:::note 注意 + +创建上游非必须步骤,你可以通过在路由中,添加 `upstream` 对象,达到上述的效果。例如: + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ +-H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "methods": ["GET"], + "host": "example.com", + "uri": "/anything/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +::: + +3. 
测试路由。 + +在创建完成路由后,你可以通过以下命令测试路由是否正常: + +``` +curl -i -X GET "http://127.0.0.1:9080/anything/get?foo1=bar1&foo2=bar2" -H "Host: example.com" +``` + +该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/get?foo1=bar1&foo2=bar2`。 + +## 更多教程 + +你可以查看[保护 API](./protect-api.md) 来保护你的 API。 + +接下来,你可以通过 APISIX 的一些[插件](../plugins/batch-requests.md),实现更多功能。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/health-check.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/health-check.md new file mode 100644 index 0000000..870c4b4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/health-check.md @@ -0,0 +1,240 @@ +--- +title: 健康检查 +keywords: + - APISIX + - API 网关 + - 健康检查 +description: 本文介绍了如何使用 API 网关 Apache APISIX 的健康检查功能来检查上游节点的健康状态。 +--- + + +## 描述 + +本文主要介绍了 Apache APISIX 的健康检查功能。健康检查功能可以在上游节点发生故障或者迁移时,将请求代理到健康的节点上,最大程度避免服务不可用的问题。APISIX 的健康检查功能使用 [lua-resty-healthcheck](https://github.com/api7/lua-resty-healthcheck) 实现,并分为主动检查和被动检查。 + +## 主动健康检查 + +主动健康检查主要是指 APISIX 通过预设的探针类型,主动探测上游节点的存活性。目前 APISIX 支持 `HTTP`、`HTTPS`、`TCP` 三种探针类型。 + +当发向健康节点 A 的 N 个连续探针都失败时(取决于如何配置),则该节点将被标记为不健康,不健康的节点将会被 APISIX 的负载均衡器忽略,无法收到请求;若某个不健康的节点,连续 M 个探针都成功,则该节点将被重新标记为健康,进而可以被代理。 + +## 被动健康检查 + +被动健康检查是指,通过判断从 APISIX 转发到上游节点的请求响应状态,来判断对应的上游节点是否健康。相对于主动健康检查,被动健康检查的方式无需发起额外的探针,但是也无法提前感知节点状态,可能会有一定量的失败请求。 + +若发向健康节点 A 的 N 个连续请求都被判定为失败(取决于如何配置),则该节点将被标记为不健康。 + +:::note 注意 + +由于不健康的节点无法收到请求,仅使用被动健康检查策略无法重新将节点标记为健康,因此通常需要结合主动健康检查策略。 + +::: + +:::tip 提示 + +- 只有在 `upstream` 被请求时才会开始健康检查,如果 `upstream` 被配置但没有被请求,不会触发启动健康检查。 +- 如果没有健康的节点,那么请求会继续发送给上游。 + +::: + +## 属性 + +| 名称 | 配置类型 | 类型 | 有效值 | 默认值 | 描述 | +| ----------------------------------------------- | ------------------ | ------- | -------------------- | --------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------- | +| upstream.checks.active.type | 主动检查 | string | `http` 
`https` `tcp` | http | 主动检查的类型。 | +| upstream.checks.active.timeout | 主动检查 | integer | | 1 | 主动检查的超时时间(单位为秒)。 | +| upstream.checks.active.concurrency | 主动检查 | integer | | 10 | 主动检查时同时检查的目标数。 | +| upstream.checks.active.http_path | 主动检查 | string | | / | 主动检查的 HTTP 请求路径。 | +| upstream.checks.active.host | 主动检查 | string | | ${upstream.node.host} | 主动检查的 HTTP 请求主机名。 | +| upstream.checks.active.port | 主动检查 | integer | `1` 至 `65535` | ${upstream.node.port} | 主动检查的 HTTP 请求主机端口。 | +| upstream.checks.active.https_verify_certificate | 主动检查 | boolean | | true | 主动检查使用 HTTPS 类型检查时,是否检查远程主机的 SSL 证书。 | +| upstream.checks.active.req_headers | 主动检查 | array | | [] | 主动检查使用 HTTP 或 HTTPS 类型检查时,设置额外的请求头信息。 | +| upstream.checks.active.healthy.interval | 主动检查(健康节点)| integer | `>= 1` | 1 | 主动检查(健康节点)检查的间隔时间(单位为秒)| +| upstream.checks.active.healthy.http_statuses | 主动检查(健康节点)| array | `200` 至 `599` | [200, 302] | 主动检查(健康节点)HTTP 或 HTTPS 类型检查时,健康节点的 HTTP 状态码。 | +| upstream.checks.active.healthy.successes | 主动检查(健康节点)| integer | `1` 至 `254` | 2 | 主动检查(健康节点)确定节点健康的次数。 | +| upstream.checks.active.unhealthy.interval | 主动检查(非健康节点)| integer | `>= 1` | 1 | 主动检查(非健康节点)检查的间隔时间(单位为秒)| +| upstream.checks.active.unhealthy.http_statuses | 主动检查(非健康节点)| array | `200` 至 `599` | [429, 404, 500, 501, 502, 503, 504, 505] | 主动检查(非健康节点)HTTP 或 HTTPS 类型检查时,非健康节点的 HTTP 状态码。 | +| upstream.checks.active.unhealthy.http_failures | 主动检查(非健康节点)| integer | `1` 至 `254` | 5 | 主动检查(非健康节点)HTTP 或 HTTPS 类型检查时,确定节点非健康的次数。 | +| upstream.checks.active.unhealthy.tcp_failures | 主动检查(非健康节点)| integer | `1` 至 `254` | 2 | 主动检查(非健康节点)TCP 类型检查时,确定节点非健康的次数。 | +| upstream.checks.active.unhealthy.timeouts | 主动检查(非健康节点)| integer | `1` 至 `254` | 3 | 主动检查(非健康节点)确定节点非健康的超时次数。 | +| upstream.checks.passive.type | 被动检查 | string | `http` `https` `tcp` | http | 被动检查的类型。 | +| upstream.checks.passive.healthy.http_statuses | 被动检查(健康节点)| array | `200` 至 `599` | [200, 201, 202, 203, 204, 205, 206, 207, 208, 226, 300, 301, 302, 303, 304, 305, 306, 307, 
308] | 被动检查(健康节点)HTTP 或 HTTPS 类型检查时,健康节点的 HTTP 状态码。 | +| upstream.checks.passive.healthy.successes | 被动检查(健康节点)| integer | `0` 至 `254` | 5 | 被动检查(健康节点)确定节点健康的次数。 | +| upstream.checks.passive.unhealthy.http_statuses | 被动检查(非健康节点)| array | `200` 至 `599` | [429, 500, 503] | 被动检查(非健康节点)HTTP 或 HTTPS 类型检查时,非健康节点的 HTTP 状态码。 | +| upstream.checks.passive.unhealthy.tcp_failures | 被动检查(非健康节点)| integer | `0` 至 `254` | 2 | 被动检查(非健康节点)TCP 类型检查时,确定节点非健康的次数。 | +| upstream.checks.passive.unhealthy.timeouts | 被动检查(非健康节点)| integer | `0` 至 `254` | 7 | 被动检查(非健康节点)确定节点非健康的超时次数。 | +| upstream.checks.passive.unhealthy.http_failures | 被动检查(非健康节点)| integer | `0` 至 `254` | 5 | 被动检查(非健康节点)HTTP 或 HTTPS 类型检查时,确定节点非健康的次数。 | + +## 配置示例 + +你可以通过 Admin API 在路由中启用健康检查功能: + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1970": 1 + }, + "type": "roundrobin", + "retries": 2, + "checks": { + "active": { + "timeout": 5, + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 2, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + }, + "req_headers": ["User-Agent: curl/7.29.0"] + }, + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [500], + "http_failures": 3, + "tcp_failures": 3 + } + } + } + } +}' +``` + +启用成功后,如果 APISIX 探测到不健康的节点,将会在错误日志中输出如下日志: + +```shell +enabled healthcheck passive while logging request +failed to receive status line from 'nil (127.0.0.1:1980)': closed +unhealthy TCP increment (1/2) for '(127.0.0.1:1980)' +failed to receive status line 
from 'nil (127.0.0.1:1980)': closed +unhealthy TCP increment (2/2) for '(127.0.0.1:1980' +``` + +:::tip 提示 + +需要将错误日志的级别调整为 `info` 才可以观测到上述日志信息 + +::: + +你可以通过[控制接口](../control-api.md) 中的 `GET /v1/healthcheck` 接口获取健康检查信息。如下所示: + +```shell + +curl http://127.0.0.1:9090/v1/healthcheck/upstreams/healthycheck -s | jq . + +``` + +## 健康检查信息 + +APISIX 提供了丰富的健康检查信息,其中 `status` 以及 `counter` 的返回对于健康检查是至关重要的。在 APISIX 中,节点有四个状态:`healthy`、`unhealthy`、`mostly_unhealthy`、`mostly_healthy`。`mostly_healthy` 状态表示当前节点状态是健康的,但在健康检查期间,节点健康检测并不是一直是成功的。`mostly_unhealthy` 状态表示当前节点状态是不健康的,但在健康检查期间,节点健康检测并不是一直是失败的。节点的状态转换取决于本次健康检查的成功或失败,以及 `counter` 中记录的 `tcp_failure`、`http_failure`、`success`、`timeout_failure` 四个数据。 + +获取健康检查信息,通过以下 curl 命令可以获取健康检查信息: + +```shell +curl -i http://127.0.0.1:9090/v1/healthcheck +``` + +响应示例: + +```json +[ + { + "nodes": {}, + "name": "/apisix/routes/1", + "type": "http" + }, + { + "nodes": [ + { + "port": 1970, + "hostname": "127.0.0.1", + "status": "healthy", + "ip": "127.0.0.1", + "counter": { + "tcp_failure": 0, + "http_failure": 0, + "success": 0, + "timeout_failure": 0 + } + }, + { + "port": 1980, + "hostname": "127.0.0.1", + "status": "healthy", + "ip": "127.0.0.1", + "counter": { + "tcp_failure": 0, + "http_failure": 0, + "success": 0, + "timeout_failure": 0 + } + } + ], + "name": "/apisix/routes/example-hc-route", + "type": "http" + } +] +``` + +### 状态转换图 + +![image](../../../assets/images/health_check_node_state_diagram.png) + +请注意,所有节点在没有初始探测的情况下都以`healthy`状态启动,计数器仅在状态更改时重置和更新。因此,当节点处于`healthy`状态且所有后续检查都成功时,`success`计数器不会更新,保持为零。 + +### counter 信息 + +若健康检查失败,`counter` 中的 `success` 计数将被置零。若健康检查成功,则会将 `tcp_failure`、`http_failure`、`timeout_failure` 数据置零。 + +| 名称 | 描述 | 作用 | +|----------------|------------------------|----------------------------------------------------------------------------| +|success | 健康检查成功的次数 |当 success 大于 healthy.successes 配置值时,节点会变为 healthy 状态 | +|tcp_failure | TCP 类型健康检查失败次数 |当 tcp_failure 大于 unhealthy.tcp_failures 配置值时,节点会变为 
unhealthy 状态 | +|http_failure | HTTP 类型的健康检查失败次数 |当 http_failure 大于 unhealthy.http_failures 配置值时,节点会变为 unhealthy 状态 | +|timeout_failure | 节点健康检查超时次数 |当 timeout_failure 大于 unhealthy.timeouts 配置值时,节点会变为 unhealthy 状态 | diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/keycloak-oidc.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/keycloak-oidc.md new file mode 100644 index 0000000..94acab7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/keycloak-oidc.md @@ -0,0 +1,467 @@ +--- +title: Set Up SSO with Keycloak (OIDC) +keywords: + - APISIX + - API 网关 + - OIDC + - Keycloak +description: 本文介绍如何使用 openid-connect 插件,通过 authorization code grant、client credentials grant 和 password grant 将 APISIX 与 Keycloak 集成。 +--- + + + +[OpenID Connect (OIDC)](https://openid.net/connect/) 是 [OAuth 2.0 协议](https://www.rfc-editor.org/rfc/rfc6749) 之上的简单身份层。它允许客户端基于身份提供者执行的身份验证来验证最终用户的身份,以及以可互操作和类似 REST 的方式获取有关最终​​用户的基本个人资料信息。借助 APISIX 和 [Keycloak](https://www.keycloak.org/),您可以实现基于 OIDC 的身份验证流程来保护您的 API 并启用单点登录 (SSO)。 + +[Keycloak](https://www.keycloak.org/) 是适用于现代应用程序和服务的开源身份和访问管理解决方案。Keycloak 支持单点登录 (SSO),这使得服务能够通过 OIDC 和 OAuth 2.0 等协议与 Keycloak 进行交互。此外,Keycloak 还支持将身份验证委托给第三方身份提供商,例如 Facebook 和 Google。 + +本教程将向您展示如何使用 [`openid-connect`](/hub/openid-connect) 插件,通过 [authorization code grant](#implement-authorization-code-grant)、[client credentials grant](#implement-client-credentials-grant) 和 [password grant](#implement-password-grant) 将 APISIX 与 Keycloak 集成。 + +## 配置 Keycloak + +在 Docker 中以 [开发模式](https://www.keycloak.org/server/configuration#_starting_keycloak_in_development_mode) 启动一个名为 `apisix-quickstart-keycloak` 的 Keycloak 实例,管理员名称为 `quickstart-admin`,密码为 `quickstart-admin-pass`,暴露的端口映射到宿主机上的 `8080`: + +```shell +docker run -d --name "apisix-quickstart-keycloak" \ + -e 'KEYCLOAK_ADMIN=quickstart-admin' \ + -e 'KEYCLOAK_ADMIN_PASSWORD=quickstart-admin-pass' \ + -p 8080:8080 \ + quay.io/keycloak/keycloak:18.0.2 
start-dev +``` + +Keycloak 提供了一个易于使用的 Web UI,帮助管理员管理所有资源,例如客户端、角色和用户。 + +在浏览器中导航到 `http://localhost:8080` 以访问 Keycloak 网页,然后单击 __管理控制台__: + +![web-ui](https://static.api7.ai/uploads/2023/03/30/ItcwYPIx_web-ui.png) + +输入管理员用户名 `quickstart-admin` 和密码 `quickstart-admin-pass` 并登录: + +![admin-signin](https://static.api7.ai/uploads/2023/03/30/6W3pjzE1_admin-signin.png) + +您需要在以下步骤中保持登录状态来配置 Keycloak。 + +### 创建 Realm + +Keycloak 中的 realm 是管理用户、凭证和角色等资源的工作区。不同领域中的资源彼此隔离。您需要为 APISIX 创建一个名为`quickstart-realm` 的 realm。 + +在左侧菜单中,将鼠标悬停在 **Master** 上,然后在下拉菜单中选择 __Add realm__: + +![create-realm](https://static.api7.ai/uploads/2023/03/30/S1Xvqliv_create-realm.png) + +输入 realm 名称 `quickstart-realm`,然后单击 `__Create__` 进行创建: + +![add-realm](https://static.api7.ai/uploads/2023/03/30/jwb7QU8k_add-realm.png) + +### 创建 Client + +Keycloak 中的 client 是请求 Keycloak 对用户进行身份验证的实体。更多情况下,client 是希望使用 Keycloak 保护自身安全并提供单点登录解决方案的应用程序。APISIX 相当于负责向 Keycloak 发起身份验证请求的 client,因此您需要创建其对应的客户端,名为 `apisix-quickstart-client`。 + +单击 __Clients__ > __Create__,打开 __Add Client__ 页面: + +![create-client](https://static.api7.ai/uploads/2023/03/30/qLom0axN_create-client.png) + +输入 __Client ID__ 为 `apisix-quickstart-client`,然后选择 __Client Protocol__ 为 `openid-connect` 并 __Save__: + +![add-client](https://static.api7.ai/uploads/2023/03/30/X5on2r7x_add-client.png) + +Client `apisix-quickstart-client` 已创建。重定向到详细信息页面后,选择 `confidential` 作为 __Access Type__: + +![config-client](https://static.api7.ai/uploads/2023/03/30/v70c8y9F_config-client.png) + +当用户在 SSO 期间登录成功时,Keycloak 会携带状态和代码将客户端重定向到 __Valid Redirect URIs__ 中的地址。为简化操作,输入通配符 `*` 以将任何 URI 视为有效: + +![client-redirect](https://static.api7.ai/uploads/2023/03/30/xLxcyVkn_client-redirect.png) + +如果您正在 [使用 PKCE authorization code grant](#implement-authorization-code-grant),请在客户端的高级设置中配置 PKCE 质询方法: + +
+PKCE keycloak configuration +
+ +如果您正在实施 [client credentials grant](#implement-client-credentials-grant),请为 client 启用服务帐户: + +![enable-service-account](https://static.api7.ai/uploads/2023/12/29/h1uNtghd_sa.png) + +选择 __Save__ 以应用自定义配置。 + +### 创建 User + +Keycloak 中的用户是能够登录系统的实体。他们可以拥有与自己相关的属性,例如用户名、电子邮件和地址。 + +如果您只实施 [client credentials grant](#implement-client-credentials-grant),则可以 [跳过此部分](#obtain-the-oidc-configuration)。 + +点击 __Users__ > __Add user__ 打开 __Add user__ 页面: + +![create-user](https://static.api7.ai/uploads/2023/03/30/onQEp23L_create-user.png) + +点击 __Users__ > __Add user__ 打开 __Add user__ 页面: + +![add-user](https://static.api7.ai/uploads/2023/03/30/EKhuhgML_add-user.png) + +点击 __Credentials__,然后将 __Password__ 设置为 `quickstart-user-pass`。将 __Temporary__ 切换为 `OFF` 以关闭限制,这样您第一次登录时就无需更改密码: + +![user-pass](https://static.api7.ai/uploads/2023/03/30/rQKEAEnh_user-pass.png) + +## 获取 OIDC 配置 + +在本节中,您将从 Keycloak 获取关键的 OIDC 配置并将其定义为 shell 变量。本节之后的步骤将使用这些变量通过 shell 命令配置 OIDC。 + +:::info + +打开一个单独的终端按照步骤操作并定义相关的 shell 变量。然后本节之后的步骤可以直接使用定义的变量。 + +::: + +### 获取发现端点 + +单击 __Realm Settings__,然后右键单击 __OpenID Endpoints Configuration__ 并复制链接。 + +![get-discovery](https://static.api7.ai/uploads/2023/03/30/526lbJbg_get-discovery.png) + +该链接应与以下内容相同: + +```text +http://localhost:8080/realms/quickstart-realm/.well-known/openid-configuration +``` + +在 OIDC 身份验证期间需要使用此端点公开的配置值。使用您的主机 IP 更新地址并保存到环境变量: + +```shell +export KEYCLOAK_IP=192.168.42.145 # replace with your host IP +export OIDC_DISCOVERY=http://${KEYCLOAK_IP}:8080/realms/quickstart-realm/.well-known/openid-configuration +``` + +### 获取客户端 ID 和密钥 + +单击 __Clients__ > `apisix-quickstart-client` > __Credentials__,并从 __Secret__ 复制客户端密钥: + +![client-ID](https://static.api7.ai/uploads/2023/03/30/MwYmU20v_client-id.png) + +![client-secret](https://static.api7.ai/uploads/2023/03/30/f9iOG8aN_client-secret.png) + +将 OIDC 客户端 ID 和密钥保存到环境变量: + +```shell +export OIDC_CLIENT_ID=apisix-quickstart-client +export OIDC_CLIENT_SECRET=bSaIN3MV1YynmtXvU8lKkfeY0iwpr9cH # 
replace with your value +``` + +## 实现 Authorization Code Grant + +Authorization Code Grant 由 Web 和移动应用程序使用。流程从授权服务器在浏览器中显示登录页面开始,用户可以在其中输入其凭据。在此过程中,将短期授权码交换为访问令牌,APISIX 将其存储在浏览器会话 cookie 中,并将随访问上游资源服务器的每次请求一起发送。 + +要实现 Authorization Code Grant,请使用 `openid-connect` 插件创建一个路由,如下所示: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "bearer_only": false, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +或者,如果您想使用 PKCE 实现 authorization code grant,请使用 `openid-connect` 插件创建一个路由如下: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "bearer_only": false, + "use_pkce": true, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +### 使用有效凭证进行验证 + +在浏览器中导航至 `http://127.0.0.1:9080/anything/test`。请求将重定向到登录页面: + +![test-sign-on](https://static.api7.ai/uploads/2023/03/30/i38u1x9a_validate-sign.png) + +使用正确的用户名 `quickstart-user` 和密码 `quickstart-user-pass` 登录。如果成功,请求将被转发到 `httpbin.org`,您应该会看到类似以下内容的响应: + +```json +{ + "args": {}, + "data": "", + "files": {}, + "form": {}, + "headers": { + "Accept": "text/html..." + ... 
+ }, + "json": null, + "method": "GET", + "origin": "127.0.0.1, 59.71.244.81", + "url": "http://127.0.0.1/anything/test" +} +``` + +### 使用无效凭证进行验证 + +使用错误的凭证登录。您应该会看到身份验证失败: + +![test-sign-failed](https://static.api7.ai/uploads/2023/03/31/YOuSYX1r_validate-sign-failed.png) + +## 实现 Client Credential Grant + +在 client credential grant 中,客户端无需任何用户参与即可获得访问令牌。它通常用于机器对机器 (M2M) 通信。 + +要实现 client credential grant,请使用 `openid-connect` 插件创建路由,以使用身份提供者的 JWKS 端点来验证令牌。端点将从发现文档中获取。 + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "use_jwks": true, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +或者,如果您想使用自省端点来验证令牌,请按如下方式创建路由: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "bearer_only": true, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +自省端点将从发现文档中获取。 + +### 使用有效访问令牌进行验证 + +在 [令牌端点](https://www.keycloak.org/docs/latest/securing_apps/#token-endpoint) 获取 Keycloak 服务器的访问令牌: + +```shell +curl -i "http://$KEYCLOAK_IP:8080/realms/quickstart-realm/protocol/openid-connect/token" -X POST \ + -d 'grant_type=client_credentials' \ + -d 'client_id='$OIDC_CLIENT_ID'' \ + -d 'client_secret='$OIDC_CLIENT_SECRET'' +``` + +预期响应类似于以下内容: + +```text 
+{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJoT3ludlBPY2d6Y3VWWnYtTU42bXZKMUczb0dOX2d6MFo3WFl6S2FSa1NBIn0.eyJleHAiOjE3MDM4MjU1NjQsImlhdCI6MTcwMzgyNTI2NCwianRpIjoiMWQ4NWE4N2UtZDFhMC00NThmLThiMTItNGZiYWM2ODA5YmYwIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMS44Mzo4MDgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwiYXVkIjoiYWNjb3VudCIsInN1YiI6IjE1OGUzOWFlLTk0YjAtNDI3Zi04ZGU3LTU3MTRhYWYwOGYzOSIsInR5cCI6IkJlYXJlciIsImF6cCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoiZW1haWwgcHJvZmlsZSIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwiY2xpZW50SG9zdCI6IjE3Mi4xNy4wLjEiLCJjbGllbnRJZCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsInByZWZlcnJlZF91c2VybmFtZSI6InNlcnZpY2UtYWNjb3VudC1hcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJjbGllbnRBZGRyZXNzIjoiMTcyLjE3LjAuMSJ9.TltzSXqrJuVID7aGrb35jn-oc07U_-jugSn-3jKz4A44LwtAsME_8b3qkmR4boMOIht_5pF6bnnp70MFAlg6JKu4_yIQDxF_GAHjnZXEO8OCKhtIKwXm2w-hnnJVIhIdGkIVkbPP0HfILuar_m0hpa53VpPBGYR-OS4pyh0KTUs8MB22xAEqyz9zjCm6SX9vXCqgeVkSpRW2E8NaGEbAdY25uY-ZC4dI_pON87Ey5e8GdD6HQLXQlGIOdCDi3N7k0HDoD9TZRv2bMRPfy4zVYm1ZlClIuF79A-ZBwr0c-XYuq7t6EY0gPGEXB-s0SaKlrIU5S9JBeVXRzYvqAih41g","expires_in":300,"refresh_expires_in":0,"token_type":"Bearer","not-before-policy":0,"scope":"email profile"} +``` + +将访问令牌保存到环境变量: + +```shell +# replace with your access token +export 
ACCESS_TOKEN="eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJoT3ludlBPY2d6Y3VWWnYtTU42bXZKMUczb0dOX2d6MFo3WFl6S2FSa1NBIn0.eyJleHAiOjE3MDM4MjU1NjQsImlhdCI6MTcwMzgyNTI2NCwianRpIjoiMWQ4NWE4N2UtZDFhMC00NThmLThiMTItNGZiYWM2ODA5YmYwIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMS44Mzo4MDgwL3JlYWxtcy9xdWlja3N0YXJ0LXJlYWxtIiwiYXVkIjoiYWNjb3VudCIsInN1YiI6IjE1OGUzOWFlLTk0YjAtNDI3Zi04ZGU3LTU3MTRhYWYwOGYzOSIsInR5cCI6IkJlYXJlciIsImF6cCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoiZW1haWwgcHJvZmlsZSIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwiY2xpZW50SG9zdCI6IjE3Mi4xNy4wLjEiLCJjbGllbnRJZCI6ImFwaXNpeC1xdWlja3N0YXJ0LWNsaWVudCIsInByZWZlcnJlZF91c2VybmFtZSI6InNlcnZpY2UtYWNjb3VudC1hcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJjbGllbnRBZGRyZXNzIjoiMTcyLjE3LjAuMSJ9.TltzSXqrJuVID7aGrb35jn-oc07U_-jugSn-3jKz4A44LwtAsME_8b3qkmR4boMOIht_5pF6bnnp70MFAlg6JKu4_yIQDxF_GAHjnZXEO8OCKhtIKwXm2w-hnnJVIhIdGkIVkbPP0HfILuar_m0hpa53VpPBGYR-OS4pyh0KTUs8MB22xAEqyz9zjCm6SX9vXCqgeVkSpRW2E8NaGEbAdY25uY-ZC4dI_pON87Ey5e8GdD6HQLXQlGIOdCDi3N7k0HDoD9TZRv2bMRPfy4zVYm1ZlClIuF79A-ZBwr0c-XYuq7t6EY0gPGEXB-s0SaKlrIU5S9JBeVXRzYvqAih41g" +``` + +使用有效的访问令牌向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer $ACCESS_TOKEN" +``` + +`HTTP/1.1 200 OK` 响应验证对上游资源的请求是否已获得授权。 + +### 使用无效访问令牌进行验证 + +使用无效访问令牌向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer invalid-access-token" +``` + +`HTTP/1.1 401 Unauthorized` 响应验证 OIDC 插件是否拒绝了具有无效访问令牌的请求。 + +### 验证无访问令牌 + +向无访问令牌的路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" +``` + +`HTTP/1.1 401 Unauthorized` 响应验证 OIDC 插件拒绝没有访问令牌的请求。 + +## 实施 Password Grant + +Password Grant 是一种将用户凭据交换为访问令牌的传统方法。 + +要实施 Password Grant,请使用 `openid-connect` 
插件创建路由,以使用身份提供者的 JWKS 端点来验证令牌。端点将从发现文档中获取。 + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "auth-with-oidc", + "uri":"/anything/*", + "plugins": { + "openid-connect": { + "use_jwks": true, + "client_id": "'"$OIDC_CLIENT_ID"'", + "client_secret": "'"$OIDC_CLIENT_SECRET"'", + "discovery": "'"$OIDC_DISCOVERY"'", + "scope": "openid profile", + "redirect_uri": "http://localhost:9080/anything/callback" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "httpbin.org:80":1 + } + } +}' +``` + +### 使用有效访问令牌进行验证 + +在 [令牌端点](https://www.keycloak.org/docs/latest/securing_apps/#token-endpoint) 获取 Keycloak 服务器的访问令牌: + +```shell +OIDC_USER=quickstart-user +OIDC_PASSWORD=quickstart-user-pass +curl -i "http://$KEYCLOAK_IP:8080/realms/quickstart-realm/protocol/openid-connect/token" -X POST \ + -d 'grant_type=password' \ + -d 'client_id='$OIDC_CLIENT_ID'' \ + -d 'client_secret='$OIDC_CLIENT_SECRET'' \ + -d 'username='$OIDC_USER'' \ + -d 'password='$OIDC_PASSWORD'' +``` + +预期响应类似于以下内容: + +```text 
+{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJ6U3FFaXN6VlpuYi1sRWMzZkp0UHNpU1ZZcGs4RGN3dXI1Mkx5V05aQTR3In0.eyJleHAiOjE2ODAxNjA5NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiMzQ5MTc4YjQtYmExZC00ZWZjLWFlYTUtZGY2MzJiMDJhNWY5IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJhY2NvdW50Iiwic3ViIjoiMTg4MTVjM2EtNmQwNy00YTY2LWJjZjItYWQ5NjdmMmIwMTFmIiwidHlwIjoiQmVhcmVyIiwiYXpwIjoiYXBpc2l4LXF1aWNrc3RhcnQtY2xpZW50Iiwic2Vzc2lvbl9zdGF0ZSI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoicHJvZmlsZSBlbWFpbCIsInNpZCI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwicHJlZmVycmVkX3VzZXJuYW1lIjoicXVpY2tzdGFydC11c2VyIn0.uD_7zfZv5182aLXu9-YBzBDK0nr2mE4FWb_4saTog2JTqFTPZZa99Gm8AIDJx2ZUcZ_ElkATqNUZ4OpWmL2Se5NecMw3slJReewjD6xgpZ3-WvQuTGpoHdW5wN9-Rjy8ungilrnAsnDA3tzctsxm2w6i9KISxvZrzn5Rbk-GN6fxH01VC5eekkPUQJcJgwuJiEiu70SjGnm21xDN4VGkNRC6jrURoclv3j6AeOqDDIV95kA_MTfBswDFMCr2PQlj5U0RTndZqgSoxwFklpjGV09Azp_jnU7L32_Sq-8coZd0nj5mSdbkJLJ8ZDQDV_PP3HjCP7EHdy4P6TyZ7oGvjw","expires_in":300,"refresh_expires_in":1800,"refresh_token":"eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICI0YjFiNTQ3Yi0zZmZjLTQ5YzQtYjE2Ni03YjdhNzIxMjk1ODcifQ.eyJleHAiOjE2ODAxNjI0NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiYzRjNjNlMTEtZTdlZS00ZmEzLWJlNGYtNDMyZWQ4ZmY5OTQwIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJodHRwOi8vMTkyLjE2OC40Mi4xNDU6ODA4MC9yZWFsbXMvcXVpY2tzdGFydC1yZWFsbSIsInN1YiI6IjE4ODE1YzNhLTZkMDctNGE2Ni1iY2YyLWFkOTY3ZjJiMDExZiIsInR5cCI6IlJlZnJlc2giLCJhenAiOiJhcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJzZXNzaW9uX3N0YXRlIjoiYjE2YjI2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2Iiwic2NvcGUiOiJwcm9maWxlIGVtYWlsIiwic2lkIjoiYjE2YjI
2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2In0.8xYP4bhDg1U9B5cTaEVD7B4oxNp8wwAYEynUne_Jm78","token_type":"Bearer","not-before-policy":0,"session_state":"b16b262e-1056-4515-a455-f25e077ccb76","scope":"profile email"} +``` + +将访问令牌和刷新令牌保存到环境变量中。刷新令牌将在刷新令牌步骤中使用。 + +```shell +# replace with your access token +export ACCESS_TOKEN="eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJ6U3FFaXN6VlpuYi1sRWMzZkp0UHNpU1ZZcGs4RGN3dXI1Mkx5V05aQTR3In0.eyJleHAiOjE2ODAxNjA5NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiMzQ5MTc4YjQtYmExZC00ZWZjLWFlYTUtZGY2MzJiMDJhNWY5IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJhY2NvdW50Iiwic3ViIjoiMTg4MTVjM2EtNmQwNy00YTY2LWJjZjItYWQ5NjdmMmIwMTFmIiwidHlwIjoiQmVhcmVyIiwiYXpwIjoiYXBpc2l4LXF1aWNrc3RhcnQtY2xpZW50Iiwic2Vzc2lvbl9zdGF0ZSI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImFjciI6IjEiLCJyZWFsbV9hY2Nlc3MiOnsicm9sZXMiOlsiZGVmYXVsdC1yb2xlcy1xdWlja3N0YXJ0LXJlYWxtIiwib2ZmbGluZV9hY2Nlc3MiLCJ1bWFfYXV0aG9yaXphdGlvbiJdfSwicmVzb3VyY2VfYWNjZXNzIjp7ImFjY291bnQiOnsicm9sZXMiOlsibWFuYWdlLWFjY291bnQiLCJtYW5hZ2UtYWNjb3VudC1saW5rcyIsInZpZXctcHJvZmlsZSJdfX0sInNjb3BlIjoicHJvZmlsZSBlbWFpbCIsInNpZCI6ImIxNmIyNjJlLTEwNTYtNDUxNS1hNDU1LWYyNWUwNzdjY2I3NiIsImVtYWlsX3ZlcmlmaWVkIjpmYWxzZSwicHJlZmVycmVkX3VzZXJuYW1lIjoicXVpY2tzdGFydC11c2VyIn0.uD_7zfZv5182aLXu9-YBzBDK0nr2mE4FWb_4saTog2JTqFTPZZa99Gm8AIDJx2ZUcZ_ElkATqNUZ4OpWmL2Se5NecMw3slJReewjD6xgpZ3-WvQuTGpoHdW5wN9-Rjy8ungilrnAsnDA3tzctsxm2w6i9KISxvZrzn5Rbk-GN6fxH01VC5eekkPUQJcJgwuJiEiu70SjGnm21xDN4VGkNRC6jrURoclv3j6AeOqDDIV95kA_MTfBswDFMCr2PQlj5U0RTndZqgSoxwFklpjGV09Azp_jnU7L32_Sq-8coZd0nj5mSdbkJLJ8ZDQDV_PP3HjCP7EHdy4P6TyZ7oGvjw" +export 
REFRESH_TOKEN="eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICI0YjFiNTQ3Yi0zZmZjLTQ5YzQtYjE2Ni03YjdhNzIxMjk1ODcifQ.eyJleHAiOjE2ODAxNjI0NjgsImlhdCI6MTY4MDE2MDY2OCwianRpIjoiYzRjNjNlMTEtZTdlZS00ZmEzLWJlNGYtNDMyZWQ4ZmY5OTQwIiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguNDIuMTQ1OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJhdWQiOiJodHRwOi8vMTkyLjE2OC40Mi4xNDU6ODA4MC9yZWFsbXMvcXVpY2tzdGFydC1yZWFsbSIsInN1YiI6IjE4ODE1YzNhLTZkMDctNGE2Ni1iY2YyLWFkOTY3ZjJiMDExZiIsInR5cCI6IlJlZnJlc2giLCJhenAiOiJhcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJzZXNzaW9uX3N0YXRlIjoiYjE2YjI2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2Iiwic2NvcGUiOiJwcm9maWxlIGVtYWlsIiwic2lkIjoiYjE2YjI2MmUtMTA1Ni00NTE1LWE0NTUtZjI1ZTA3N2NjYjc2In0.8xYP4bhDg1U9B5cTaEVD7B4oxNp8wwAYEynUne_Jm78" +``` + +使用有效的访问令牌向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer $ACCESS_TOKEN" +``` + +`HTTP/1.1 200 OK` 响应验证对上游资源的请求是否已获得授权。 + +### 使用无效访问令牌进行验证 + +使用无效访问令牌向路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" -H "Authorization: Bearer invalid-access-token" +``` + +`HTTP/1.1 401 Unauthorized` 响应验证 OIDC 插件是否拒绝了具有无效访问令牌的请求。 + +### 验证无访问令牌 + +向无访问令牌的路由发送请求: + +```shell +curl -i "http://127.0.0.1:9080/anything/test" +``` + +`HTTP/1.1 401 Unauthorized` 响应验证 OIDC 插件拒绝没有访问令牌的请求。 + +### 刷新令牌 + +要刷新访问令牌,请向 Keycloak 令牌端点发送请求,如下所示: + +```shell +curl -i "http://$KEYCLOAK_IP:8080/realms/quickstart-realm/protocol/openid-connect/token" -X POST \ + -d 'grant_type=refresh_token' \ + -d 'client_id='$OIDC_CLIENT_ID'' \ + -d 'client_secret='$OIDC_CLIENT_SECRET'' \ + -d 'refresh_token='$REFRESH_TOKEN'' +``` + +您应该看到类似以下的响应,其中包含新的访问令牌和刷新令牌,您可以将其用于后续请求和令牌刷新: + +```text 
+{"access_token":"eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJTdnVwLXlPMHhDdTJBVi1za2pCZ0h6SHZNaG1mcDVDQWc0NHpYb2QxVTlNIn0.eyJleHAiOjE3MzAyNzQ3NDUsImlhdCI6MTczMDI3NDQ0NSwianRpIjoiMjk2Mjk5MWUtM2ExOC00YWFiLWE0NzAtODgxNWEzNjZjZmM4IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMTUyLjU6ODA4MC9yZWFsbXMvcXVpY2tzdGFydC1yZWFsbSIsImF1ZCI6ImFjY291bnQiLCJzdWIiOiI2ZWI0ZTg0Yy00NmJmLTRkYzUtOTNkMC01YWM5YzE5MWU0OTciLCJ0eXAiOiJCZWFyZXIiLCJhenAiOiJhcGlzaXgtcXVpY2tzdGFydC1jbGllbnQiLCJzZXNzaW9uX3N0YXRlIjoiNTU2ZTQyYjktMjE2Yi00NTEyLWE5ZjAtNzE3ZTAyYTQ4MjZhIiwiYWNyIjoiMSIsInJlYWxtX2FjY2VzcyI6eyJyb2xlcyI6WyJkZWZhdWx0LXJvbGVzLXF1aWNrc3RhcnQtcmVhbG0iLCJvZmZsaW5lX2FjY2VzcyIsInVtYV9hdXRob3JpemF0aW9uIl19LCJyZXNvdXJjZV9hY2Nlc3MiOnsiYWNjb3VudCI6eyJyb2xlcyI6WyJtYW5hZ2UtYWNjb3VudCIsIm1hbmFnZS1hY2NvdW50LWxpbmtzIiwidmlldy1wcm9maWxlIl19fSwic2NvcGUiOiJlbWFpbCBwcm9maWxlIiwic2lkIjoiNTU2ZTQyYjktMjE2Yi00NTEyLWE5ZjAtNzE3ZTAyYTQ4MjZhIiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJxdWlja3N0YXJ0LXVzZXIifQ.KLqn1LQdazoPBqLLR856C35XpqbMO9I7WFt3KrDxZF1N8vwv4AvZYWI_2rsbdjCakh9JmPgyYRgEGufYLiDBsqy9CrMVejAIJPYsJIonIXBCp5Ysu92ODJuqtTKuuJ6K7dam7fisBFfCBbVvGspnZ3p0caedpOaF_kSd-F8ARHKVsmkuX3_ucDrP3UctjEXHezefTY4YHjNMB9wuMDPXX2vXt2BsOasnznsIHHHX-ZH8JY6eEfWPtfx0qAED6lVZICT6Rqj_j5-Cf9ogzFtLyy_XvtG9BbHME2B8AXYpxdzqxOxmVVbZdrB8elfmFjs1R3vUn2r3xA9hO_znZo_IoQ","expires_in":300,"refresh_expires_in":1800,"refresh_token":"eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICIwYWYwZTAwYy0xMThjLTRkNDktYmIwMS1iMDIwNDE3MmFjMzIifQ.eyJleHAiOjE3MzAyNzYyNDUsImlhdCI6MTczMDI3NDQ0NSwianRpIjoiZGQyZTJmYTktN2Y3Zi00MjM5LWEwODAtNWQyZDFiZTdjNzk4IiwiaXNzIjoiaHR0cDovLzE5Mi4xNjguMTUyLjU6ODA4MC9yZWFsbXMvcXVpY2tzdGFydC1yZWFsbSIsImF1ZCI6Imh0dHA6Ly8xOTIuMTY4LjE1Mi41OjgwODAvcmVhbG1zL3F1aWNrc3RhcnQtcmVhbG0iLCJzdWIiOiI2ZWI0ZTg0Yy00NmJmLTRkYzUtOTNkMC01YWM5YzE5MWU0OTciLCJ0eXAiOiJSZWZyZXNoIiwiYXpwIjoiYXBpc2l4LXF1aWNrc3RhcnQtY2xpZW50Iiwic2Vzc2lvbl9zdGF0ZSI6IjU1NmU0MmI5LTIxNmItNDUxMi1hOWYwLTcxN2UwMmE0ODI2YSIsInNjb3BlIjoiZW1haWwgcHJvZmlsZSIsInNpZCI6IjU1NmU0MmI5
LTIxNmItNDUxMi1hOWYwLTcxN2UwMmE0ODI2YSJ9.Uad4BVuojHfyxqedFT5BHliWjIqVDbjM-Xeme0G2AAg","token_type":"Bearer","not-before-policy":0,"session_state":"556e42b9-216b-4512-a9f0-717e02a4826a","scope":"email profile"} +``` diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/observe-your-api.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/observe-your-api.md new file mode 100644 index 0000000..d79797a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/observe-your-api.md @@ -0,0 +1,246 @@ +--- +title: 监控 API +keywords: + - API 网关 + - Apache APISIX + - 可观测性 + - 监控 + - 插件 +description: 本文介绍了 API 网关 Apache APISIX 可观察性插件并了解如何设置这些插件。 +--- + + + +APISIX 中提供了很多具有丰富功能的可观测性插件。你可以通过使用和设置这些插件,来了解 API 行为,进而使整个业务流程更加清晰。 + +## API 可观测性 + +**API 可观测性**已经成为 API 开发的一部分,因为它解决了与 API 一致性、可靠性和快速迭代 API 功能的相关问题。可观测性可分为三个关键部分:日志、指标、链路追踪,接下来让我们逐个了解它们。 + +![Observability of three key areas](https://static.apiseven.com/2022/09/14/6321cf14c555a.jpg) + +## 前提条件 + +在进行该教程之前,请确保你已经[公开服务](./expose-api.md)。 + +## 日志 + +在 APISIX 中,**日志**可分为访问日志和错误日志。访问日志主要记录了每个请求的上下文信息,错误日志则是 APISIX 运行打印的日志信息,包括 NGINX 和插件相关的信息。APISIX 的日志存储在 `./apisix/logs/` 目录下。当然你可以通过一些 APISIX 的日志插件,将 APISIX 的日志发送到指定的日志服务中,APISIX 提供了以下插件: + +- [http-logger](../plugins/http-logger.md) +- [skywalking-logger](../plugins/skywalking-logger.md) +- [tcp-logger](../plugins/tcp-logger.md) +- [kafka-logger](../plugins/kafka-logger.md) +- [rocketmq-logger](../plugins/rocketmq-logger.md) +- [udp-logger](../plugins/udp-logger.md) +- [clickhouse-logger](../plugins/clickhouse-logger.md) +- [error-logger](../plugins/error-log-logger.md) +- [google-cloud-logging](../plugins/google-cloud-logging.md) + +你可以在 APISIX [插件中心](../plugins/http-logger.md) 查看 APISIX 支持的所有日志插件。接下来我们将使用 `http-logger` 插件为你演示如何将 APISIX 的日志数据发送到 HTTP/HTTPS 服务器中。 + +:::note 注意 + +你可以使用 [mockbin.com](https://mockbin.org/) 生成一个模拟的 HTTP 服务器来存储和查看日志。 + +::: + +以下示例展示了在指定路由上启动 `http-logger` 的示例。 + +:::note + +您可以这样从 
`config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell + +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "plugins": { + "http-logger": { + "uri": "http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61" + } + }, + "upstream_id": "1", + "uri": "/get" +}' + +``` + +:::note 注意 + +你可以通过修改 `uri` 属性,将上述 `http-logger` 的服务器地址更换为你的服务器地址: + +```json +{ + "uri": "http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61" +} +``` + +::: + +创建成功后,你可以通过以下命令向 `get` 端点发送请求以生成日志。 + +```shell +curl -i http://127.0.0.1:9080/get +``` + +请求成功后,你可以单击[模拟服务器链接](http://mockbin.org/bin/5451b7cd-af27-41b8-8df1-282ffea13a61/log)查看访问日志。 + +![http-logger-plugin-test-screenshot](https://static.apiseven.com/2022/09/14/6321d1d83eb7a.png) + +## 指标 + +**指标**是在⼀段时间内测量的数值。与⽇志不同,指标在默认情况下是结构化的,这使得查询和优化存储变得更加容易。而 APISIX 也提供了 [Prometheus](../plugins/prometheus.md) 的插件来获取你的 API 指标,并在 Prometheus 中暴露它们。通过使用 APISIX 提供的 Grafana 仪表板元数据,并从 Prometheus 中获取指标,更加方便地监控你的 API。 + +你可以通过以下命令启用 `prometheus` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/get", + "plugins": { + "prometheus": {} + }, + "upstream_id": "1" +}' +``` + +启用成功后,你可以通过 `/apisix/prometheus/metrics` 接口获取 APISIX 的指标。 + +```shell +curl -i http://127.0.0.1:9091/apisix/prometheus/metrics +``` + +返回结果如下: + +```text +HTTP/1.1 200 OK +Server: openresty +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive + +# HELP apisix_batch_process_entries batch process remaining entries +# TYPE apisix_batch_process_entries gauge +apisix_batch_process_entries{name="http logger",route_id="1",server_addr="172.19.0.8"} 0 +# HELP apisix_etcd_modify_indexes Etcd modify index for APISIX keys +# TYPE apisix_etcd_modify_indexes gauge +apisix_etcd_modify_indexes{key="consumers"} 17819 
+apisix_etcd_modify_indexes{key="global_rules"} 17832 +apisix_etcd_modify_indexes{key="max_modify_index"} 20028 +apisix_etcd_modify_indexes{key="prev_index"} 18963 +apisix_etcd_modify_indexes{key="protos"} 0 +apisix_etcd_modify_indexes{key="routes"} 20028 +... +``` + +你还可以通过 `http://localhost:9090/targets` 在 Prometheus 仪表板上查看端点的状态。 + +![plu​​gin-orchestration-configure-rule-screenshot](https://static.apiseven.com/2022/09/14/6321d30b32024.png) + +如上图,APISIX 公开的指标端点已启动并正在运行。 + +现在,你可以查询 `apisix_http_status` 的指标,查看 APISIX 处理了哪些 HTTP 请求及其结果。 + +![prometheus-plugin-dashboard-query-http-status-screenshot](https://static.apiseven.com/2022/09/14/6321d30aed3b2.png) + +除此之外,你还可以查看在本地实例中运行的 Grafana 仪表板。请访问 `http://localhost:3000/`。 + +![prometheus-plugin-grafana-dashboard-screenshot](https://static.apiseven.com/2022/09/14/6321d30bba97c.png) + +目前,APISIX 还提供了其他两个关于指标的插件: + +- [Node status 插件](../plugins/node-status.md)(https://apisix.apache.org/docs/apisix/plugins/node-status/) +- [Datadog 插件](../plugins/datadog.md) + +## 链路追踪 + +**链路追踪**就是将一次请求还原成调用链路,并将该请求的调用情况使用拓扑的方式展现,比如展示各个微服务节点上的耗时,请求具体经过了哪些服务器以及每个服务节点的请求状态等内容。 + +[Zipkin](https://zipkin.io/) 一个开源的分布式追踪系统。APISIX 的[zipkin 插件](../plugins/zipkin.md) 支持根据 [Zipkin API 规范](https://zipkin.io/pages/instrumenting.html) 收集链路信息并报告给 Zipkin Collector。 + +:::tip 提示 + +使用该插件前,请确保你已经有一个正在运行的 Zipkin 实例。你可以使用 Docker 快速启动一个 Zipkin 实例: + +``` +docker run -d -p 9411:9411 openzipkin/zipkin +``` + +::: + +你可以通过如下示例,在指定路由中启用 `zipkin` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "methods": [ + "GET" + ], + "uri": "/get", + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9411/api/v2/spans", + "sample_ratio": 1 + } + }, + "upstream_id": "1" +}' +``` + +你可以通过以下命令请求 APISIX: + +```shell +curl -i http://127.0.0.1:9080/get +``` + +如下所示,返回结果中的 `header` 部分附加了一些额外的跟踪标识符(TraceId、SpanId 和 ParentId): + +```text +"X-B3-Parentspanid": "61bd3f4046a800e7", +"X-B3-Sampled": "1", 
+"X-B3-Spanid": "855cd5465957f414", +"X-B3-Traceid": "e18985df47dab632d62083fd96626692", +``` + +你可以通过访问 `http://127.0.0.1:9411/zipkin`,在 Zipkin 的 Web UI 上看到请求链路。 + +![Zipkin plugin output 1](https://static.apiseven.com/2022/09/14/6321dc27f3d33.png) + +![Zipkin plugin output 2](https://static.apiseven.com/2022/09/14/6321dc284049c.png) + +你也可以通过另外两个插件进行链路追踪: + +- [Skywalking 插件](../plugins/skywalking.md) + +- [OpenTelemetry 插件](../plugins/opentelemetry.md) + +## 总结 + +API 可观测性是一种用于在 API 世界中管理应用程序的框架,APISIX 的插件可以通过集成到多个可观测性平台来帮助你监控 API,让你更专注于开发核心业务功能,无需为集成多个可观测性应用花费更多时间。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/protect-api.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/protect-api.md new file mode 100644 index 0000000..833ee3f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/tutorials/protect-api.md @@ -0,0 +1,138 @@ +--- +title: 保护 API +keywords: + - API 网关 + - Apache APISIX + - 发布路由 + - 创建服务 +description: 本文介绍了如何通过 Apache APISIX 发布服务和路由。 +--- + + + +## 描述 + +本文将为你介绍使用限流限速和安全插件保护你的 API。 + +## 概念介绍 + +### 插件 + +[Plugin](../terminology/plugin.md) 也称之为插件,它是扩展 APISIX 应用层能力的关键机制,也是在使用 APISIX 时最常用的资源对象。插件主要是在 HTTP 请求或响应生命周期期间执行的、针对请求的个性化策略。插件可以与路由、服务或消费者绑定。 + +:::note 注意 + +如果 [路由](../terminology/route.md)、[服务](../terminology/service.md)、[插件配置](../terminology/plugin-config.md) 或消费者都绑定了相同的插件,则只有一份插件配置会生效,插件配置的优先级由高到低顺序是:消费者 > 路由 > 插件配置 > 服务。同时在插件执行过程中也会涉及 6 个阶段,分别是 `rewrite`、`access`、`before_proxy`、`header_filter`、`body_filter` 和 `log`。 + +::: + +## 前提条件 + +在进行该教程前,请确保你已经[公开服务](./expose-api.md)。 + +## 保护 API + +在很多时候,我们的 API 并不是处于一个非常安全的状态,它随时会收到不正常的访问,一旦访问流量突增,可能就会导致你的 API 发生故障,产生不必要的损失。因此你可以通过速率限制保护你的 API 服务,限制非正常的访问请求,保障 API 服务的稳定运行。对此,我们可以使用如下方式进行: + +1. 限制请求速率; +2. 限制单位时间内的请求数; +3. 延迟请求; +4. 拒绝客户端请求; +5. 
限制响应数据的速率。 + +为了实现上述功能,APISIX 提供了多个限流限速的插件,包括 [limit-conn](../plugins/limit-conn.md)、[limit-count](../plugins/limit-count.md) 和 [limit-req](../plugins/limit-req.md)。 + +- `limit-conn` 插件主要用于限制客户端对服务的并发请求数。 +- `limit-req` 插件使用漏桶算法限制对用户服务的请求速率。 +- `limit-count` 插件主要用于在指定的时间范围内,限制每个客户端总请求个数。 + +接下来,我们将以 `limit-count` 插件为例,为你介绍如何通过限流限速插件保护你的 API。 + +1. 创建路由。 + +:::note + +您可以这样从 `config.yaml` 中获取 `admin_key` 并存入环境变量: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +::: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream_id": "1" +}' + +``` + +以上配置中,使用了[公开服务](./expose-api.md)中创建的上游创建了一个 ID 为 `1` 的路由, ,并且启用了 `limit-count` 插件。该插件仅允许客户端在 60 秒内,访问上游服务 2 次,超过两次,则会返回 `503` 错误码。 + +2. 测试插件。 + +```shell + +curl http://127.0.0.1:9080/index.html + +``` + +使用上述命令连续访问三次后,则会出现如下错误。 + +``` + +503 Service Temporarily Unavailable + +

<center><h1>503 Service Temporarily Unavailable</h1></center>
<hr><center>openresty</center>
+ + +``` + +返回上述结果,则表示 `limit-count` 插件已经配置成功。 + +## 流量控制插件 + +APISIX 除了提供限流限速的插件外,还提供了很多其他的关于 **traffic** 插件来满足实际场景的需求: + +- [proxy-cache](../plugins/proxy-cache.md):该插件提供缓存后端响应数据的能力,它可以和其他插件一起使用。该插件支持基于磁盘和内存的缓存。 +- [request-validation](../plugins/request-validation.md):该插件用于提前验证向上游服务转发的请求。 +- [proxy-mirror](../plugins/proxy-mirror.md):该插件提供了镜像客户端请求的能力。流量镜像是将线上真实流量拷贝到镜像服务中,以便在不影响线上服务的情况下,对线上流量或请求内容进行具体的分析。 +- [api-breaker](../plugins/api-breaker.md):该插件实现了 API 熔断功能,从而帮助我们保护上游业务服务。 +- [traffic-split](../plugins/traffic-split.md):该插件使用户可以逐步引导各个上游之间的流量百分比。,你可以使用该插件实现蓝绿发布,灰度发布。 +- [request-id](../plugins/request-id.md):该插件通过 APISIX 为每一个请求代理添加 `unique` ID 用于追踪 API 请求。 +- [proxy-control](../plugins/proxy-control.md):该插件能够动态地控制 NGINX 代理的相关行为。 +- [client-control](../plugins/client-control.md):该插件能够通过设置客户端请求体大小的上限来动态地控制 NGINX 处理客户端的请求。 + +## 更多操作 + +你可以参考[监控 API](./observe-your-api.md) 文档,对 APISIX 进行监控,日志采集,链路追踪等。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/upgrade-guide-from-2.15.x-to-3.0.0.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/upgrade-guide-from-2.15.x-to-3.0.0.md new file mode 100644 index 0000000..914c293 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/upgrade-guide-from-2.15.x-to-3.0.0.md @@ -0,0 +1,516 @@ +--- +title: 升级指南 +keywords: + - APISIX + - APISIX 升级指南 + - APISIX 版本升级 +description: 本文档将引导你了解如何升级 APISIX 版本。 +--- + + + +## APISIX 的版本升级方式 + +APISIX 的版本号遵循[语义化版本](https://semver.org/lang/zh-CN/)。 + +升级到 APISIX 3.0.0 是一个重大的版本升级,我们建议您先升级到 2.15.x,然后再升级到 3.0.0。 + +## 从 2.15.x 升级到 3.0.0 + +### 升级注意事项和重大更新 + +在升级之前,请查看 [3.0.0-beta](./CHANGELOG.md#300-beta) 和 [3.0.0](./CHANGELOG.md#300) 中的 Change 部分,以了解 3.0.0 版本的不兼容的修改与重大更新。 + +#### 部署 + +基于 alpine 的镜像已不再支持,如果你使用了 alpine 的镜像,那么你需要将镜像替换为基于 debian/centos 的镜像。 + +目前,我们提供了: + +- 基于 debian/centos 的镜像,你可以在 [DockerHub](https://hub.docker.com/r/apache/apisix/tags?page=1&ordering=last_updated) 上找到它们 +- CentOS 7 和 CentOS 8 的 RPM 包,支持 AMD64 和 ARM64 架构,可参考文章[通过 RPM 
仓库安装](./installation-guide.md#通过-rpm-仓库安装) +- Debian 11(bullseye) 的 DEB 包,支持 AMD64 和 ARM64 架构,可参考文章[通过 DEB 仓库安装](./installation-guide.md#通过-deb-仓库安装) + +3.0.0 对部署模式进行了重大更新,具体如下: + +- 支持数据面与控制面分离的部署模式,具体可参考 [Decoupled](../../en/latest/deployment-modes.md#decoupled) +- 如在使用中仍需沿用原来的部署模式,那么可以使用部署模式中的 `traditional` 模式,并且更新配置文件,具体可参考 [Traditional](../../en/latest/deployment-modes.md#traditional) +- 支持 Standalone 模式,需要更新配置文件,具体可参考 [Standalone](../../en/latest/deployment-modes.md#standalone) + +#### 依赖项 + +如果你使用提供的二进制包(Debian 和 RHEL)或者镜像,则它们已经捆绑了 APISIX 所有必要的依赖项,你可以跳过本节。 + +APISIX 的一些特性需要在 OpenResty 中引入额外的 NGINX 模块。如果要使用这些功能,你需要构建一个自定义的 OpenResty 发行版(APISIX-Runtime)。你可以参考 [api7/apisix-build-tools](https://github.com/api7/apisix-build-tools) 中的代码,构建自己的 APISIX-Runtime 环境。 + +如果你希望 APISIX 运行在原生的 OpenResty 上,这种情况下将只支持运行在 OpenResty 1.19.3.2 及以上的版本。 + +#### 迁移 + +##### 静态配置迁移 + +APISIX 的配置方式是用自定义的 `conf/config.yaml` 中的内容覆盖默认的 `conf/config-default.yaml`,如果某个配置项在 `conf/config.yaml` 中不存在,那么就使用 `conf/config-default.yaml` 中的配置。在 3.0.0 中,我们调整了 `conf/config-default.yaml` 配置文件中的部分细节,具体内容如下。 + +###### 移动配置项 + +从 2.15.x 到 3.0.0 版本,在 `conf/config-default.yaml` 有一些配置项的位置被移动了。如果你使用了这些配置项,那么你需要将它们移动到新的位置。 + +调整内容: + + * `config_center` 功能改由 `deployment` 中的 `config_provider` 实现 + * `etcd` 字段整体迁移到 `deployment` 中 + * 以下的 Admin API 配置移动到 `deployment` 中的 `admin` 字段 + - admin_key + - enable_admin_cors + - allow_admin + - admin_listen + - https_admin + - admin_api_mtls + - admin_api_version + +你可以在 `conf/config-default.yaml` 中找到这些配置的新的确切位置。 + +###### 更新配置项 + +某些配置在 3.0.0 中被移除了,并被新的配置项替代。如果你使用了这些配置项,那么你需要将它们更新为新的配置项。 + +调整内容: + + * 去除 `apisix.ssl.enable_http2` 和 `apisix.ssl.listen_port`,使用 `apisix.ssl.listen` 替代。 + + 如果在 `conf/config.yaml` 中有这样的配置: + + ```yaml + ssl: + enable_http2: true + listen_port: 9443 + ``` + + 则在 3.0.0 版本中需要转换成如下所示: + + ```yaml + ssl: + listen: + - port: 9443 + enable_http2: true + ``` + + * 去除 `nginx_config.http.lua_shared_dicts`,用 `nginx_config.http.custom_lua_shared_dict` 
替代,这个配置用于声明自定义插件的共享内存。 + + 如果在 `conf/config.yaml` 中有这样的配置: + + ```yaml + nginx_config: + http: + lua_shared_dicts: + my_dict: 1m + ``` + + 则在 3.0.0 版本中需要转换成如下所示: + + ```yaml + nginx_config: + http: + custom_lua_shared_dict: + my_dict: 1m + ``` + + * 去除 `etcd.health_check_retry`,用 `deployment.etcd.startup_retry` 替代,这个配置用于在启动时,重试连接 etcd 的次数。 + + 如果在 `conf/config.yaml` 中有这样的配置: + + ```yaml + etcd: + health_check_retry: 2 + ``` + + 则在 3.0.0 版本中需要转换成如下所示: + + ```yaml + deployment: + etcd: + startup_retry: 2 + ``` + + * 去除 `apisix.port_admin`,用 `deployment.apisix.admin_listen` 替代。 + + 如果在 `conf/config.yaml` 中有这样的配置: + + ```yaml + apisix: + port_admin: 9180 + ``` + + 则在 3.0.0 中需要转换成如下所示: + + ```yaml + deployment: + apisix: + admin_listen: + ip: 127.0.0.1 # 替换成实际暴露的 IP + port: 9180 + ``` + + * 修改 `enable_cpu_affinity` 的默认值为 `false`。主要是因为越来越多的用户通过容器部署 APISIX,由于 Nginx 的 worker_cpu_affinity 不计入 cgroup,默认启用 worker_cpu_affinity 会影响 APISIX 的行为,例如多个实例会被绑定到一个 CPU 上。为了避免这个问题,我们在 `conf/config-default.yaml` 中默认禁用 `enable_cpu_affinity` 选项。 + * 去除 `apisix.real_ip_header`,用 `nginx_config.http.real_ip_header` 替代 + +##### 数据迁移 + +如果你需要备份与恢复数据,可以利用 ETCD 的备份与恢复功能,参考 [etcdctl snapshot](https://etcd.io/docs/v3.5/op-guide/maintenance/#snapshot-backup)。 + +#### 数据兼容 + +在 3.0.0 中,我们调整了部分数据结构,这些调整影响到 APISIX 的路由、上游、插件等数据。3.0.0 版本与 2.15.x 版本之间数据不完全兼容。因此,你无法使用 3.0.0 版本的 APISIX 直接连接到 2.15.x 版本 APISIX 使用的 ETCD 集群。 + +为了保持数据兼容,有两种方式,仅供参考: + + 1. 梳理 ETCD 中的数据,将不兼容的数据备份然后清除,将备份的数据结构转换成 3.0.0 版本的数据结构,通过 3.0.0 版本的 Admin API 来恢复数据 + 2. 梳理 ETCD 中的数据,编写脚本,将 2.15.x 版本的数据结构批量转换成 3.0.0 版本的数据结构 + +数据层面调整内容如下。 + + * 将插件配置的元属性 `disable` 移动到 `_meta` 中。 + + `disable` 表示该插件的启用/禁用状态,如果在 ETCD 中存在这样的数据结构: + + ```json + { + "plugins":{ + "limit-count":{ + ... // 插件配置 + "disable":true + } + } + } + ``` + + 则在 3.0.0 版本中,这个插件的数据结构应该变成如下所示: + + ```json + { + "plugins":{ + "limit-count":{ + ... 
// 插件配置 + "_meta":{ + "disable":true + } + } + } + } + ``` + + 注意:`disable` 是插件的元配置,该调整对所有插件配置生效,不仅仅是 `limit-count` 插件。 + + * 去除路由的 `service_protocol` 字段,使用 `upstream.scheme` 替代。 + + 如果在 ETCD 中存在这样的数据结构: + + ```json + { + "uri":"/hello", + "service_protocol":"grpc", + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:1980":1 + } + } + } + ``` + + 则在 3.0.0 版本中,这个路由的数据结构应该变成如下所示: + + ```json + { + "uri":"/hello", + "upstream":{ + "type":"roundrobin", + "scheme":"grpc", + "nodes":{ + "127.0.0.1:1980":1 + } + } + } + ``` + + * 去除 `authz-keycloak` 插件中的 `audience` 字段,使用 `client_id` 替代。 + + 如果在 ETCD 中 `authz-keycloak` 的插件配置存在这样的数据结构: + + ```json + { + "plugins":{ + "authz-keycloak":{ + ... // 插件配置 + "audience":"Client ID" + } + } + } + ``` + + 则在 3.0.0 中,这个路由的数据结构应该变成如下所示: + + ```json + { + "plugins":{ + "authz-keycloak":{ + ... // 插件配置 + "client_id":"Client ID" + } + } + } + ``` + + * 去除 `mqtt-proxy` 插件中的 `upstream`,在插件外部配置 `upstream`,并在插件中引用。 + + 如果在 ETCD 中 `mqtt-proxy` 的插件配置存在这样的数据结构: + + ```json + { + "remote_addr":"127.0.0.1", + "plugins":{ + "mqtt-proxy":{ + "protocol_name":"MQTT", + "protocol_level":4, + "upstream":{ + "ip":"127.0.0.1", + "port":1980 + } + } + } + } + ``` + + 则在 3.0.0 版本中,这个插件的数据结构应该变成如下所示: + + ```json + { + "remote_addr":"127.0.0.1", + "plugins":{ + "mqtt-proxy":{ + "protocol_name":"MQTT", + "protocol_level":4 + } + }, + "upstream":{ + "type":"chash", + "key":"mqtt_client_id", + "nodes":[ + { + "host":"127.0.0.1", + "port":1980, + "weight":1 + } + ] + } + } + ``` + + * 去除 `syslog` 插件中的 `max_retry_times` 和 `retry_interval` 字段,使用 `max_retry_count` 和 `retry_delay` 替代。 + + 如果在 ETCD 中 `syslog` 的插件配置存在这样的数据结构: + + ```json + { + "plugins":{ + "syslog":{ + "max_retry_times":1, + "retry_interval":1, + ... // 其他配置 + } + } + } + ``` + + 则在 3.0.0 版本中,这个插件的数据结构应该变成如下所示: + + ```json + { + "plugins":{ + "syslog":{ + "max_retry_count":1, + "retry_delay":1, + ... 
// 其他配置 + } + } + } + ``` + + * 去除 `proxy-rewrite` 插件中的 `scheme` 字段,在配置上游时,用 `upstream.scheme` 替代。 + + 如果在 ETCD 中 `proxy-rewrite` 的插件配置存在这样的数据结构: + + ```json + { + "plugins":{ + "proxy-rewrite":{ + "scheme":"https", + ... // 其他配置 + } + }, + "upstream":{ + "nodes":{ + "127.0.0.1:1983":1 + }, + "type":"roundrobin" + }, + "uri":"/hello" + } + ``` + + 则在 3.0.0 版本中,这个插件的数据结构应该变成如下所示: + + ```json + { + "plugins":{ + "proxy-rewrite":{ + ... // 其他配置 + } + }, + "upstream":{ + "scheme":"https", + "nodes":{ + "127.0.0.1:1983":1 + }, + "type":"roundrobin" + }, + "uri":"/hello" + } + ``` + +#### Admin API + +在 3.0.0 版本中,我们对 Admin API 也进行了一些调整。使得 Admin API 更加易用,更加符合 RESTful 的设计理念,具体调整内容如下。 + + * 操作资源时(包括查询单个资源和列表资源),删除了响应体中的 `count`、`action` 和 `node` 字段,并将 `node` 中的内容提升到响应体的根节点。 + + 在 2.x 版本中,通过 Admin API 查询 `/apisix/admin/routes/1` 的响应格式是这样的: + + ```json + { + "count":1, + "action":"get", + "node":{ + "key":"\/apisix\/routes\/1", + "value":{ + ... // 配置内容 + } + } + } + ``` + + 在 3.0.0 版本中,通过 Admin API 查询 `/apisix/admin/routes/1` 资源的响应格式调整为如下所示: + + ```json + { + "key":"\/apisix\/routes\/1", + "value":{ + ... // 配置内容 + } + } + ``` + + * 查询列表资源时,删除 `dir` 字段,新增 `list` 字段,存放列表资源的数据;新增 `total` 字段,存放列表资源的总数。 + + 在 2.x 版本中,通过 Admin API 查询 `/apisix/admin/routes` 的响应格式是这样的: + + ```json + { + "action":"get", + "count":2, + "node":{ + "key":"\/apisix\/routes", + "nodes":[ + { + "key":"\/apisix\/routes\/1", + "value":{ + ... // 配置内容 + } + }, + { + "key":"\/apisix\/routes\/2", + "value":{ + ... // 配置内容 + } + } + ], + "dir":true + } + } + ``` + + 在 3.0.0 版本中,通过 Admin API 查询 `/apisix/admin/routes` 资源的响应格式调整为如下所示: + + ```json + { + "list":[ + { + "key":"\/apisix\/routes\/1", + "value":{ + ... // 配置内容 + } + + }, + { + "key":"\/apisix\/routes\/2", + "value":{ + ... 
// 配置内容 + } + } + ], + "total":2 + } + ``` + + * 调整 ssl 资源的请求路径,从 `/apisix/admin/ssl/{id}` 调整为 `/apisix/admin/ssls/{id}`。 + + 在 2.x 版本中,通过 Admin API 操作 ssl 资源是这样的: + + ```shell + curl -i http://{apisix_listen_address}/apisix/admin/ssl/{id} + ``` + + 在 3.0.0 版本中,通过 Admin API 操作 ssl 资源调整为如下所示: + + ```shell + curl -i http://{apisix_listen_address}/apisix/admin/ssls/{id} + ``` + + * 调整 proto 资源的请求路径,从 `/apisix/admin/proto/{id}` 调整为 `/apisix/admin/protos/{id}`。 + + 在 2.x 版本中,通过 Admin API 操作 proto 资源是这样的: + + ```shell + curl -i http://{apisix_listen_address}/apisix/admin/proto/{id} + ``` + + 在 3.0.0 版本中,通过 Admin API 操作 proto 资源调整为如下所示: + + ```shell + curl -i http://{apisix_listen_address}/apisix/admin/protos/{id} + ``` + +除以上内容外,我们也将 Admin API 的端口调整为 9180。 + +## 总结 + +Apache APISIX 3.0.0 版本的发布,将产品的更多细节迭代了一大步。由于大版本的更新迭代会导致一些配置与数据也相应进行调整,为此我们为您整理了这份 APISIX 升级指南。希望对各位在使用 APISIX 的过程中,对于版本的更新操作也更得心应手。 + +如果您有任何问题或意见,欢迎随时在社区进行交流。 diff --git a/CloudronPackages/APISIX/apisix-source/docs/zh/latest/wasm.md b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/wasm.md new file mode 100644 index 0000000..ef8ce0c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/docs/zh/latest/wasm.md @@ -0,0 +1,84 @@ +# Wasm + +APISIX 支持使用 [Proxy Wasm SDK](https://github.com/proxy-wasm/spec#sdks) 编写的 Wasm 插件。 + +目前,仅实现了少数 API。请关注 [wasm-nginx-module](https://github.com/api7/wasm-nginx-module) 以了解进展。 + +## 编程模型 + +所有插件都在同一个 Wasm VM 中运行,就像 Lua 插件在 Lua VM 中一样。 + +每个插件都有自己的 VMContext(根 ctx)。 + +每个配置的路由/全局规则都有自己的 PluginContext(插件 ctx)。例如,如果我们有一个配置了 Wasm 插件的服务,并且有两个路由继承自它,将会有两个插件 ctx。 + +每个命中该配置的 HTTP 请求都有自己的 HttpContext(HTTP ctx)。例如,如果我们同时配置了全局规则和路由,HTTP 请求将有两个 HTTP ctx,一个用于来自全局规则的插件 ctx,另一个用于来自路由的插件 ctx。 + +## 如何使用 + +首先,我们需要在`config.yaml`中定义插件: + +```yaml +wasm: + plugins: + - name: wasm_log # 插件的名称 + priority: 7999 # 优先级 + file: t/wasm/log/main.go.wasm # `.wasm` 文件的路径 + http_request_phase: access # 默认是"access",可以是["access", "rewrite"]之一 +``` + +就是这样。现在您可以像使用常规插件一样使用 Wasm 插件。 + +例如,在指定路由上启用此插件: 
+ +**注意** + +您可以从`config.yaml`中获取`admin_key`,并使用以下命令将其保存到环境变量中: + +```bash +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +``` + +然后执行以下命令: + +```bash +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/index.html", + "plugins": { + "wasm_log": { + "conf": "blahblah" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +以下是插件中可以配置的属性: + +|名称 | 类型 | 要求 | 默认 | 有效 | 描述| +|---|---|---|---|---|---| +|conf|字符串或结构体 | 必填 | 无 | 不得为空 |插件 ctx 配置,可以通过 Proxy Wasm SDK 获取| + +这里是 Proxy Wasm 回调与 APISIX 阶段的映射: + +- `proxy_on_configure`:在新配置没有 PluginContext 时运行一次。例如,当第一个请求命中配置了 Wasm 插件的路由时。 +- `proxy_on_http_request_headers`:在 access/rewrite 阶段运行,具体取决于`http_request_phase`的配置。 +- `proxy_on_http_request_body`:在与`proxy_on_http_request_headers`相同的阶段运行。要运行此回调,我们需要在`proxy_on_http_request_headers`中将属性`wasm_process_req_body`设置为非空值。请参考`t/wasm/request-body/main.go`作为示例。 +- `proxy_on_http_response_headers`:在 header_filter 阶段运行。 +- `proxy_on_http_response_body`:在 body_filter 阶段运行。要运行此回调,我们需要在`proxy_on_http_response_headers`中将属性`wasm_process_resp_body`设置为非空值。请参考`t/wasm/response-rewrite/main.go`作为示例。 + +## 示例 + +我们在这个仓库的`t/wasm/`下重新实现了一些 Lua 插件: + +- fault-injection +- forward-auth +- response-rewrite +- Slack +- Twitter diff --git a/CloudronPackages/APISIX/apisix-source/example/apisix/plugins/3rd-party.lua b/CloudronPackages/APISIX/apisix-source/example/apisix/plugins/3rd-party.lua new file mode 100644 index 0000000..bcb4e02 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/example/apisix/plugins/3rd-party.lua @@ -0,0 +1,51 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") + + +local schema = { + type = "object", + properties = { + body = { + description = "body to replace response.", + type = "string" + }, + }, + required = {"body"}, +} + +local plugin_name = "3rd-party" + +local _M = { + version = 0.1, + priority = 12, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +function _M.access(conf, ctx) + return 200, conf.body +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/example/apisix/stream/plugins/3rd-party.lua b/CloudronPackages/APISIX/apisix-source/example/apisix/stream/plugins/3rd-party.lua new file mode 100644 index 0000000..bcb4e02 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/example/apisix/stream/plugins/3rd-party.lua @@ -0,0 +1,51 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") + + +local schema = { + type = "object", + properties = { + body = { + description = "body to replace response.", + type = "string" + }, + }, + required = {"body"}, +} + +local plugin_name = "3rd-party" + +local _M = { + version = 0.1, + priority = 12, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +function _M.access(conf, ctx) + return 200, conf.body +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/example/build-dev-image.dockerfile b/CloudronPackages/APISIX/apisix-source/example/build-dev-image.dockerfile new file mode 100644 index 0000000..da0d827 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/example/build-dev-image.dockerfile @@ -0,0 +1,32 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +FROM ubuntu:20.04 + +# Install Test::Nginx +RUN apt update +RUN apt install -y cpanminus make +RUN cpanm --notest Test::Nginx + +# Install development utils +RUN apt install -y sudo git gawk curl nano vim inetutils-ping + +WORKDIR /apisix + +ENV PERL5LIB=.:$PERL5LIB + +ENTRYPOINT ["tail", "-f", "/dev/null"] diff --git a/CloudronPackages/APISIX/apisix-source/example/my_hook.lua b/CloudronPackages/APISIX/apisix-source/example/my_hook.lua new file mode 100644 index 0000000..ef06f66 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/example/my_hook.lua @@ -0,0 +1,29 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local apisix = require("apisix") + +local old_http_init = apisix.http_init +apisix.http_init = function (...) + ngx.log(ngx.EMERG, "my hook works in http") + old_http_init(...) +end + +local old_stream_init = apisix.stream_init +apisix.stream_init = function (...) + ngx.log(ngx.EMERG, "my hook works in stream") + old_stream_init(...) 
+end diff --git a/CloudronPackages/APISIX/apisix-source/logos/apache-apisix.png b/CloudronPackages/APISIX/apisix-source/logos/apache-apisix.png new file mode 100644 index 0000000..6e3aefc Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/logos/apache-apisix.png differ diff --git a/CloudronPackages/APISIX/apisix-source/logos/apisix-white-bg.jpg b/CloudronPackages/APISIX/apisix-source/logos/apisix-white-bg.jpg new file mode 100644 index 0000000..6a9ae00 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/logos/apisix-white-bg.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/logos/cncf-landscape-white-bg.jpg b/CloudronPackages/APISIX/apisix-source/logos/cncf-landscape-white-bg.jpg new file mode 100644 index 0000000..0ee426c Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/logos/cncf-landscape-white-bg.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/logos/cncf-white-bg.jpg b/CloudronPackages/APISIX/apisix-source/logos/cncf-white-bg.jpg new file mode 100644 index 0000000..0e95790 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/logos/cncf-white-bg.jpg differ diff --git a/CloudronPackages/APISIX/apisix-source/powered-by.md b/CloudronPackages/APISIX/apisix-source/powered-by.md new file mode 100644 index 0000000..1be5320 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/powered-by.md @@ -0,0 +1,129 @@ +--- +title: Powered by Apache APISIX +--- + + + +This page documents an alphabetical list of institutions that are using APISIX for research and production, +or providing commercial products including APISIX. + +Users are encouraged to add themselves to this page, [issue](https://github.com/apache/apisix/issues/487) and PR are welcomed. + +1. aimiaobi 妙笔 AI +1. AUGUR 奥格科技股份有限公司 +1. AISPEECH 思必驰信息科技股份有限公司 +1. cunw 湖南新云网 +1. Chaolian 超链云商 +1. CCB Fintech 建信金科 +1. CTRL 开创云 +1. 51tiangou 大商天狗 +1. DaoCloud +1. dasouche 大搜车 +1. dataoke 大淘客 +1. 嘀嗒出行 +1. dusto.cn 浙江大东鞋业有限公司 +1. 
Dian 小电科技 +1. eFactory +1. ehomepay 理房通 +1. eZone 简单一点科技 +1. fansup +1. Tencent Game 腾讯游戏 +1. haieruplus 海尔优家 +1. hellowin 好洛维 +1. HelloTalk, Inc. +1. 航天网信 +1. Huawei 华为 +1. 虎牙 +1. 好医生集团 +1. ihomefnt 艾佳生活 +1. intsig 上海合合信息科技股份有限公司 +1. jiandanxinli 简单心理 +1. jr.ly 同程金服 +1. 凯叔讲故事 +1. ke.com 贝壳找房 +1. Meizu 魅族 +1. 明源云客 +1. 美菜网 +1. Netease 网易 +1. NASA JPL 美国国家航空航天局 喷气推进实验室 +1. Purcotton 深圳全棉时代科技有限公司 +1. 360 奇虎 +1. sinog2c 湖南国科云通 +1. sinovatech 炎黄新星 +1. Taikanglife 泰康云 +1. tangdou 糖豆网 +1. Tencent Cloud 腾讯云 +1. Travelsky 中国航信 +1. Unistam +1. vbill 随行付 +1. VIVO +1. 万思 +1. willclass 会课 +1. 金山办公 +1. Xin 优信二手车 +1. 雪球 +1. Youtu 腾讯优图 +1. YMM 满帮集团 +1. 中移杭研 +1. 紫豪网络 +1. zuzuche 租租车 +1. zybang 作业帮 +1. 中食安泓(广东)健康产业有限公司 +1. 上海泽怡信息科技 +1. 北京新片场传媒股份有限公司 +1. 武汉精臣智慧标识科技有限公司 +1. 北京大学信息技术高等研究院 +1. HONOR 荣耀 +1. 群之脉信息科技 +1. 大房鸭 +1. 优特云 +1. 外研在线 +1. 数地科技 +1. 微吼 +1. 小鹏汽车 +1. Ideacreep + + + +## User Cases + +## NASA JPL + +Using Apache APISIX as an API gateway to deal with north-south and east-west traffic between microservices. + +## ke.com + +Using Apache APISIX as traffic entry gateway + +## meizu + +Using Apache APISIX as api gateway (limit, grpc transcode, abtest, dynamic configures ...) + +## zuzuche.com + +Using Apache APISIX as a gateway, it uses the functions of current limiting, speed limiting, black-and-white list and so on. In the later stage, it also wants to add gRPC protocol, Serverless, custom plug-in, and other functions to meet business needs. + +## souche.com + +Using Apache APISIX as a Web ACL gateway to deal with backend OA systems traffic. + +## HelloTalk, Inc. + +Using Apache APISIX as an API gateway to manage all API and SSL certificates in test\dev\CMS environment. 
diff --git a/CloudronPackages/APISIX/apisix-source/t/APISIX.pm b/CloudronPackages/APISIX/apisix-source/t/APISIX.pm new file mode 100644 index 0000000..d8480c4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/APISIX.pm @@ -0,0 +1,1029 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +package t::APISIX; + +use lib 'lib'; +use Cwd qw(cwd); +use Test::Nginx::Socket::Lua::Stream -Base; + +repeat_each(1); +log_level('info'); +no_long_string(); +no_shuffle(); +no_root_location(); # avoid generated duplicate 'location /' +worker_connections(128); +master_on(); + +my $apisix_home = $ENV{APISIX_HOME} || cwd(); +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +$ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); +$ENV{TEST_NGINX_FAST_SHUTDOWN} ||= 1; + +sub read_file($) { + my $infile = shift; + open my $in, "$apisix_home/$infile" + or die "cannot open $infile for reading: $!"; + my $data = do { local $/; <$in> }; + close $in; + $data; +} + +sub local_dns_resolver() { + open my $in, "/etc/resolv.conf" or die "cannot open /etc/resolv.conf"; + my @lines = <$in>; + my @dns_addrs = (); + foreach my $line (@lines){ + $line =~ m/^nameserver\s+(\d+[.]\d+[.]\d+[.]\d+)\s*$/; + if ($1) { + push(@dns_addrs, $1); + } + } + close($in); + return @dns_addrs +} + + +my $dns_addrs_str = ""; +my $dns_addrs_tbl_str = ""; +my $enable_local_dns = $ENV{"ENABLE_LOCAL_DNS"}; +if ($enable_local_dns) { + my @dns_addrs = local_dns_resolver(); + $dns_addrs_tbl_str = "{"; + foreach my $addr (@dns_addrs){ + $dns_addrs_str = "$dns_addrs_str $addr"; + $dns_addrs_tbl_str = "$dns_addrs_tbl_str\"$addr\", "; + } + $dns_addrs_tbl_str = "$dns_addrs_tbl_str}"; +} else { + $dns_addrs_str = "8.8.8.8 114.114.114.114"; + $dns_addrs_tbl_str = "{\"8.8.8.8\", \"114.114.114.114\"}"; +} +my $custom_dns_server = $ENV{"CUSTOM_DNS_SERVER"}; +if ($custom_dns_server) { + $dns_addrs_tbl_str = "{\"$custom_dns_server\"}"; +} + + +my $events_module = $ENV{TEST_EVENTS_MODULE} // "lua-resty-events"; +my $test_default_config = <<_EOC_; + -- read the default configuration, modify it, and the Lua package + -- cache will persist it for loading by other entrypoints + -- it is used to replace the test::nginx implementation + local default_config = require("apisix.cli.config") + 
default_config.plugin_attr.prometheus.enable_export_server = false + default_config.apisix.events.module = "$events_module" +_EOC_ + +my $user_yaml_config = read_file("conf/config.yaml"); +my $ssl_crt = read_file("t/certs/apisix.crt"); +my $ssl_key = read_file("t/certs/apisix.key"); +my $ssl_ecc_crt = read_file("t/certs/apisix_ecc.crt"); +my $ssl_ecc_key = read_file("t/certs/apisix_ecc.key"); +my $test2_crt = read_file("t/certs/test2.crt"); +my $test2_key = read_file("t/certs/test2.key"); +my $etcd_pem = read_file("t/certs/etcd.pem"); +my $etcd_key = read_file("t/certs/etcd.key"); +$user_yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + proxy_mode: http&stream + stream_proxy: + tcp: + - 9100 + enable_resolv_search_opt: false +_EOC_ + +my $etcd_enable_auth = $ENV{"ETCD_ENABLE_AUTH"} || "false"; + +if ($etcd_enable_auth eq "true") { + $user_yaml_config .= <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + user: root + password: 5tHkHhYkjr6cQY +_EOC_ +} + +my $profile = $ENV{"APISIX_PROFILE"}; + + +my $apisix_file; +my $apisix_file_json; +my $debug_file; +my $config_file; +if ($profile) { + $apisix_file = "apisix-$profile.yaml"; + $apisix_file_json = "apisix-$profile.json"; + $debug_file = "debug-$profile.yaml"; + $config_file = "config-$profile.yaml"; +} else { + $apisix_file = "apisix.yaml"; + $apisix_file_json = "apisix.json"; + $debug_file = "debug.yaml"; + $config_file = "config.yaml"; +} + + +my $dubbo_upstream = ""; +my $dubbo_location = ""; +my $version = eval { `$nginx_binary -V 2>&1` }; +if ($version =~ m/\/mod_dubbo/) { + $dubbo_upstream = <<_EOC_; + upstream apisix_dubbo_backend { + server 0.0.0.1; + + balancer_by_lua_block { + apisix.http_balancer_phase() + } + + multi 1; + keepalive 320; + } + +_EOC_ + + $dubbo_location = <<_EOC_; + location \@dubbo_pass { + access_by_lua_block { + apisix.dubbo_access_phase() + } + + dubbo_pass_all_headers on; + dubbo_pass_body on; + dubbo_pass \$dubbo_service_name 
\$dubbo_service_version \$dubbo_method apisix_dubbo_backend; + + header_filter_by_lua_block { + apisix.http_header_filter_phase() + } + + body_filter_by_lua_block { + apisix.http_body_filter_phase() + } + + log_by_lua_block { + apisix.http_log_phase() + } + } + +_EOC_ +} + +my $grpc_location = <<_EOC_; + location \@grpc_pass { + access_by_lua_block { + apisix.grpc_access_phase() + } + +_EOC_ + +if ($version =~ m/\/apisix-nginx-module/) { + $grpc_location .= <<_EOC_; + grpc_set_header ":authority" \$upstream_host; +_EOC_ +} else { + $grpc_location .= <<_EOC_; + grpc_set_header "Host" \$upstream_host; +_EOC_ +} + +$grpc_location .= <<_EOC_; + grpc_set_header Content-Type application/grpc; + grpc_set_header TE trailers; + grpc_socket_keepalive on; + grpc_pass \$upstream_scheme://apisix_backend; + mirror /proxy_mirror_grpc; + + header_filter_by_lua_block { + apisix.http_header_filter_phase() + } + + body_filter_by_lua_block { + apisix.http_body_filter_phase() + } + + log_by_lua_block { + apisix.http_log_phase() + } + } +_EOC_ + +my $a6_ngx_directives = ""; +if ($version =~ m/\/apisix-nginx-module/) { + $a6_ngx_directives = <<_EOC_; + apisix_delay_client_max_body_check on; + apisix_mirror_on_demand on; + wasm_vm wasmtime; +_EOC_ +} + +my $a6_ngx_vars = ""; +if ($version =~ m/\/apisix-nginx-module/) { + $a6_ngx_vars = <<_EOC_; + set \$wasm_process_req_body ''; + set \$wasm_process_resp_body ''; +_EOC_ +} + +add_block_preprocessor(sub { + my ($block) = @_; + my $wait_etcd_sync = $block->wait_etcd_sync // 0.1; + + if ($block->apisix_yaml && (!defined $block->yaml_config)) { + $user_yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + } + + if ($block->apisix_json && (!defined $block->yaml_config)) { + $user_yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: json +_EOC_ + } + + 
my $lua_deps_path = $block->lua_deps_path // <<_EOC_; + lua_package_path "$apisix_home/?.lua;$apisix_home/?/init.lua;$apisix_home/deps/share/lua/5.1/?/init.lua;$apisix_home/deps/share/lua/5.1/?.lua;$apisix_home/apisix/?.lua;$apisix_home/t/?.lua;$apisix_home/t/xrpc/?.lua;$apisix_home/t/xrpc/?/init.lua;;"; + lua_package_cpath "$apisix_home/?.so;$apisix_home/deps/lib/lua/5.1/?.so;$apisix_home/deps/lib64/lua/5.1/?.so;;"; +_EOC_ + + my $main_config = $block->main_config // <<_EOC_; +worker_rlimit_core 500M; +env ENABLE_ETCD_AUTH; +env APISIX_PROFILE; +env PATH; # for searching external plugin runner's binary +env TEST_NGINX_HTML_DIR; +env OPENSSL_BIN; +_EOC_ + + + if ($version =~ m/\/apisix-nginx-module/) { + $main_config .= <<_EOC_; +thread_pool grpc-client-nginx-module threads=1; + +lua { + lua_shared_dict prometheus-metrics 15m; + lua_shared_dict standalone-config 10m; + lua_shared_dict status-report 1m; + lua_shared_dict nacos 10m; +} +_EOC_ + } + + # set default `timeout` to 5sec + my $timeout = $block->timeout // 5; + $block->set_value("timeout", $timeout); + + my $stream_tls_request = $block->stream_tls_request; + if ($stream_tls_request) { + # generate a springboard to send tls stream request + $block->set_value("stream_conf_enable", 1); + # avoid conflict with stream_enable + $block->set_value("stream_enable"); + $block->set_value("request", "GET /stream_tls_request"); + + my $sni = "nil"; + if ($block->stream_sni) { + $sni = '"' . $block->stream_sni . 
'"'; + } + chomp $stream_tls_request; + + my $repeat = "1"; + if (defined $block->stream_session_reuse) { + $repeat = "2"; + } + + my $config = <<_EOC_; + location /stream_tls_request { + content_by_lua_block { + local sess + for _ = 1, $repeat do + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 2005) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + sess, err = sock:sslhandshake(sess, $sni, false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + local bytes, err = sock:send("$stream_tls_request") + if not bytes then + ngx.say("send stream request error: ", err) + return + end + local data, err = sock:receive("*a") + if not data then + sock:close() + ngx.say("receive stream response error: ", err) + return + end + ngx.print(data) + sock:close() + end + } + } +_EOC_ + $block->set_value("config", $config) + } + + # handling shell exec in test Nginx + my $exec_snippet = $block->exec; + if ($exec_snippet) { + # capture the stdin & max response size + my $stdin = "nil"; + if ($block->stdin) { + $stdin = '"' . $block->stdin . '"'; + } + chomp $exec_snippet; + chomp $stdin; + + my $max_size = $block->max_size // 8096; + $block->set_value("request", "GET /exec_request"); + + my $config = $block->config // ''; + $config .= <<_EOC_; + location /exec_request { + content_by_lua_block { + local shell = require("resty.shell") + local ok, stdout, stderr, reason, status = shell.run([[ $exec_snippet ]], $stdin, @{[$timeout*1000]}, $max_size) + if not ok then + ngx.log(ngx.WARN, "failed to execute the script with status: " .. status .. ", reason: " .. reason .. ", stderr: " .. 
stderr) + ngx.print("stdout: ", stdout) + ngx.print("stderr: ", stderr) + return + end + ngx.print(stdout) + } + } +_EOC_ + + $block->set_value("config", $config) + } + + my $stream_enable = $block->stream_enable; + if ($block->stream_request) { + # Like stream_tls_request, if stream_request is given, automatically enable stream + $stream_enable = 1; + } + + my $stream_conf_enable = $block->stream_conf_enable; + my $extra_stream_config = $block->extra_stream_config // ''; + my $stream_upstream_code = $block->stream_upstream_code // <<_EOC_; + local sock = ngx.req.socket() + local data = sock:receive("1") + ngx.say("hello world") +_EOC_ + + my $stream_config = $block->stream_config // <<_EOC_; + $lua_deps_path + lua_socket_log_errors off; + + lua_shared_dict lrucache-lock-stream 10m; + lua_shared_dict plugin-limit-conn-stream 10m; + lua_shared_dict etcd-cluster-health-check-stream 10m; + lua_shared_dict worker-events-stream 10m; + lua_shared_dict upstream-healthcheck-stream 10m; + + lua_shared_dict kubernetes-stream 1m; + lua_shared_dict kubernetes-first-stream 1m; + lua_shared_dict kubernetes-second-stream 1m; + lua_shared_dict tars-stream 1m; + + upstream apisix_backend { + server 127.0.0.1:1900; + balancer_by_lua_block { + apisix.stream_balancer_phase() + } + } +_EOC_ + + my $stream_extra_init_by_lua_start = $block->stream_extra_init_by_lua_start // ""; + + my $stream_init_by_lua_block = $block->stream_init_by_lua_block // <<_EOC_; + if os.getenv("APISIX_ENABLE_LUACOV") == "1" then + require("luacov.runner")("t/apisix.luacov") + jit.off() + end + + require "resty.core" + + $stream_extra_init_by_lua_start + + apisix = require("apisix") + local args = { + dns_resolver = $dns_addrs_tbl_str, + } + apisix.stream_init(args) +_EOC_ + + my $stream_extra_init_by_lua = $block->stream_extra_init_by_lua // ""; + my $stream_extra_init_worker_by_lua = $block->stream_extra_init_worker_by_lua // ""; + + $stream_config .= <<_EOC_; + init_by_lua_block { + $test_default_config + 
$stream_init_by_lua_block + $stream_extra_init_by_lua + } + init_worker_by_lua_block { + apisix.stream_init_worker() + $stream_extra_init_worker_by_lua + } + + $extra_stream_config + + server { + listen unix:$apisix_home/t/servroot/logs/stream_worker_events.sock; + access_log off; + content_by_lua_block { + require("resty.events.compat").run() + } + } + + # fake server, only for test + server { + listen 1995; + + content_by_lua_block { + $stream_upstream_code + } + } +_EOC_ + + if (defined $stream_enable) { + $block->set_value("stream_config", $stream_config); + } + + my $custom_trusted_cert = $block->custom_trusted_cert // 'cert/apisix.crt'; + + my $stream_server_config = $block->stream_server_config // <<_EOC_; + listen 2005 ssl; + ssl_certificate cert/apisix.crt; + ssl_certificate_key cert/apisix.key; + lua_ssl_trusted_certificate cert/apisix.crt; + + ssl_session_cache shared:STREAM_SSL:20m; + ssl_session_timeout 10m; + ssl_session_tickets off; + + ssl_client_hello_by_lua_block { + apisix.ssl_client_hello_phase() + } + + ssl_certificate_by_lua_block { + apisix.ssl_phase() + } + + preread_by_lua_block { + -- wait for etcd sync + ngx.sleep($wait_etcd_sync) + apisix.stream_preread_phase() + } + + proxy_pass apisix_backend; +_EOC_ + + if ($version =~ m/\/apisix-nginx-module/) { + $stream_server_config .= <<_EOC_; + proxy_ssl_server_name on; + proxy_ssl_name \$upstream_sni; + set \$upstream_sni "apisix_backend"; +_EOC_ + } + + $stream_server_config .= <<_EOC_; + log_by_lua_block { + apisix.stream_log_phase() + } +_EOC_ + + if (defined $stream_enable) { + $block->set_value("stream_server_config", $stream_server_config); + } + + if (defined $stream_conf_enable) { + $main_config .= <<_EOC_; +stream { +$stream_config + server { + listen 1985; + $stream_server_config + } +} +_EOC_ + } + + $block->set_value("main_config", $main_config); + + # The new directive is introduced here to modify the schema + # before apisix validate in require("apisix") + # Todo: merge 
extra_init_by_lua_start and extra_init_by_lua + my $extra_init_by_lua_start = $block->extra_init_by_lua_start // ""; + + my $extra_init_by_lua = $block->extra_init_by_lua // ""; + my $init_by_lua_block = $block->init_by_lua_block // <<_EOC_; + if os.getenv("APISIX_ENABLE_LUACOV") == "1" then + require("luacov.runner")("t/apisix.luacov") + jit.off() + end + + require "resty.core" + + $extra_init_by_lua_start + + apisix = require("apisix") + local args = { + dns_resolver = $dns_addrs_tbl_str, + } + apisix.http_init(args) + + -- set apisix_lua_home into constants module + -- it may be used by plugins to determine the work path of apisix + local constants = require("apisix.constants") + constants.apisix_lua_home = "$apisix_home" + + $extra_init_by_lua +_EOC_ + + my $extra_init_worker_by_lua = $block->extra_init_worker_by_lua // ""; + + my $http_config = $block->http_config // ''; + $http_config .= <<_EOC_; + $lua_deps_path + + lua_shared_dict plugin-limit-req 10m; + lua_shared_dict plugin-limit-count 10m; + lua_shared_dict plugin-limit-count-reset-header 10m; + lua_shared_dict plugin-limit-conn 10m; + lua_shared_dict plugin-ai-rate-limiting 10m; + lua_shared_dict plugin-ai-rate-limiting-reset-header 10m; + lua_shared_dict internal-status 10m; + lua_shared_dict upstream-healthcheck 32m; + lua_shared_dict worker-events 10m; + lua_shared_dict lrucache-lock 10m; + lua_shared_dict balancer-ewma 1m; + lua_shared_dict balancer-ewma-locks 1m; + lua_shared_dict balancer-ewma-last-touched-at 1m; + lua_shared_dict plugin-limit-req-redis-cluster-slot-lock 1m; + lua_shared_dict plugin-limit-count-redis-cluster-slot-lock 1m; + lua_shared_dict plugin-limit-conn-redis-cluster-slot-lock 1m; + lua_shared_dict tracing_buffer 10m; # plugin skywalking + lua_shared_dict access-tokens 1m; # plugin authz-keycloak + lua_shared_dict discovery 1m; # plugin authz-keycloak + lua_shared_dict plugin-api-breaker 10m; + lua_capture_error_log 1m; # plugin error-log-logger + lua_shared_dict 
etcd-cluster-health-check 10m; # etcd health check + lua_shared_dict ext-plugin 1m; + lua_shared_dict kubernetes 1m; + lua_shared_dict kubernetes-first 1m; + lua_shared_dict kubernetes-second 1m; + lua_shared_dict tars 1m; + lua_shared_dict ocsp-stapling 10m; + lua_shared_dict mcp-session 10m; + lua_shared_dict xds-config 1m; + lua_shared_dict xds-config-version 1m; + lua_shared_dict cas_sessions 10m; + + proxy_ssl_name \$upstream_host; + proxy_ssl_server_name on; + + resolver $dns_addrs_str; + resolver_timeout 5; + + underscores_in_headers on; + lua_socket_log_errors off; + client_body_buffer_size 8k; + + variables_hash_bucket_size 128; + + upstream apisix_backend { + server 0.0.0.1; +_EOC_ + + if ($version =~ m/\/apisix-nginx-module/) { + $http_config .= <<_EOC_; + keepalive 32; + + balancer_by_lua_block { + apisix.http_balancer_phase() + } + } +_EOC_ + } else { + $http_config .= <<_EOC_; + balancer_by_lua_block { + apisix.http_balancer_phase() + } + + keepalive 32; + } + + lua_shared_dict prometheus-metrics 10m; +_EOC_ + } + + $http_config .= <<_EOC_; + + $dubbo_upstream + + init_by_lua_block { + $test_default_config + $init_by_lua_block + } + + init_worker_by_lua_block { + require("apisix").http_init_worker() + $extra_init_worker_by_lua + } + + exit_worker_by_lua_block { + require("apisix").http_exit_worker() + } + + log_format main escape=default '\$remote_addr - \$remote_user [\$time_local] \$http_host "\$request" \$status \$body_bytes_sent \$request_time "\$http_referer" "\$http_user_agent" \$upstream_addr \$upstream_status \$upstream_response_time "\$upstream_scheme://\$upstream_host\$upstream_uri"'; + + # fake server, only for test + server { + listen 1980; + listen 1981; + listen 1982; + listen 5044; + +_EOC_ + + if (defined $block->upstream_server_config) { + $http_config .= $block->upstream_server_config; + } + + my $ipv6_fake_server = ""; + if (defined $block->listen_ipv6) { + $ipv6_fake_server = "listen \[::1\]:1980;"; + } + + $http_config .= <<_EOC_; 
+ $ipv6_fake_server + server_tokens off; + + access_log logs/fake-server-access.log main; + + location / { + content_by_lua_block { + require("lib.server").go() + } + + more_clear_headers Date; + } + } + + $a6_ngx_directives + + server { + listen 1983 ssl; + ssl_certificate cert/apisix.crt; + ssl_certificate_key cert/apisix.key; + lua_ssl_trusted_certificate cert/apisix.crt; +_EOC_ + + if (defined $block->upstream_server_config) { + $http_config .= $block->upstream_server_config; + } + + $http_config .= <<_EOC_; + server_tokens off; + + access_log logs/fake-server-access.log main; + + ssl_certificate_by_lua_block { + local ngx_ssl = require "ngx.ssl" + ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) + } + + location / { + content_by_lua_block { + require("lib.server").go() + } + + more_clear_headers Date; + } + } + +_EOC_ + + $http_config .= <<_EOC_; + server { + listen 7085; + location /status/ready { + content_by_lua_block { + apisix.status_ready() + } + } + location /status { + content_by_lua_block { + apisix.status() + } + } + } + server { + listen unix:$apisix_home/t/servroot/logs/worker_events.sock; + access_log off; + location / { + content_by_lua_block { + require("resty.events.compat").run() + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); + + my $TEST_NGINX_HTML_DIR = $ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + my $ipv6_listen_conf = ''; + if (defined $block->listen_ipv6) { + $ipv6_listen_conf = "listen \[::1\]:1984;" + } + + my $config = $block->config // ''; + $config .= <<_EOC_; + $ipv6_listen_conf + + listen 1994 quic reuseport; + listen 1994 ssl; + http2 on; + http3 on; + ssl_certificate cert/apisix.crt; + ssl_certificate_key cert/apisix.key; + lua_ssl_trusted_certificate $custom_trusted_cert; + + ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + + ssl_session_cache shared:SSL:20m; + ssl_session_timeout 10m; + ssl_session_tickets off; + + ssl_client_hello_by_lua_block { + apisix.ssl_client_hello_phase() + } + + 
ssl_certificate_by_lua_block { + apisix.ssl_phase() + } + + access_log logs/access.log main; + + set \$dubbo_service_name ''; + set \$dubbo_service_version ''; + set \$dubbo_method ''; + + location = /apisix/nginx_status { + allow 127.0.0.0/24; + access_log off; + stub_status; + } + + location /apisix/admin { + set \$upstream_scheme 'http'; + set \$upstream_host \$http_host; + set \$upstream_uri ''; + + content_by_lua_block { + apisix.http_admin() + } + } + + location /v1/ { + content_by_lua_block { + apisix.http_control() + } + } + + location / { + set \$upstream_mirror_host ''; + set \$upstream_mirror_uri ''; + set \$upstream_upgrade ''; + set \$upstream_connection ''; + + set \$upstream_scheme 'http'; + set \$upstream_host \$http_host; + set \$upstream_uri ''; + set \$ctx_ref ''; + + set \$upstream_cache_zone off; + set \$upstream_cache_key ''; + set \$upstream_cache_bypass ''; + set \$upstream_no_cache ''; + $a6_ngx_vars + + proxy_cache \$upstream_cache_zone; + proxy_cache_valid any 10s; + proxy_cache_min_uses 1; + proxy_cache_methods GET HEAD POST; + proxy_cache_lock_timeout 5s; + proxy_cache_use_stale off; + proxy_cache_key \$upstream_cache_key; + proxy_no_cache \$upstream_no_cache; + proxy_cache_bypass \$upstream_cache_bypass; + + access_by_lua_block { + -- wait for etcd sync + ngx.sleep($wait_etcd_sync) + apisix.http_access_phase() + } + + proxy_http_version 1.1; + proxy_set_header Host \$upstream_host; + proxy_set_header Upgrade \$upstream_upgrade; + proxy_set_header Connection \$upstream_connection; + proxy_set_header X-Real-IP \$remote_addr; + proxy_pass_header Date; + + ### the following x-forwarded-* headers is to send to upstream server + + set \$var_x_forwarded_proto \$scheme; + set \$var_x_forwarded_host \$host; + set \$var_x_forwarded_port \$server_port; + + proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto \$var_x_forwarded_proto; + proxy_set_header X-Forwarded-Host \$var_x_forwarded_host; + 
proxy_set_header X-Forwarded-Port \$var_x_forwarded_port; + + proxy_pass \$upstream_scheme://apisix_backend\$upstream_uri; + mirror /proxy_mirror; + + header_filter_by_lua_block { + apisix.http_header_filter_phase() + } + + body_filter_by_lua_block { + apisix.http_body_filter_phase() + } + + log_by_lua_block { + apisix.http_log_phase() + } + } + + $grpc_location + $dubbo_location + + location = /proxy_mirror { + internal; +_EOC_ + + if ($version !~ m/\/apisix-nginx-module/) { + $config .= <<_EOC_; + if (\$upstream_mirror_uri = "") { + return 200; + } +_EOC_ + } + + $config .= <<_EOC_; + proxy_http_version 1.1; + proxy_set_header Host \$upstream_host; + proxy_pass \$upstream_mirror_uri; + } + + location = /proxy_mirror_grpc { + internal; +_EOC_ + + if ($version !~ m/\/apisix-nginx-module/) { + $config .= <<_EOC_; + if (\$upstream_mirror_uri = "") { + return 200; + } +_EOC_ + } + + $config .= <<_EOC_; + grpc_pass \$upstream_mirror_host; + } +_EOC_ + + $block->set_value("config", $config); + + my $user_apisix_yaml = $block->apisix_yaml // ""; + if ($user_apisix_yaml) { + $user_apisix_yaml = <<_EOC_; +>>> ../conf/$apisix_file +$user_apisix_yaml +_EOC_ + } + + my $user_apisix_json = $block->apisix_json // ""; + if ($user_apisix_json){ + $user_apisix_json = <<_EOC_; +>>> ../conf/$apisix_file_json +$user_apisix_json +_EOC_ + } + + my $yaml_config = $block->yaml_config // $user_yaml_config; + + my $default_deployment = <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +_EOC_ + + if ($yaml_config !~ m/deployment:/) { + $yaml_config = $default_deployment . 
$yaml_config; + } + + if ($block->extra_yaml_config) { + $yaml_config .= $block->extra_yaml_config; + } + + my $user_debug_config = $block->debug_config // ""; + + my $user_files = $block->user_files; + $user_files .= <<_EOC_; +>>> ../conf/$debug_file +$user_debug_config +>>> ../conf/$config_file +$yaml_config +>>> ../conf/cert/apisix.crt +$ssl_crt +>>> ../conf/cert/apisix.key +$ssl_key +>>> ../conf/cert/apisix_ecc.crt +$ssl_ecc_crt +>>> ../conf/cert/apisix_ecc.key +$ssl_ecc_key +>>> ../conf/cert/test2.crt +$test2_crt +>>> ../conf/cert/test2.key +$test2_key +>>> ../conf/cert/etcd.pem +$etcd_pem +>>> ../conf/cert/etcd.key +$etcd_key +$user_apisix_yaml +$user_apisix_json +_EOC_ + + $block->set_value("user_files", $user_files); + + if ((!defined $block->error_log) && (!defined $block->no_error_log) + && (!defined $block->grep_error_log) + && (!defined $block->ignore_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + $block; +}); + +sub run_or_exit ($) { + my ($cmd) = @_; + my $output = `$cmd`; + if ($?) { + warn "$output"; + exit 1; + } +} + +add_cleanup_handler(sub { + if ($ENV{FLUSH_ETCD}) { + delete $ENV{APISIX_PROFILE}; + run_or_exit "etcdctl del --prefix /apisix"; + run_or_exit "./bin/apisix init_etcd"; + } +}); + +1; diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/api.t b/CloudronPackages/APISIX/apisix-source/t/admin/api.t new file mode 100644 index 0000000..8d78802 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/api.t @@ -0,0 +1,246 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /apisix/admin/routes"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: Server header for admin API +--- response_headers_like +Server: APISIX/(.*) + + + +=== TEST 2: Server header for admin API without token +--- yaml_config +deployment: + admin: + admin_key: + - key: a + name: a + role: admin +apisix: + node_listen: 1984 + enable_server_tokens: false +--- error_code: 401 +--- response_headers +Server: APISIX + + + +=== TEST 3: Version header for admin API (without apikey) +--- yaml_config +deployment: + admin: + admin_key: + - key: a + name: a + role: admin +apisix: + admin_api_version: default +--- error_code: 401 +--- response_headers +! 
X-API-VERSION + + + +=== TEST 4: Version header for admin API (v2) +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + admin_api_version: v2 +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- response_headers +X-API-VERSION: v2 + + + +=== TEST 5: Version header for admin API (v3) +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + admin_api_version: v3 +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- response_headers +X-API-VERSION: v3 + + + +=== TEST 6: CORS header for admin API +--- response_headers +Access-Control-Allow-Origin: * + + + +=== TEST 7: CORS header disabled for admin API +--- yaml_config +deployment: + admin: + admin_key: ~ + enable_admin_cors: false +--- response_headers +Access-Control-Allow-Origin: + + + +=== TEST 8: Compatibility for admin API (v2) +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + admin_api_version: default +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- response_headers +X-API-VERSION: v2 +--- response_body_like: "/apisix/routes" + + + +=== TEST 9: Head method support for admin API +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin', + ngx.HTTP_HEAD) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: Access with api key, and admin_key_required=true +--- yaml_config +deployment: + admin: + admin_key_required: true + admin_key: + - name: admin + role: admin + key: rDAkLJbqvoBzBOoxuYAUDbbWaSilvIca +--- more_headers +X-API-KEY: rDAkLJbqvoBzBOoxuYAUDbbWaSilvIca +--- request +GET /apisix/admin/routes +--- error_code: 200 + + + +=== TEST 11: Access with wrong api key, and admin_key_required=true +--- yaml_config +deployment: + admin: + 
admin_key_required: true +--- more_headers +X-API-KEY: wrong-key +--- request +GET /apisix/admin/routes +--- error_code: 401 + + + +=== TEST 12: Access without api key, and admin_key_required=true +--- yaml_config +deployment: + admin: + admin_key_required: true +--- request +GET /apisix/admin/routes +--- error_code: 401 + + + +=== TEST 13: Access with api key, but admin_key_required=false +--- yaml_config +deployment: + admin: + admin_key_required: false + admin_key: + - name: admin + role: admin + key: rDAkLJbqvoBzBOoxuYAUDbbWaSilvIca +--- more_headers +X-API-KEY: rDAkLJbqvoBzBOoxuYAUDbbWaSilvIca +--- request +GET /apisix/admin/routes +--- error_code: 200 +--- error_log +Admin key is bypassed! + + + +=== TEST 14: Access with wrong api key, but admin_key_required=false +--- yaml_config +deployment: + admin: + admin_key_required: false +--- more_headers +X-API-KEY: wrong-key +--- request +GET /apisix/admin/routes +--- error_code: 200 +--- error_log +Admin key is bypassed! + + + +=== TEST 15: Access without api key, but admin_key_required=false +--- yaml_config +deployment: + admin: + admin_key_required: false +--- request +GET /apisix/admin/routes +--- error_code: 200 +--- error_log +Admin key is bypassed! diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/balancer.t b/CloudronPackages/APISIX/apisix-source/t/admin/balancer.t new file mode 100644 index 0000000..a480c79 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/balancer.t @@ -0,0 +1,243 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $init_by_lua_block = <<_EOC_; + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + function test(route, ctx, count) + local balancer = require("apisix.balancer") + local res = {} + for i = 1, count or 12 do + ctx.balancer_try_count = 0 + local server, err = balancer.pick_server(route, ctx) + if err then + ngx.say("failed: ", err) + end + + core.log.warn("host: ", server.host, " port: ", server.port) + res[server.host] = (res[server.host] or 0) + 1 + end + + local keys = {} + for k,v in pairs(res) do + table.insert(keys, k) + end + table.sort(keys) + + for _, key in ipairs(keys) do + ngx.say("host: ", key, " count: ", res[key]) + end + + ctx.server_picker = nil + end +_EOC_ + $block->set_value("init_by_lua_block", $init_by_lua_block); +}); + +run_tests; + +__DATA__ + +=== TEST 1: roundrobin with same weight +--- config + location /t { + content_by_lua_block { + local up_conf = { + type = "roundrobin", + nodes = { + {host = "39.97.63.215", port = 80, weight = 1, priority = 0}, + {host = "39.97.63.216", port = 81, weight = 1, priority = 0}, + {host = "39.97.63.217", port = 82, weight = 1, priority = 0}, + } + } + local ctx = {conf_version = 1} + ctx.upstream_conf = up_conf + ctx.upstream_version = "ver" + ctx.upstream_key = up_conf.type .. "#route_" .. 
"id" + + test(route, ctx) + } + } +--- request +GET /t +--- response_body +host: 39.97.63.215 count: 4 +host: 39.97.63.216 count: 4 +host: 39.97.63.217 count: 4 + + + +=== TEST 2: roundrobin with different weight +--- config + location /t { + content_by_lua_block { + local up_conf = { + type = "roundrobin", + nodes = { + {host = "39.97.63.215", port = 80, weight = 1, priority = 0}, + {host = "39.97.63.216", port = 81, weight = 2, priority = 0}, + {host = "39.97.63.217", port = 82, weight = 3, priority = 0}, + } + } + local ctx = {conf_version = 1} + ctx.upstream_conf = up_conf + ctx.upstream_version = "ver" + ctx.upstream_key = up_conf.type .. "#route_" .. "id" + + test(route, ctx) + } + } +--- request +GET /t +--- response_body +host: 39.97.63.215 count: 2 +host: 39.97.63.216 count: 4 +host: 39.97.63.217 count: 6 + + + +=== TEST 3: roundrobin, cached server picker by version +--- config + location /t { + content_by_lua_block { + local up_conf = { + type = "roundrobin", + nodes = { + {host = "39.97.63.215", port = 80, weight = 1, priority = 0}, + {host = "39.97.63.216", port = 81, weight = 1, priority = 0}, + {host = "39.97.63.217", port = 82, weight = 1, priority = 0}, + } + } + local ctx = {} + ctx.upstream_conf = up_conf + ctx.upstream_version = 1 + ctx.upstream_key = up_conf.type .. "#route_" .. 
"id" + + test(route, ctx) + + -- cached by version + up_conf.nodes = { + {host = "39.97.63.218", port = 80, weight = 1, priority = 0}, + {host = "39.97.63.219", port = 80, weight = 0, priority = 0}, + } + test(route, ctx) + + -- update, version changed + ctx.upstream_version = 2 + test(route, ctx) + } + } +--- request +GET /t +--- response_body +host: 39.97.63.215 count: 4 +host: 39.97.63.216 count: 4 +host: 39.97.63.217 count: 4 +host: 39.97.63.215 count: 4 +host: 39.97.63.216 count: 4 +host: 39.97.63.217 count: 4 +host: 39.97.63.218 count: 12 + + + +=== TEST 4: chash +--- config + location /t { + content_by_lua_block { + local up_conf = { + type = "chash", + key = "remote_addr", + nodes = { + {host = "39.97.63.215", port = 80, weight = 1, priority = 0}, + {host = "39.97.63.216", port = 81, weight = 1, priority = 0}, + {host = "39.97.63.217", port = 82, weight = 1, priority = 0}, + } + } + local ctx = { + var = {remote_addr = "127.0.0.1"}, + } + ctx.upstream_conf = up_conf + ctx.upstream_version = 1 + ctx.upstream_key = up_conf.type .. "#route_" .. "id" + + test(route, ctx) + + -- cached by version + up_conf.nodes = { + {host = "39.97.63.218", port = 80, weight = 1, priority = 0}, + {host = "39.97.63.219", port = 80, weight = 0, priority = 0}, + } + test(route, ctx) + + -- update, version changed + ctx.upstream_version = 2 + test(route, ctx) + } + } +--- request +GET /t +--- response_body +host: 39.97.63.215 count: 12 +host: 39.97.63.215 count: 12 +host: 39.97.63.218 count: 12 + + + +=== TEST 5: return item directly if only have one item in `nodes` +--- config + location /t { + content_by_lua_block { + local up_conf = { + type = "roundrobin", + nodes = { + {host = "39.97.63.215", port = 80, weight = 1, priority = 0}, + {host = "39.97.63.216", port = 81, weight = 1, priority = 0}, + {host = "39.97.63.217", port = 82, weight = 1, priority = 0}, + } + } + local ctx = {} + ctx.upstream_conf = up_conf + ctx.upstream_version = 1 + ctx.upstream_key = up_conf.type .. 
"#route_" .. "id" + + test(route, ctx) + + -- one item in nodes, return it directly + up_conf.nodes = { + {host = "39.97.63.218", port = 80, weight = 1, priority = 0}, + } + test(route, ctx) + } + } +--- request +GET /t +--- response_body +host: 39.97.63.215 count: 4 +host: 39.97.63.216 count: 4 +host: 39.97.63.217 count: 4 +host: 39.97.63.218 count: 12 diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/consumer-group-force-delete.t b/CloudronPackages/APISIX/apisix-source/t/admin/consumer-group-force-delete.t new file mode 100644 index 0000000..d5e96c6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/consumer-group-force-delete.t @@ -0,0 +1,163 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set consumer_group(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 200, + "time_window": 60, + "rejected_code": 503, + "group": "consumer_group_1" + } + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/consumers/1', + ngx.HTTP_PUT, + [[{ + "username": "1", + "plugins": { + "key-auth": { + "key": "auth-one" + } + }, + "group_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete consumer_group(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/consumer_groups/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this consumer group, consumer [1] is still using it now"} + + + +=== TEST 4: delete consumer_group(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = 
t('/apisix/admin/consumer_groups/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this consumer group, consumer [1] is still using it now"} + + + +=== TEST 5: delete consumer_group(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/consumer_groups/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete consumer +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/consumers/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/consumer-group.t b/CloudronPackages/APISIX/apisix-source/t/admin/consumer-group.t new file mode 100644 index 0000000..176ed9f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/consumer-group.t @@ -0,0 +1,549 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: PUT +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/consumer_groups/company_a" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/consumer_groups/company_a')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed + + + +=== TEST 2: GET +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/consumer_groups/company_a" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: GET all +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups', + ngx.HTTP_GET, + nil, + [[{ + "total": 1, + "list": [ + { + "key": "/apisix/consumer_groups/company_a", + "value": { + "plugins": { + "limit-count": { + "time_window": 60, + "policy": "local", + "count": 2, + "key": "remote_addr", + "rejected_code": 503 + } + } + } + } + ] + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: PATCH +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumer_groups/company_a')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local prev_update_time = res.body.node.value.update_time + assert(prev_update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_PATCH, + [[{ + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }}]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/consumer_groups/company_a" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/consumer_groups/company_a')) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } +--- response_body +passed + + + +=== TEST 5: PATCH (sub path) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = 
assert(etcd.get('/consumer_groups/company_a')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local prev_update_time = res.body.node.value.update_time + assert(prev_update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/consumer_groups/company_a/plugins', + ngx.HTTP_PATCH, + [[{ + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/consumer_groups/company_a" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/consumer_groups/company_a')) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } +--- response_body +passed + + + +=== TEST 6: invalid plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "rejected_code": 503, + "time_window": 60, + "key": "remote_addr" + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: property \"count\" is required"} +--- error_code: 400 + + + +=== TEST 7: PUT (with non-plugin fields) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 
2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" + }, + "desc": "blah" + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" + }, + "desc": "blah" + }, + "key": "/apisix/consumer_groups/company_a" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/consumer_groups/company_a')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed + + + +=== TEST 8: GET (with non-plugin fields) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" + }, + "desc": "blah" + }, + "key": "/apisix/consumer_groups/company_a" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: invalid non-plugin fields +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_PUT, + [[{ + "labels": "a", + "plugins": { + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- response_body +{"error_msg":"invalid configuration: property \"labels\" validation failed: wrong type: expected object, got string"} +--- error_code: 400 + + + +=== TEST 10: set consumer-group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = 
t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/consumer_groups/company_a')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed + + + +=== TEST 11: add consumer with group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/foobar', + ngx.HTTP_PUT, + [[{ + "username": "foobar", + "plugins": { + "key-auth": { + "key": "auth-two" + } + }, + "group_id": "company_a" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: delete-consumer group failed +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_DELETE + ) + ngx.print(body) + } + } +--- response_body +{"error_msg":"can not delete this consumer group, consumer [foobar] is still using it now"} + + + +=== TEST 13: delete consumer +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/foobar', + ngx.HTTP_DELETE + ) + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: delete consumer-group +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_DELETE + ) + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: add consumer with invalid group 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/foobar', + ngx.HTTP_PUT, + [[{ + "username": "foobar", + "plugins": { + "key-auth": { + "key": "auth-two" + } + }, + "group_id": "invalid_group" + }]] + ) + assert(code >= 300) + ngx.say(body) + } + } +--- response_body_like +.*failed to fetch consumer group info by consumer group id.* diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/consumers.t b/CloudronPackages/APISIX/apisix-source/t/admin/consumers.t new file mode 100644 index 0000000..2e9c5e2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/consumers.t @@ -0,0 +1,362 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: add consumer with username +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"jack", + "desc": "new consumer" + }]], + [[{ + "value": { + "username": "jack", + "desc": "new consumer" + }, + "key": "/apisix/consumers/jack" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: update consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumers/jack')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "desc": "new consumer", + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }]], + [[{ + "value": { + "username": "jack", + "desc": "new consumer", + "plugins": { + "key-auth": { + "key": "4y+JvURBE6ZwRbbgaryrhg==" + } + } + }, + "key": "/apisix/consumers/jack" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/consumers/jack')) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: get consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, 
body = t('/apisix/admin/consumers/jack', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "username": "jack", + "desc": "new consumer", + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }, + "key": "/apisix/consumers/jack" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: delete consumer +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_DELETE + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: delete consumer(id: not_found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/consumers/not_found', + ngx.HTTP_DELETE, + nil + ) + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 + + + +=== TEST 6: missing username +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "id":"jack" + }]], + [[{ + "value": { + "id": "jack" + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"username\" is required"} + + + +=== TEST 7: consumer username allows '-' in it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"Jack-and-Rose_123" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 201 + + + +=== TEST 8: add consumer with labels +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + 
ngx.HTTP_PUT, + [[{ + "username":"jack", + "desc": "new consumer", + "labels": { + "build":"16", + "env":"production", + "version":"v2" + } + }]], + [[{ + "value": { + "username": "jack", + "desc": "new consumer", + "labels": { + "build":"16", + "env":"production", + "version":"v2" + } + }, + "key": "/apisix/consumers/jack" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: invalid format of label value: set consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"jack", + "desc": "new consumer", + "labels": { + "env": ["production", "release"] + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"labels\" validation failed: failed to validate env (matching \".*\"): wrong type: expected string, got table"} + + + +=== TEST 10: post consumers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_POST, + "" + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 405 +--- response_body +{"error_msg":"not supported `POST` method for consumer"} + + + +=== TEST 11: add consumer with create_time and update_time(pony) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"pony", + "desc": "new consumer", + "create_time": 1602883670, + "update_time": 1602893670 + }]], + [[{ + "value": { + "username": "pony", + "desc": "new consumer", + "create_time": 1602883670, + "update_time": 1602893670 + }, + "key": "/apisix/consumers/pony" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request 
+GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/consumers2.t b/CloudronPackages/APISIX/apisix-source/t/admin/consumers2.t new file mode 100644 index 0000000..6e351d0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/consumers2.t @@ -0,0 +1,176 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: not unwanted data, PUT +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"jack" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.value.create_time = nil + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/consumers/jack","value":{"username":"jack"}} + + + +=== TEST 2: not unwanted data, GET +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/consumers/jack', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/consumers/jack","value":{"username":"jack"}} + + + +=== TEST 3: not unwanted data, DELETE +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/consumers/jack', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + 
ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"deleted":"1","key":"/apisix/consumers/jack"} + + + +=== TEST 4: list empty resources +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/consumers', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"list":[],"total":0} + + + +=== TEST 5: mismatched username, PUT +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/consumers/jack1', + ngx.HTTP_PUT, + [[{ + "username":"jack" + }]] + ) + + ngx.print(message) + } + } +--- response_body +{"error_msg":"wrong username"} diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/credentials.t b/CloudronPackages/APISIX/apisix-source/t/admin/credentials.t new file mode 100644 index 0000000..1511982 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/credentials.t @@ -0,0 +1,494 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: create a credential for invalid consumer: consumer not found error +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/credential_a', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "key": "the-key" + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 404 +--- response_body +{"error_msg":"consumer not found"} + + + +=== TEST 2: add a consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"jack", + "desc": "new consumer", + "plugins": { + "basic-auth": { + "username": "the-user", + "password": "the-password" + } + } + }]], + [[{ + "key": "/apisix/consumers/jack", + "value": + { + "username":"jack", + "desc": "new consumer", + "plugins": { + "basic-auth": { + "username": "the-user", + "password": "WvF5kpaLvIzjuk4GNIMTJg==" + } + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: add a credentials with basic-auth for the consumer jack, should success +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/credential_a', + ngx.HTTP_PUT, + [[{ + "desc": "basic-auth for jack", + "plugins": { + "basic-auth": { + "username": "the-user", + "password": "the-password" + } + } + }]], + [[{ + "value":{ + "desc":"basic-auth for jack", + "id":"credential_a", + 
"plugins":{"basic-auth":{"username":"the-user","password":"WvF5kpaLvIzjuk4GNIMTJg=="}} + }, + "key":"/apisix/consumers/jack/credentials/credential_a" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: add a credential with key-auth for the consumer jack, should success +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/credential_b', + ngx.HTTP_PUT, + [[{ + "desc": "key-auth for jack", + "plugins": { + "key-auth": { + "key": "the-key" + } + } + }]], + [[{ + "value":{ + "desc":"key-auth for jack", + "id":"credential_b", + "plugins":{"key-auth":{"key":"JCX7x1qN5e9kHt0GuJfWpw=="}} + }, + "key":"/apisix/consumers/jack/credentials/credential_b" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: add a credential with a plugin which is not a auth plugin, should fail +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/credential_b', + ngx.HTTP_PUT, + [[{ + "desc": "limit-conn for jack", + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key_type": "var", + "key": "http_a" + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"only supports auth type plugins in consumer credential"} + + + +=== TEST 6: list consumers: should not contain credential +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body, res = t('/apisix/admin/consumers', ngx.HTTP_GET) + + ngx.status = code + res = json.decode(res) + assert(res.total == 1) + assert(res.list[1].key == 
"/apisix/consumers/jack") + } + } +--- request +GET /t +--- response_body + + + +=== TEST 7: list credentials: should contain credential_a and credential_b +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body, res = t('/apisix/admin/consumers/jack/credentials', ngx.HTTP_GET) + + ngx.status = code + res = json.decode(res) + assert(res.total == 2) + assert(res.list[1].key == "/apisix/consumers/jack/credentials/credential_a") + assert(res.list[2].key == "/apisix/consumers/jack/credentials/credential_b") + } + } +--- request +GET /t +--- response_body + + + +=== TEST 8: get a credential +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/credential_b', + ngx.HTTP_GET, + nil, + [[{ + "key": "/apisix/consumers/jack/credentials/credential_b", + "value": { + "desc": "key-auth for jack", + "plugins": {"key-auth": {"key": "the-key"} + }} + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: update credential: should ok +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/credential_b', + ngx.HTTP_PUT, + [[{ + "desc": "new description", + "plugins": { + "key-auth": { + "key": "new-key" + } + } + }]], + [[{ + "key": "/apisix/consumers/jack/credentials/credential_b", + "value": { + "desc": "new description", + "plugins": { + "key-auth": { + "key": "523EisB/dvqlIT9RzfF3ZQ==" + } + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: delete credential +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/consumers/jack/credentials/credential_a', ngx.HTTP_DELETE) + + assert(code == 200) + ngx.status = code + + code, body, res = t('/apisix/admin/consumers/jack/credentials', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 1) + assert(res.list[1].key == "/apisix/consumers/jack/credentials/credential_b") + } + } +--- request +GET /t +--- response_body + + + +=== TEST 11: create a credential has more than one plugin: should not ok +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/xxx-yyy-zzz', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {"key": "the-key"}, + "basic-auth": {"username": "the-user", "password": "the-password"} + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"plugins\" validation failed: expect object to have at most 1 properties"} + + + +=== TEST 12: delete consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_DELETE + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: list credentials: should get 404 because the consumer is deleted +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials', ngx.HTTP_GET) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 404 +--- response_body +{"message":"Key not found"} + + + +=== TEST 14: add a consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"jack" + }]] + ) + + if ngx.status >= 300 then + ngx.status 
= code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: add a credential with key-auth for the consumer jack (id in the payload but not in uri), should success +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials', + ngx.HTTP_PUT, + [[{ + "id": "d79a5aa3", + "desc": "key-auth for jack", + "plugins": { + "key-auth": { + "key": "the-key" + } + } + }]], + [[{ + "value":{ + "desc":"key-auth for jack", + "id":"d79a5aa3", + "plugins":{"key-auth":{"key":"JCX7x1qN5e9kHt0GuJfWpw=="}} + }, + "key":"/apisix/consumers/jack/credentials/d79a5aa3" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: add a credential with key-auth for the consumer jack but missing id in uri and payload, should fail +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials', + ngx.HTTP_PUT, + [[{ + "desc": "key-auth for jack", + "plugins": { + "key-auth": { + "key": "the-key" + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing credential id"} diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/filter.t b/CloudronPackages/APISIX/apisix-source/t/admin/filter.t new file mode 100644 index 0000000..075af69 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/filter.t @@ -0,0 +1,1055 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +worker_connections(1024); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + admin_api_version: v3 +apisix: + node_listen: 1984 + proxy_mode: http&stream +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: bad page_size(page_size must be between 10 and 500) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + ngx.sleep(0.5) + + local code, body = t('/apisix/admin/routes/?page=1&page_size=2', + ngx.HTTP_GET + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 400 +--- response_body +page_size must be between 10 and 500 + + + +=== TEST 2: ignore bad page and would use default value 1 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. 
[[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?page=-1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 10) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: sort by createdIndex +# the smaller the createdIndex, the higher the ranking +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + + for i = 1, #res.list - 1 do + assert(res.list[i].createdIndex < res.list[i + 1].createdIndex) + end + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: routes pagination +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. 
[[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 10) + + code, body, res = t('/apisix/admin/routes/?page=2&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + + code, body, res = t('/apisix/admin/routes/?page=3&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 0) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: services pagination +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/services/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/services/?page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 10) + + code, body, res = t('/apisix/admin/services/?page=2&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + + code, body, res = t('/apisix/admin/services/?page=3&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 0) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: only search name or labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/services/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[", + "labels": {"]] .. i .. '":"' .. i .. 
[["} + }]] + ) + end + + ngx.sleep(0.5) + + local matched = {1, 10, 11} + + local code, body, res = t('/apisix/admin/services/?name=1', + ngx.HTTP_GET + ) + res = json.decode(res) + -- match the name are 1, 10, 11 + assert(#res.list == 3) + + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + code, body, res = t('/apisix/admin/services/?label=1', + ngx.HTTP_GET + ) + res = json.decode(res) + -- match the label are 1, 10, 11 + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: services filter +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/services/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/services/?name=1', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: routes filter +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[", + "uri": "]] .. i .. 
[[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/services/?name=1', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: filter with pagination +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body, res = t('/apisix/admin/services/?name=1&page=1&page_size=10', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + -- we do filtering first now, so it will first filter to 1, 10, 11, and then paginate + -- res will contain 1, 10, 11 instead of just 1, 10. + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: routes filter with uri +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + for i = 1, 11 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "name": "]] .. i .. [[", + "uri": "]] .. i .. 
[[" + }]] + ) + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?uri=1', + ngx.HTTP_GET + ) + res = json.decode(res) + + -- match the name and label are 1, 10, 11 + assert(#res.list == 3) + + local matched = {1, 10, 11} + for _, node in ipairs(res.list) do + assert(core.table.array_find(matched, tonumber(node.value.name))) + end + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: match labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "labels": { + "env": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello2", + "labels": { + "env2": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + -- only match labels' keys + local code, body, res = t('/apisix/admin/routes/?label=env', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + -- don't match labels' values + code, body, res = t('/apisix/admin/routes/?label=production', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 0) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: match uris +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/foo", "/bar"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?uri=world', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: match uris & labels +# uris are same in different routes, filter by labels +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"], + "labels": { + "env": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"], + "labels": { + "build": "16" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + -- only match route 1 + local code, body, res = t('/apisix/admin/routes/?uri=world&label=env', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: match uri & labels +# uri is same in different routes, filter by labels +--- config + location /t { + 
content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "labels": { + "env": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "labels": { + "env2": "production" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + local code, body, res = t('/apisix/admin/routes/?uri=hello&label=env', + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 1) + assert(res.list[1].value.id == "1") + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: filtered data total +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, res = t('/apisix/admin/routes', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 11) + assert(#res.list == 11) + + local code, body, res = t('/apisix/admin/routes/?label=', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 0) + assert(#res.list == 0) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: pagination data total +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, res = t('/apisix/admin/routes?page=1&page_size=10', ngx.HTTP_GET) + res = json.decode(res) + assert(res.total == 11) + assert(#res.list == 10) + + local code, body, res = t('/apisix/admin/routes?page=10&page_size=10', ngx.HTTP_GET) + res = 
json.decode(res) + assert(res.total == 11) + assert(#res.list == 0) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST: 17: filter by route service_id/upstream_id +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + -- create a service + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + -- create a upstream + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }]] + ) + + for i = 1, 11 do + local route = { uri = "/hello" .. i } + if i % 2 == 0 then + route.service_id = "1" + else + route.upstream_id = "1" + end + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + json.encode(route) + ) + end + + ngx.sleep(0.5) + + -- check service_id + local code, body, res = t('/apisix/admin/routes?filter=' + .. ngx.encode_args({ service_id = "1" }), + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 5, "expected 5 routes with service_id 1, got " .. #res.list) + + for i = 1, #res.list do + assert(tonumber(res.list[i].value.id) % 2 == 0, + "expected route id to be even, got " .. res.list[i].value.id) + assert(res.list[i].value.service_id == "1", + "expected service_id 1, got " .. tostring(res.list[i].value.service_id)) + end + + -- check upstream_id + local code, body, res = t('/apisix/admin/routes?filter=' + .. ngx.encode_args({ upstream_id = "1" }), + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 6, "expected 6 routes with upstream_id 1, got " .. #res.list) + + for i = 1, #res.list do + assert(tonumber(res.list[i].value.id) % 2 == 1, + "expected route id to be odd, got " .. res.list[i].value.id) + assert(res.list[i].value.upstream_id == "1", + "expected upstream_id 1, got " .. 
tostring(res.list[i].value.upstream_id)) + end + } + } +--- error_code: 200 + + + +=== TEST: 18: filter by stream route service_id/upstream_id +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + -- create a service + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + -- create a upstream + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }]] + ) + + for i = 1, 11 do + local route = { server_port = 5432 } + if i % 2 == 0 then + route.service_id = "1" + else + route.upstream_id = "1" + end + local code, body = t('/apisix/admin/stream_routes/' .. i, + ngx.HTTP_PUT, + json.encode(route) + ) + end + + ngx.sleep(0.5) + + -- check service_id + local code, body, res = t('/apisix/admin/stream_routes?filter=' + .. ngx.encode_args({ service_id = "1" }), + ngx.HTTP_GET + ) + res = json.decode(res) + + assert(#res.list == 5, "expected 5 stream routes with service_id 1, got " .. #res.list) + + for i = 1, #res.list do + assert(tonumber(res.list[i].value.id) % 2 == 0, + "expected stream route id to be even, got " .. res.list[i].value.id) + assert(res.list[i].value.service_id == "1", + "expected service_id 1, got " .. tostring(res.list[i].value.service_id)) + end + + -- check upstream_id + local code, body, res = t('/apisix/admin/stream_routes?filter=' + .. ngx.encode_args({ upstream_id = "1" }), + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 6, "expected 6 stream routes with upstream_id 1, got " .. #res.list) + + for i = 1, #res.list do + assert(tonumber(res.list[i].value.id) % 2 == 1, + "expected stream route id to be odd, got " .. res.list[i].value.id) + assert(res.list[i].value.upstream_id == "1", + "expected upstream_id 1, got " .. 
tostring(res.list[i].value.upstream_id)) + end + } + } +--- error_code: 200 + + + +=== TEST: 19: filter by route (both service_id/upstream_id) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + -- create a service + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + -- create a upstream + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }]] + ) + + for i = 1, 11 do + local route = { uri = "/hello" .. i } + if i % 2 == 0 then + route.service_id = "1" + else + route.upstream_id = "1" + route.service_id = "1" + end + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + json.encode(route) + ) + end + + ngx.sleep(0.5) + + -- check service_id + local code, body, res = t('/apisix/admin/routes?filter=' + .. ngx.encode_args({ service_id = "1" }), + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 11, "expected 11 routes with service_id 1, got " .. #res.list) + + for i = 1, #res.list do + assert(res.list[i].value.service_id == "1", + "expected service_id 1, got " .. tostring(res.list[i].value.service_id)) + end + + -- check both service_id and upstream_id + local code, body, res = t('/apisix/admin/routes?' + .. ngx.encode_args({filter = ngx.encode_args({ service_id = "1", upstream_id = "1" })}), + ngx.HTTP_GET + ) + res = json.decode(res) + assert(#res.list == 6, "expected 6 routes with both service_id 1 and upstream_id 1, got " .. #res.list) + + for i = 1, #res.list do + assert(tonumber(res.list[i].value.id) % 2 == 1, + "expected route id to be odd, got " .. res.list[i].value.id) + assert(res.list[i].value.service_id == "1", + "expected service_id 1, got " .. 
tostring(res.list[i].value.service_id)) + assert(res.list[i].value.upstream_id == "1", + "expected upstream_id 1, got " .. tostring(res.list[i].value.upstream_id)) + end + } + } +--- error_code: 200 diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/global-rules.t b/CloudronPackages/APISIX/apisix-source/t/admin/global-rules.t new file mode 100644 index 0000000..0819f2d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/global-rules.t @@ -0,0 +1,506 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set global rules +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/global_rules/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/global_rules/1')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get global rules +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/global_rules/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: list global rules +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules', + ngx.HTTP_GET, + nil, + [[{ + "total": 1, + "list": [ + { + "key": "/apisix/global_rules/1", + "value": { + "plugins": { + "limit-count": { + "time_window": 60, + "policy": "local", + "count": 2, + "key": "remote_addr", + "rejected_code": 
503 + } + } + } + } + ] + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: PATCH global rules +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/global_rules/1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local prev_update_time = res.body.node.value.update_time + assert(prev_update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PATCH, + [[{ + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }}]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/global_rules/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/global_rules/1')) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: PATCH global rules (sub path) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/global_rules/1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local prev_update_time = res.body.node.value.update_time + assert(prev_update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/global_rules/1/plugins', + 
ngx.HTTP_PATCH, + [[{ + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/global_rules/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/global_rules/1')) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: delete global rules +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 7: delete global rules(not_found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 + + + +=== TEST 8: set global rules(missing plugins) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{}]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"plugins\" is required"} + + + +=== TEST 9: string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local 
code, body = t('/apisix/admin/global_rules/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: string id(DELETE) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/a-b-c-ABC_0123', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: not unwanted data, PUT +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.value.create_time = nil + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} +--- request +GET /t + + + +=== TEST 12: not unwanted data, PATCH +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/global_rules/1', + ngx.HTTP_PATCH, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.value.create_time = nil + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body 
+{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} +--- request +GET /t + + + +=== TEST 13: not unwanted data, GET +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/global_rules/1', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} +--- request +GET /t + + + +=== TEST 14: not unwanted data, DELETE +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"deleted":"1","key":"/apisix/global_rules/1"} +--- request +GET /t diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/global-rules2.t b/CloudronPackages/APISIX/apisix-source/t/admin/global-rules2.t new file mode 100644 index 0000000..345f67f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/global-rules2.t @@ -0,0 +1,146 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: list empty resources +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/global_rules', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"list":[],"total":0} + + + +=== TEST 2: set global rule +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.value.create_time = nil + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body 
+{"key":"/apisix/global_rules/1","value":{"id":"1","plugins":{"proxy-rewrite":{"uri":"/","use_real_request_uri_unsafe":false}}}} + + + +=== TEST 3: list global rules +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/global_rules', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.total == 1) + assert(#res.list == 1) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/global_rules/1") + assert(res.list[1].value ~= nil) + + ngx.say(message) + } + } +--- response_body_like +passed + + + +=== TEST 4: delete global rules +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 200 message: passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/health-check.t b/CloudronPackages/APISIX/apisix-source/t/admin/health-check.t new file mode 100644 index 0000000..f485256 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/health-check.t @@ -0,0 +1,521 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $init_by_lua_block = <<_EOC_; + require "resty.core" + apisix = require("apisix") + apisix.http_init() + + json = require("toolkit.json") + req_data = json.decode([[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "checks": {} + }, + "uri": "/index.html" + }]]) + exp_data = { + value = req_data, + key = "/apisix/routes/1", + } +_EOC_ + + $block->set_value("init_by_lua_block", $init_by_lua_block); + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: active +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 2, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + }]]) + exp_data.value.upstream.checks = req_data.upstream.checks + + local code, body, res = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + req_data, + exp_data + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: passive +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + 
"interval": 2, + "successes": 1 + } + }, + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 1 + }, + "unhealthy": { + "http_statuses": [500], + "http_failures": 2 + } + } + }]]) + exp_data.value.upstream.checks = req_data.upstream.checks + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + req_data, + exp_data + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: invalid route: active.healthy.successes counter exceed maximum value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "healthy": { + "successes": 255 + } + } + }]]) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, req_data) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: property \"active\" validation failed: property \"healthy\" validation failed: property \"successes\" validation failed: expected 255 to be at most 254"} + + + +=== TEST 4: invalid route: active.healthy.successes counter below the minimum value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "healthy": { + "successes": 0 + } + } + }]]) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, req_data) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: property \"active\" validation failed: property \"healthy\" validation failed: property \"successes\" validation failed: expected 0 to be at least 1"} + + + +=== TEST 5: invalid route: wrong passive.unhealthy.http_statuses +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "passive": { + "unhealthy": { + "http_statuses": [500, 600] + } + } + }]]) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, req_data) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: property \"passive\" validation failed: property \"unhealthy\" validation failed: property \"http_statuses\" validation failed: failed to validate item 2: expected 600 to be at most 599"} + + + +=== TEST 6: invalid route: wrong active.type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "type": "udp" + } + }]]) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, req_data) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: property \"active\" validation failed: property \"type\" validation failed: matches none of the enum values"} + + + +=== TEST 7: invalid route: duplicate items in active.healthy.http_statuses +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "healthy": { + "http_statuses": [200, 200] + } + } + }]]) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, req_data) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: property \"active\" validation failed: property \"healthy\" validation failed: property \"http_statuses\" validation failed: expected 
unique items but items 1 and 2 are equal"} + + + +=== TEST 8: invalid route: active.unhealthy.http_failure is a floating point value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "unhealthy": { + "http_failures": 3.1 + } + } + }]]) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, req_data) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: property \"active\" validation failed: property \"unhealthy\" validation failed: property \"http_failures\" validation failed: wrong type: expected integer, got number"} + + + +=== TEST 9: valid req_headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 2, + "successes": 1 + }, + "req_headers": ["User-Agent: curl/7.29.0"] + } + }]]) + exp_data.value.upstream.checks = req_data.upstream.checks + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + req_data, + exp_data + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: multiple request headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 2, + "successes": 1 + }, + "req_headers": ["User-Agent: curl/7.29.0", "Accept: */*"] + } + }]]) + exp_data.value.upstream.checks = req_data.upstream.checks + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + req_data, + exp_data + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: 
invalid req_headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 2, + "successes": 1 + }, + "req_headers": ["User-Agent: curl/7.29.0", 2233] + } + }]]) + exp_data.value.upstream.checks = req_data.upstream.checks + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + req_data, + exp_data + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: property \"active\" validation failed: property \"req_headers\" validation failed: failed to validate item 2: wrong type: expected string, got number"} + + + +=== TEST 12: only passive +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 1 + }, + "unhealthy": { + "http_statuses": [500], + "http_failures": 2 + } + } + }]]) + exp_data.value.upstream.checks = req_data.upstream.checks + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + req_data, + exp_data + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: object matches none of the required: [\"active\"] or [\"active\",\"passive\"]"} + + + +=== TEST 13: only active +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 2, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + }]]) + 
exp_data.value.upstream.checks.active = req_data.upstream.checks.active + exp_data.value.upstream.checks.passive = { + type = "http", + healthy = { + http_statuses = { 200, 201, 202, 203, 204, 205, 206, 207, 208, 226, + 300, 301, 302, 303, 304, 305, 306, 307, 308 }, + successes = 0, + }, + unhealthy = { + http_statuses = { 429, 500, 503 }, + tcp_failures = 0, + timeouts = 0, + http_failures = 0, + } + } + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + req_data, + exp_data + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: number type timeout +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + req_data.upstream.checks = json.decode([[{ + "active": { + "http_path": "/status", + "host": "foo.com", + "timeout": 1.01, + "healthy": { + "interval": 2, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + }]]) + exp_data.value.upstream.checks = req_data.upstream.checks + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + req_data, + exp_data + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/metadata.spec.ts b/CloudronPackages/APISIX/apisix-source/t/admin/metadata.spec.ts new file mode 100644 index 0000000..39dfe18 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/metadata.spec.ts @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { readFile } from 'node:fs/promises'; +import { resolve } from 'node:path'; + +import { request as requestAdminAPI } from '../ts/admin_api'; + +describe('Resource Metadata', () => { + describe('Consumer', () => { + it('should ensure additionalProperties is false', () => + expect( + requestAdminAPI( + '/apisix/admin/consumers/jack', + 'PUT', + { + username: 'jack', + invalid: true, + }, + undefined, + { validateStatus: () => true }, + ), + ).resolves.toMatchObject({ status: 400 })); + + it('should accept desc field', () => + expect( + requestAdminAPI('/apisix/admin/consumers/jack', 'PUT', { + username: 'jack', + desc: 'test_desc', + }), + ).resolves.not.toThrow()); + }); + + describe('Consumer Credentials', () => { + it('should ensure additionalProperties is false', () => + expect( + requestAdminAPI( + '/apisix/admin/consumers/jack/credentials/cred1', + 'PUT', + { + plugins: { 'key-auth': { key: 'test' } }, + invalid: true, + }, + undefined, + { validateStatus: () => true }, + ), + ).resolves.toMatchObject({ status: 400 })); + + it('should accept name field', () => + expect( + requestAdminAPI( + '/apisix/admin/consumers/jack/credentials/cred1', + 'PUT', + { + name: 'test_name', + plugins: { 'key-auth': { key: 'test' } }, + }, + ), + ).resolves.not.toThrow()); + }); + + describe('SSL', () => { + const path = resolve(__dirname, '../certs/'); + let cert: string; + let key: string; + + beforeAll(async () => { + cert = await readFile(resolve(path, 'apisix.crt'), 'utf-8'); + key = await readFile(resolve(path, 'apisix.key'), 'utf-8'); + }); + + 
it('should ensure additionalProperties is false', () => + expect( + requestAdminAPI( + '/apisix/admin/ssls/ssl1', + 'PUT', + { sni: 'test.com', cert, key, invalid: true }, + undefined, + { validateStatus: () => true }, + ), + ).resolves.toMatchObject({ status: 400 })); + + it('should accept desc field', () => + expect( + requestAdminAPI('/apisix/admin/ssls/ssl1', 'PUT', { + desc: 'test_desc', + sni: 'test.com', + cert, + key, + }), + ).resolves.not.toThrow()); + }); + + describe('Proto', () => { + it('should ensure additionalProperties is false', () => + expect( + requestAdminAPI( + '/apisix/admin/protos/proto1', + 'PUT', + { content: 'syntax = "proto3";', invalid: true }, + undefined, + { validateStatus: () => true }, + ), + ).resolves.toMatchObject({ status: 400 })); + + it('should accept name/labels field', () => + expect( + requestAdminAPI('/apisix/admin/protos/proto1', 'PUT', { + name: 'test_name', + labels: { test: 'test' }, + content: 'syntax = "proto3";', + }), + ).resolves.not.toThrow()); + }); + + describe('Stream Route', () => { + it('should ensure additionalProperties is false', () => + expect( + requestAdminAPI( + '/apisix/admin/stream_routes/sr1', + 'PUT', + { upstream: { nodes: { '127.0.0.1:5432': 1 } }, invalid: true }, + undefined, + { validateStatus: () => true }, + ), + ).resolves.toMatchObject({ status: 400 })); + + it('should accept name field', () => + expect( + requestAdminAPI('/apisix/admin/stream_routes/sr1', 'PUT', { + name: 'test_name', + upstream: { nodes: { '127.0.0.1:5432': 1 } }, + }), + ).resolves.not.toThrow()); + }); + + describe('Consumer Group', () => { + it('should ensure additionalProperties is false', () => + expect( + requestAdminAPI( + '/apisix/admin/consumer_groups/cg1', + 'PUT', + { plugins: {}, invalid: true }, + undefined, + { validateStatus: () => true }, + ), + ).resolves.toMatchObject({ status: 400 })); + + it('should accept name field', () => + expect( + requestAdminAPI('/apisix/admin/consumer_groups/cg1', 'PUT', { + 
name: 'test_name', + plugins: {}, + }), + ).resolves.not.toThrow()); + }); +}); diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/metadata.t b/CloudronPackages/APISIX/apisix-source/t/admin/metadata.t new file mode 100644 index 0000000..d27035e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/metadata.t @@ -0,0 +1,36 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +use_hup(); + +run_tests(); + +__DATA__ + +=== TEST 1: test +--- timeout: 15 +--- max_size: 204800 +--- exec +cd t && pnpm test admin/metadata.spec.ts 2>&1 +--- no_error_log +failed to execute the script with status +--- response_body eval +qr/PASS admin\/metadata.spec.ts/ diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/plugin-configs-force-delete.t b/CloudronPackages/APISIX/apisix-source/t/admin/plugin-configs-force-delete.t new file mode 100644 index 0000000..7d4f737 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/plugin-configs-force-delete.t @@ -0,0 +1,163 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set plugin_configs(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503 + } + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugin_config_id": 1, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete 
plugin_configs(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/plugin_configs/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this plugin config, route [1] is still using it now"} + + + +=== TEST 4: delete plugin_configs(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this plugin config, route [1] is still using it now"} + + + +=== TEST 5: delete plugin_configs(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/plugin_configs/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete route +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/plugin-configs.t b/CloudronPackages/APISIX/apisix-source/t/admin/plugin-configs.t new file mode 100644 index 0000000..8526316 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/plugin-configs.t @@ -0,0 +1,523 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more 
+# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: PUT +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/plugin_configs/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/plugin_configs/1')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed + + + 
+=== TEST 2: GET +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/plugin_configs/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: GET all +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs', + ngx.HTTP_GET, + nil, + [[{ + "total": 1, + "list": [ + { + "key": "/apisix/plugin_configs/1", + "value": { + "plugins": { + "limit-count": { + "time_window": 60, + "policy": "local", + "count": 2, + "key": "remote_addr", + "rejected_code": 503 + } + } + } + } + ] + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: PATCH +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/plugin_configs/1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local prev_update_time = res.body.node.value.update_time + assert(prev_update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PATCH, + [[{ + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }}]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/plugin_configs/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/plugin_configs/1')) + local create_time = res.body.node.value.create_time 
+ assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } +--- response_body +passed + + + +=== TEST 5: PATCH (sub path) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/plugin_configs/1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local prev_update_time = res.body.node.value.update_time + assert(prev_update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/plugin_configs/1/plugins', + ngx.HTTP_PATCH, + [[{ + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/plugin_configs/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/plugin_configs/1')) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } +--- response_body +passed + + + +=== TEST 6: invalid plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "rejected_code": 503, + "time_window": 60, + "key": "remote_addr" + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- response_body +{"error_msg":"failed 
to check the configuration of plugin limit-count err: property \"count\" is required"} +--- error_code: 400 + + + +=== TEST 7: PUT (with non-plugin fields) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" + }, + "desc": "blah" + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" + }, + "desc": "blah" + }, + "key": "/apisix/plugin_configs/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/plugin_configs/1')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed + + + +=== TEST 8: GET (with non-plugin fields) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "labels": { + "你好": "世界" + }, + "desc": "blah" + }, + "key": "/apisix/plugin_configs/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: invalid non-plugin fields +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "labels": "a", + "plugins": { + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- 
response_body +{"error_msg":"invalid configuration: property \"labels\" validation failed: wrong type: expected object, got string"} +--- error_code: 400 + + + +=== TEST 10: set plugin-configs(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/plugin_configs/1')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed + + + +=== TEST 11: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugin_config_id": 1, + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: delete-plugin configs failed(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_DELETE + ) + ngx.print(body) + } + } +--- response_body +{"error_msg":"can not delete this plugin config, route [1] is still using it now"} + + + +=== TEST 13: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.say(body) + } + } +--- response_body +passed + + + +=== 
TEST 14: delete plugin-configs(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_DELETE + ) + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/plugin-metadata.t b/CloudronPackages/APISIX/apisix-source/t/admin/plugin-metadata.t new file mode 100644 index 0000000..e78ee00 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/plugin-metadata.t @@ -0,0 +1,335 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/example-plugin', + ngx.HTTP_PUT, + [[{ + "skey": "val", + "ikey": 1 + }]], + [[{ + "value": { + "skey": "val", + "ikey": 1 + }, + "key": "/apisix/plugin_metadata/example-plugin" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: update plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/example-plugin', + ngx.HTTP_PUT, + [[{ + "skey": "val2", + "ikey": 2 + }]], + [[{ + "value": { + "skey": "val2", + "ikey": 2 + } + }]] + ) + + ngx.status = code + ngx.say(body) + + -- hit again + local code, body = t('/apisix/admin/plugin_metadata/example-plugin', + ngx.HTTP_PUT, + [[{ + "skey": "val2", + "ikey": 2 + }]], + [[{ + "value": { + "skey": "val2", + "ikey": 2 + } + }]] + ) + + ngx.say(code) + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +200 +passed + + + +=== TEST 3: get plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/example-plugin', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "skey": "val2", + "ikey": 2 + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: delete plugin metadata +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/example-plugin', ngx.HTTP_DELETE) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 5: delete plugin metadata(key: not_found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/plugin_metadata/not_found', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 + + + +=== TEST 6: missing plugin name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata', + ngx.HTTP_PUT, + [[{"k": "v"}]], + [[{ + "value": "sdf" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing plugin name"} + + + +=== TEST 7: invalid plugin name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/test', + ngx.HTTP_PUT, + [[{"k": "v"}]], + [[{ + "value": "sdf" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid plugin name"} + + + +=== TEST 8: verify metadata schema fail +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/example-plugin', + ngx.HTTP_PUT, + [[{ + "skey": "val" + }]], + [[{ + "value": { + "skey": "val", + "ikey": 1 + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid configuration: property \\"ikey\\" is required"\}/ + + + +=== TEST 9: not unwanted data, PUT +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', + ngx.HTTP_PUT, + [[{ + "skey": "val", + "ikey": 1 + }]] + ) + 
+ if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/plugin_metadata/example-plugin","value":{"id":"example-plugin","ikey":1,"skey":"val"}} +--- request +GET /t + + + +=== TEST 10: not unwanted data, GET +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/plugin_metadata/example-plugin","value":{"id":"example-plugin","ikey":1,"skey":"val"}} +--- request +GET /t + + + +=== TEST 11: not unwanted data, DELETE +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/plugin_metadata/example-plugin', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"deleted":"1","key":"/apisix/plugin_metadata/example-plugin"} +--- request +GET /t diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/plugin-metadata2.t b/CloudronPackages/APISIX/apisix-source/t/admin/plugin-metadata2.t new file mode 100644 index 0000000..8bce818 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/plugin-metadata2.t @@ -0,0 +1,61 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: list empty resources +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/plugin_metadata', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"list":[],"total":0} diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/plugins-reload.t b/CloudronPackages/APISIX/apisix-source/t/admin/plugins-reload.t new file mode 100644 index 0000000..3367231 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/plugins-reload.t @@ -0,0 +1,429 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); +workers(2); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: reload plugins +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- now the plugin will be loaded twice, + -- one during startup and the other one by reload + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + + ngx.status = code + ngx.say(org_body) + ngx.sleep(1) + } +} +--- request +GET /t +--- response_body +done +--- grep_error_log eval +qr/sync local conf to etcd/ +--- grep_error_log_out +sync local conf to etcd +--- error_log +load plugin times: 2 +load plugin times: 2 +start to hot reload plugins +start to hot reload plugins + + + +=== TEST 2: reload plugins triggers plugin list sync +--- config +location /t { + content_by_lua_block { + local core = require "apisix.core" + local config_util = require("apisix.core.config_util") + ngx.sleep(1) -- make sure the sync happened when admin starts is already finished + + local before_reload = true + local plugins_conf, err + plugins_conf, err = core.config.new("/plugins", { + automatic = true, + single_item = true, + filter = function(item) + -- called once before reload for sync data from admin + ngx.log(ngx.WARN, "reload plugins on 
node ", + before_reload and "before reload" or "after reload") + ngx.log(ngx.WARN, require("toolkit.json").encode(item.value)) + end, + }) + if not plugins_conf then + error("failed to create etcd instance for fetching /plugins : " + .. err) + end + ngx.sleep(1) + + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - jwt-auth +stream_plugins: + - mqtt-proxy + ]] + require("lib.test_admin").set_config_yaml(data) + + before_reload = false + local t = require("lib.test_admin").test + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + + ngx.status = code + ngx.say(org_body) + ngx.sleep(1) + } +} +--- request +GET /t +--- response_body +done +--- grep_error_log eval +qr/reload plugins on node \w+ reload/ +--- grep_error_log_out +reload plugins on node before reload +reload plugins on node after reload +--- error_log +filter(): [{"name":"jwt-auth"},{"name":"mqtt-proxy","stream":true}] + + + +=== TEST 3: reload plugins when attributes changed +--- yaml_config +apisix: + node_listen: 1984 +plugins: + - example-plugin +plugin_attr: + example-plugin: + val: 0 +--- config +location /t { + content_by_lua_block { + local core = require "apisix.core" + ngx.sleep(0.1) + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - example-plugin +plugin_attr: + example-plugin: + val: 1 + ]] + require("lib.test_admin").set_config_yaml(data) + + local t = require("lib.test_admin").test + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + + ngx.status = code + ngx.say(org_body) + ngx.sleep(0.1) + + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - example-plugin +plugin_attr: + example-plugin: + val: 1 + ]] + 
require("lib.test_admin").set_config_yaml(data) + + local t = require("lib.test_admin").test + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + ngx.say(org_body) + ngx.sleep(0.1) + } +} +--- request +GET /t +--- response_body +done +done +--- grep_error_log eval +qr/example-plugin get plugin attr val: \d+/ +--- grep_error_log_out +example-plugin get plugin attr val: 0 +example-plugin get plugin attr val: 0 +example-plugin get plugin attr val: 0 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 + + + +=== TEST 4: reload plugins to change prometheus' export uri +--- yaml_config +apisix: + node_listen: 1984 +plugins: + - public-api + - prometheus +plugin_attr: + prometheus: + export_uri: /metrics +--- config +location /t { + content_by_lua_block { + local core = require "apisix.core" + ngx.sleep(0.1) + local t = require("lib.test_admin").test + + -- setup public API route + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/metrics" + }]] + ) + ngx.say(code) + + local code, _, org_body = t('/apisix/metrics', + ngx.HTTP_GET) + ngx.say(code) + + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - public-api + - prometheus +plugin_attr: + prometheus: + export_uri: /apisix/metrics + ]] + require("lib.test_admin").set_config_yaml(data) + + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + + ngx.say(org_body) + + ngx.sleep(0.1) + local code, _, org_body = t('/apisix/metrics', + ngx.HTTP_GET) + ngx.say(code) + } +} +--- request +GET /t +--- response_body +201 +404 +done +200 + + + +=== TEST 5: reload plugins to disable skywalking +--- yaml_config +apisix: 
+ node_listen: 1984 +plugins: + - skywalking +plugin_attr: + skywalking: + service_name: APISIX + service_instance_name: "APISIX Instance Name" + endpoint_addr: http://127.0.0.1:12801 + report_interval: 1 +--- config +location /t { + content_by_lua_block { + local core = require "apisix.core" + ngx.sleep(1.2) + local t = require("lib.test_admin").test + + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - prometheus + ]] + require("lib.test_admin").set_config_yaml(data) + + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + + ngx.say(org_body) + + ngx.sleep(2) + } +} +--- request +GET /t +--- response_body +done +--- no_error_log +[alert] +--- grep_error_log eval +qr/Instance report fails/ +--- grep_error_log_out +Instance report fails + + + +=== TEST 6: check disabling plugin via etcd +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "body":"hello upstream\n" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + ngx.sleep(0.1) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +--- request +GET /hello +--- response_body +hello upstream + + + +=== TEST 8: hit after disabling echo +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + assert(etcd.set("/plugins", {{name = "jwt-auth"}})) + + ngx.sleep(0.2) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port + .. "/hello" + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } +} +--- request +GET /t +--- response_body +hello world + + + +=== TEST 9: wrong method to reload plugins +--- request +GET /apisix/admin/plugins/reload +--- error_code: 405 +--- response_body +{"error_msg":"please use PUT method to reload the plugins, GET method is not allowed."} diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/plugins.t b/CloudronPackages/APISIX/apisix-source/t/admin/plugins.t new file mode 100644 index 0000000..0249a42 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/plugins.t @@ -0,0 +1,480 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: get plugins' name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require('cjson') + local code, _, body = t("/apisix/admin/plugins/list", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local tab = json.decode(body) + for _, v in ipairs(tab) do + ngx.say(v) + end + } + } + +--- response_body +real-ip +ai +client-control +proxy-control +request-id +zipkin +ext-plugin-pre-req +fault-injection +mocking +serverless-pre-function +cors +ip-restriction +ua-restriction +referer-restriction +csrf +uri-blocker +request-validation +chaitin-waf +multi-auth +openid-connect +cas-auth +authz-casbin +authz-casdoor +wolf-rbac +ldap-auth +hmac-auth +basic-auth +jwt-auth +jwe-decrypt +key-auth +consumer-restriction +attach-consumer-label +forward-auth +opa +authz-keycloak +proxy-cache +body-transformer +ai-request-rewrite +ai-prompt-guard +ai-prompt-template +ai-prompt-decorator +ai-rag +ai-aws-content-moderation +ai-proxy-multi +ai-proxy +ai-rate-limiting +proxy-mirror +proxy-rewrite +workflow +api-breaker +limit-conn +limit-count +limit-req +gzip +traffic-split +redirect +response-rewrite +mcp-bridge +degraphql +kafka-proxy +grpc-transcode +grpc-web +http-dubbo +public-api +prometheus +datadog +lago +loki-logger +elasticsearch-logger +echo +loggly +http-logger +splunk-hec-logging +skywalking-logger +google-cloud-logging +sls-logger +tcp-logger +kafka-logger +rocketmq-logger +syslog +udp-logger +file-logger +clickhouse-logger +tencent-cloud-cls +inspect +example-plugin +aws-lambda +azure-functions +openwhisk +openfunction +serverless-post-function +ext-plugin-post-req 
+ext-plugin-post-resp + + + +=== TEST 2: invalid plugin +--- request +GET /apisix/admin/plugins/asdf +--- error_code: 404 +--- response_body +{"error_msg":"plugin not found in subsystem http"} + + + +=== TEST 3: get plugin schema +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugins/limit-req', + ngx.HTTP_GET, + nil, + [[ + {"type":"object","required":["rate","burst","key"],"properties":{"rate":{"type":"number","exclusiveMinimum":0},"key_type":{"type":"string","enum":["var","var_combination"],"default":"var"},"burst":{"type":"number","minimum":0},"nodelay":{"type":"boolean","default":false},"key":{"type":"string"},"rejected_code":{"type":"integer","minimum":200,"maximum":599,"default":503},"rejected_msg":{"type":"string","minLength":1},"allow_degradation":{"type":"boolean","default":false}}} + ]] + ) + + ngx.status = code + } + } + + + +=== TEST 4: get plugin node-status schema +--- extra_yaml_config +plugins: + - node-status +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugins/node-status', + ngx.HTTP_GET, + nil, + [[ +{"properties":{},"type":"object"} + ]] + ) + + ngx.status = code + } + } + + + +=== TEST 5: get plugin prometheus schema +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugins/prometheus', + ngx.HTTP_GET, + nil, + [[ +{"properties":{},"type":"object"} + ]] + ) + + ngx.status = code + } + } + + + +=== TEST 6: get plugin basic-auth schema +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugins/basic-auth', + ngx.HTTP_GET, + nil, + [[ +{"properties":{},"title":"work with route or service object","type":"object"} + ]] + ) + + ngx.status = code + } + } + + + +=== TEST 7: get plugin basic-auth schema by 
schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugins/basic-auth?schema_type=consumer', + ngx.HTTP_GET, + nil, + [[ +{"title":"work with consumer object","required":["username","password"],"properties":{"username":{"type":"string"},"password":{"type":"string"}},"type":"object"} + ]] + ) + + ngx.status = code + } + } + + + +=== TEST 8: confirm the name, priority, schema, type and version of plugin +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/plugins?all=true', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + for k, v in pairs(res) do + if k == "example-plugin" then + ngx.say(json.encode(v)) + end + end + } + } +--- response_body eval +qr/\{"metadata_schema":\{"properties":\{"ikey":\{"minimum":0,"type":"number"\},"skey":\{"type":"string"\}\},"required":\["ikey","skey"\],"type":"object"\},"priority":0,"schema":\{"\$comment":"this is a mark for our injected plugin schema","properties":\{"_meta":\{"additionalProperties":false,"properties":\{"disable":\{"type":"boolean"\},"error_response":\{"oneOf":\[\{"type":"string"\},\{"type":"object"\}\]\},"filter":\{"description":"filter determines whether the plugin needs to be executed at runtime","type":"array"\},"pre_function":\{"description":"function to be executed in each phase before execution of plugins. 
The pre_function will have access to two arguments: `conf` and `ctx`.","type":"string"\},"priority":\{"description":"priority of plugins by customized order","type":"integer"\}\},"type":"object"\},"i":\{"minimum":0,"type":"number"\},"ip":\{"type":"string"\},"port":\{"type":"integer"\},"s":\{"type":"string"\},"t":\{"minItems":1,"type":"array"\}\},"required":\["i"\],"type":"object"\},"version":0.1\}/ + + + +=== TEST 9: confirm the plugin of auth type +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/plugins?all=true', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + local auth_plugins = {} + for k, v in pairs(res) do + if v.type == "auth" then + local plugin = {} + plugin.name = k + plugin.priority = v.priority + table.insert(auth_plugins, plugin) + end + end + + table.sort(auth_plugins, function(l, r) + return l.priority > r.priority + end) + ngx.say(json.encode(auth_plugins)) + } + } +--- response_body eval +qr/\[\{"name":"multi-auth","priority":2600\},\{"name":"wolf-rbac","priority":2555\},\{"name":"ldap-auth","priority":2540\},\{"name":"hmac-auth","priority":2530\},\{"name":"basic-auth","priority":2520\},\{"name":"jwt-auth","priority":2510\},\{"name":"jwe-decrypt","priority":2509\},\{"name":"key-auth","priority":2500\}\]/ + + + +=== TEST 10: confirm the consumer_schema of plugin +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/plugins?all=true', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + local consumer_schema + for k, v in pairs(res) do + if k == "basic-auth" then + consumer_schema = v.consumer_schema + end + end + 
ngx.say(json.encode(consumer_schema)) + } + } +--- response_body eval +qr/\{"encrypt_fields":\["password"\],"properties":\{"password":\{"type":"string"\},"username":\{"type":"string"\}\},"required":\["username","password"\],"title":"work with consumer object","type":"object"\}/ + + + +=== TEST 11: confirm the name, priority, schema, type and version of stream plugin +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/plugins?all=true&subsystem=stream', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + for k, v in pairs(res) do + if k == "limit-conn" then + ngx.say(json.encode(v)) + end + end + } + } +--- response_body +{"priority":1003,"schema":{"$comment":"this is a mark for our injected plugin schema","properties":{"_meta":{"additionalProperties":false,"properties":{"disable":{"type":"boolean"},"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"filter":{"description":"filter determines whether the plugin needs to be executed at runtime","type":"array"},"pre_function":{"description":"function to be executed in each phase before execution of plugins. 
The pre_function will have access to two arguments: `conf` and `ctx`.","type":"string"},"priority":{"description":"priority of plugins by customized order","type":"integer"}},"type":"object"},"burst":{"minimum":0,"type":"integer"},"conn":{"exclusiveMinimum":0,"type":"integer"},"default_conn_delay":{"exclusiveMinimum":0,"type":"number"},"key":{"type":"string"},"key_type":{"default":"var","enum":["var","var_combination"],"type":"string"},"only_use_default_delay":{"default":false,"type":"boolean"}},"required":["conn","burst","default_conn_delay","key"],"type":"object"},"version":0.1} + + + +=== TEST 12: confirm the scope of plugin +--- extra_yaml_config +plugins: + - batch-requests + - error-log-logger + - server-info + - example-plugin + - node-status +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/plugins?all=true', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + local global_plugins = {} + for k, v in pairs(res) do + if v.scope == "global" then + global_plugins[k] = v.scope + end + end + ngx.say(json.encode(global_plugins)) + } + } +--- response_body +{"batch-requests":"global","error-log-logger":"global","node-status":"global","server-info":"global"} + + + +=== TEST 13: check with wrong plugin subsystem +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local _, message, _ = t('/apisix/admin/plugins?subsystem=asdf', + ngx.HTTP_GET + ) + ngx.say(message) + } + } +--- response_body eval +qr/\{"error_msg":"unsupported subsystem: asdf"\}/ + + + +=== TEST 14: check with right plugin in wrong subsystem +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local _, message, _ = t('/apisix/admin/plugins/http-logger?subsystem=stream', + ngx.HTTP_GET + ) + 
ngx.say(message) + } + } +--- response_body eval +qr/\{"error_msg":"plugin not found in subsystem stream"\}/ + + + +=== TEST 15: check with right plugin in right subsystem +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local _, _ , message = t('/apisix/admin/plugins/http-logger?subsystem=http', + ngx.HTTP_GET + ) + ngx.say(message) + } + } +--- response_body eval +qr/this is a mark for our injected plugin schema/ diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/protos-force-delete.t b/CloudronPackages/APISIX/apisix-source/t/admin/protos-force-delete.t new file mode 100644 index 0000000..db0e5d8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/protos-force-delete.t @@ -0,0 +1,175 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set proto(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete proto(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this proto, route [1] is still using it now"} + + + +=== TEST 
4: delete proto(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this proto, route [1] is still using it now"} + + + +=== TEST 5: delete proto(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete route +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/protos.t b/CloudronPackages/APISIX/apisix-source/t/admin/protos.t new file mode 100644 index 0000000..e560fff --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/protos.t @@ -0,0 +1,216 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: put proto (id:1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content": "syntax = \"proto3\"; + package proto; + message HelloRequest{ + string name = 1; + } + + message HelloResponse{ + int32 code = 1; + string msg = 2; + } + // The greeting service definition. 
+ service Hello { + // Sends a greeting + rpc SayHi (HelloRequest) returns (HelloResponse){} + }" + }]] + ) + + if code ~= 201 then + ngx.status = code + ngx.say("[put proto] code: ", code, " message: ", message) + return + end + + ngx.say("[put proto] code: ", code, " message: ", message) + } + } +--- response_body +[put proto] code: 201 message: passed + + + +=== TEST 2: delete proto(id:1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_DELETE + ) + + if code ~= 200 then + ngx.status = code + ngx.say("[delete proto] code: ", code, " message: ", message) + return + end + + ngx.say("[delete proto] code: ", code, " message: ", message) + } + } +--- response_body +[delete proto] code: 200 message: passed + + + +=== TEST 3: put proto (id:2) + route refer proto(proto id 2) + delete proto(proto id 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/2', + ngx.HTTP_PUT, + [[{ + "content": "syntax = \"proto3\"; + package proto; + message HelloRequest{ + string name = 1; + } + + message HelloResponse{ + int32 code = 1; + string msg = 2; + } + // The greeting service definition. 
+ service Hello { + // Sends a greeting + rpc SayHi (HelloRequest) returns (HelloResponse){} + }" + }]] + ) + + if code ~= 201 then + ngx.status = code + ngx.say("[put proto] code: ", code, " message: ", message) + return + end + ngx.say("[put proto] code: ", code, " message: ", message) + + + code, message = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "grpc-transcode": { + "_meta": { + "disable": false + }, + "method": "SayHi", + "proto_id": 2, + "service": "proto.Hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/grpc/sayhi", + "name": "hi-grpc" + }]] + ) + + if code ~= 201 then + ngx.status = code + ngx.say("[route refer proto] code: ", code, " message: ", message) + return + end + ngx.say("[route refer proto] code: ", code, " message: ", message) + + ngx.sleep(0.1) -- ensure reference is synced from etcd + + code, message = t('/apisix/admin/protos/2', + ngx.HTTP_DELETE + ) + + ngx.say("[delete proto] code: ", code) + } + } +--- response_body +[put proto] code: 201 message: passed +[route refer proto] code: 201 message: passed +[delete proto] code: 400 + + + +=== TEST 4: reject invalid proto +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content": "syntax = \"proto3\"; + package proto; + message HelloRequest{ + string name = 1; + } + + message HelloResponse{ + int32 code = 1; + string msg = 1; + }" + }]] + ) + + if code ~= 200 then + ngx.status = code + end + + ngx.say(message) + } + } +--- error_code: 400 +--- response_body eval +qr/invalid content:/ diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/resources.t b/CloudronPackages/APISIX/apisix-source/t/admin/resources.t new file mode 100644 index 0000000..e1f39cc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/resources.t @@ -0,0 +1,55 @@ +# +# Licensed to 
the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: invalid resource type: 'routs' +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routs/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like +{"error_msg":"Unsupported resource type: routs"} diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/response_body_format.t b/CloudronPackages/APISIX/apisix-source/t/admin/response_body_format.t new file mode 100644 index 0000000..86f4e5d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/response_body_format.t @@ -0,0 +1,255 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + admin_api_version: v3 +apisix: + node_listen: 1984 +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: use v3 admin api, no action in response body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: response body format only have total and list (total is 1) +--- config + 
location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + res = json.decode(res) + assert(res.total == 1) + assert(res.total == #res.list) + assert(res.action == nil) + assert(res.node == nil) + assert(res.list.key == nil) + assert(res.list.dir == nil) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/routes/1") + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: response body format only have total and list (total is 2) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, message, res = t('/apisix/admin/routes', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.total == 2) + assert(res.total == #res.list) + assert(res.action == nil) + assert(res.node == nil) + assert(res.list.key == nil) + assert(res.list.dir == nil) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/routes/1") + assert(res.list[2].createdIndex ~= nil) + assert(res.list[2].modifiedIndex ~= nil) + assert(res.list[2].key == "/apisix/routes/2") + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 4: response body format (test services) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 001" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/services/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 002" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, message, res = t('/apisix/admin/services', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.total == 2) + assert(res.total == #res.list) + assert(res.action == nil) + assert(res.node == nil) + assert(res.list.key == nil) + assert(res.list.dir == nil) + assert(res.list[1].createdIndex ~= nil) + assert(res.list[1].modifiedIndex ~= nil) + assert(res.list[1].key == "/apisix/services/1") + assert(res.list[2].createdIndex ~= nil) + assert(res.list[2].modifiedIndex ~= nil) + assert(res.list[2].key == "/apisix/services/2") + ngx.say(message) + } + } +--- response_body +passed +passed +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/routes-array-nodes.t b/CloudronPackages/APISIX/apisix-source/t/admin/routes-array-nodes.t new file mode 100644 index 0000000..5378642 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/routes-array-nodes.t @@ -0,0 +1,115 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/routes.t b/CloudronPackages/APISIX/apisix-source/t/admin/routes.t new file mode 100644 
index 0000000..835d4b3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/routes.t @@ -0,0 +1,788 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": 
"/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 4: delete route(id: not_found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/routes/not_found', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 + + + +=== TEST 5: post route + delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, message, res = t('/apisix/admin/routes', + ngx.HTTP_POST, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + local id = string.sub(res.key, #"/apisix/routes/" + 1) + local res = assert(etcd.get('/routes/' .. id)) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + + code, message = t('/apisix/admin/routes/' .. 
id, + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +[delete] code: 200 message: passed + + + +=== TEST 6: uri + upstream +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]], + [[{ + "value": { + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + + local res = assert(etcd.get('/routes/1')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed + + + +=== TEST 7: uri + plugins +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "uri": "/index.html" + }]], + [[{ + "value": { + "uri": "/index.html", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 
200 message: passed + + + +=== TEST 8: invalid route: duplicate method +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like + + + +=== TEST 9: invalid method +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["invalid_method"], + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"methods\" validation failed: failed to validate item 1: matches none of the enum values"} + + + +=== TEST 10: invalid service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": "invalid_id$", + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"service_id\" validation failed: object matches none of the required"} + + + +=== TEST 11: service id: not exist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": "99999999999999", + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to fetch service info by service id [99999999999999], response code: 404"} + + + 
+=== TEST 12: invalid id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "id": 3, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong route id"} + + + +=== TEST 13: id in the rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes', + ngx.HTTP_PUT, + [[{ + "id": "1", + "plugins":{}, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: integer id less than 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes', + ngx.HTTP_PUT, + [[{ + "id": -100, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the required"} + + + +=== TEST 15: invalid upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream_id": "invalid$", + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream_id\" validation failed: object matches none of the required"} + + + +=== TEST 16: not exist upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream_id": "99999999", + "uri": "/index.html" + }]] + ) + + ngx.status = code 
+ ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to fetch upstream info by upstream id [99999999], response code: 404"} + + + +=== TEST 17: wrong route id, do not need it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes', + ngx.HTTP_POST, + [[{ + "id": 1, + "plugins":{}, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong route id, do not need it"} + + + +=== TEST 18: wrong route id, do not need it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_POST, + [[{ + "plugins":{}, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong route id, do not need it"} + + + +=== TEST 19: limit-count with `disable` option +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "_meta": { + "disable": true + } + } + }, + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed + + + +=== TEST 20: host: *.foo.com +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "host": "*.foo.com", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, 
+ "type": "roundrobin" + }, + "uri": "/index.html" + }]], + [[{ + "value": { + "host": "*.foo.com", + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: invalid host: a.*.foo.com +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "host": "a.*.foo.com", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like +{"error_msg":"invalid configuration: property \\"host\\" validation failed: failed to match pattern .* + + + +=== TEST 22: invalid host: *.a.*.foo.com +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "host": "*.a.*.foo.com", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like +{"error_msg":"invalid configuration: property \\"host\\" validation failed: failed to match pattern .* + + + +=== TEST 23: removing the init_dir key from etcd can still list all routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + local etcd = require("apisix.core.etcd") + + local code, body = t('/apisix/admin/routes/del_init_dir_1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + assert(code == 200 or code == 201, 
"failed to add route") + + local code, body = t('/apisix/admin/routes/del_init_dir_2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + assert(code == 200 or code == 201, "failed to add route") + + -- remove the init_dir key from etcd + assert(etcd.delete("/routes/")) + + -- list all routes and check them + local code, body, res = t('/apisix/admin/routes', ngx.HTTP_GET) + ngx.status = code + ngx.say(res) + } + } +--- request +GET /t +--- response_body eval +qr/del_init_dir_1.*del_init_dir_2/ diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/routes2.t b/CloudronPackages/APISIX/apisix-source/t/admin/routes2.t new file mode 100644 index 0000000..41d11cf --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/routes2.t @@ -0,0 +1,653 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: invalid route: bad remote_addrs +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "remote_addrs": [""], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/property \\"remote_addrs\\" validation failed:/ + + + +=== TEST 2: invalid route: bad remote_addrs cidr +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "remote_addrs": ["/16"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/property \\"remote_addrs\\" validation failed:/ + + + +=== TEST 3: valid route with remote_addrs +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "remote_addrs": ["::1/16", "::1", "::", "1.1.1.1", "1.1.1.1/32"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: invalid route: bad vars operator +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["GET"], + "vars": [["remote_addr", "=", "127.0.0.1"]], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]=] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to validate the 'vars' expression: invalid operator '='"} + + + +=== TEST 5: not unwanted data, POST +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes', + ngx.HTTP_POST, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/not_unwanted_data_post" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.key = nil + res.value.create_time = nil + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil + ngx.say(json.encode(res)) + } + } +--- request +GET /t +--- response_body +{"value":{"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/not_unwanted_data_post"}} + + + +=== TEST 6: not unwanted data, PUT +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes', + ngx.HTTP_PUT, + [[{ + "id": 1, + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.value.create_time = nil + res.value.update_time = nil + 
ngx.say(json.encode(res)) + } + } +--- request +GET /t +--- response_body +{"key":"/apisix/routes/1","value":{"id":1,"methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index.html"}} + + + +=== TEST 7: not unwanted data, PATCH +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.value.create_time = nil + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- request +GET /t +--- response_body +{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}} + + + +=== TEST 8: not unwanted data, GET +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- request +GET /t +--- response_body 
+{"key":"/apisix/routes/1","value":{"id":"1","methods":["GET"],"priority":0,"status":1,"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"},"uri":"/index"}} + + + +=== TEST 9: not unwanted data, DELETE +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- request +GET /t +--- response_body +{"deleted":"1","key":"/apisix/routes/1"} + + + +=== TEST 10: invalid route: empty remote_addrs +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "remote_addrs": [], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/property \\"remote_addrs\\" validation failed:/ + + + +=== TEST 11: invalid route: empty uris +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uris": [] + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/property \\"uris\\" validation failed:/ + + + +=== TEST 12: invalid route: empty hosts +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + 
"upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "hosts": [], + "uri": "/" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/property \\"hosts\\" validation failed:/ + + + +=== TEST 13: invalid route: uris & uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uris": ["/"], + "uri": "/" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/value should match only one schema/ + + + +=== TEST 14: enable remote_addrs and remote_addr together +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "remote_addr": "127.0.0.1", + "remote_addrs": ["127.0.0.1"] + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"only one of remote_addr or remote_addrs is allowed"} + + + +=== TEST 15: labels in Chinese +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "您好": "世界" + }, + + "uri": "/index.html" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "您好": "世界" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + 
ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: labels value with whitespace +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "您好": "世 界" + }, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/invalid configuration: property \\"labels\\" validation failed/ + + + +=== TEST 17: route with plugin_config_id (not found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugin_config_id": "not_found", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to fetch plugin config info by plugin config id [not_found], response code: 404"} + + + +=== TEST 18: valid route with timeout +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "timeout": { + "connect": 3, + "send": 3, + "read": 3 + }, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/routes3.t b/CloudronPackages/APISIX/apisix-source/t/admin/routes3.t new file mode 100644 index 0000000..331f1b2 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/admin/routes3.t @@ -0,0 +1,743 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: list empty resources +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/routes', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"list":[],"total":0} + + + +=== TEST 2: remote_addr: 127.0.0.1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": 
"roundrobin" + }, + "uri": "/index.html" + }]], + [[{ + "value": { + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }, + "key": "/apisix/routes/1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: remote_addr: 127.0.0.1/24 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.0/24", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]], + [[{ + "value": { + "remote_addr": "127.0.0.0/24", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }, + "key": "/apisix/routes/1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: remote_addr: 127.0.0.33333 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.33333", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"remote_addr\" validation failed: object matches none of the required"} + + + +=== TEST 5: all method +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST", "PUT", "DELETE", "PATCH", + "HEAD", "OPTIONS", "CONNECT", "TRACE", "PURGE"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + 
"uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: patch route(new uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + + local id = 1 + local res = assert(etcd.get('/routes/' .. id)) + local prev_create_time = res.body.node.value.create_time + local prev_update_time = res.body.node.value.update_time + ngx.sleep(1) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + [[{ + "uri": "/patch_test" + }]], + [[{ + "value": { + "uri": "/patch_test" + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/routes/' .. id)) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } +--- response_body +passed + + + +=== TEST 7: patch route(multi) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": null, + "127.0.0.2:8080": 1 + } + }, + "desc": "new route" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/patch_test", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.2:8080": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: patch route(new methods) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + [[{ + "methods": ["GET", "DELETE", "PATCH", "POST", "PUT"] + }]], + [[{ + "value": { + "methods": ["GET", 
"DELETE", "PATCH", "POST", "PUT"] + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: patch route(minus methods) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + [[{ + "methods": ["GET", "POST"] + }]], + [[{ + "value": { + "methods": ["GET", "POST"] + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: patch route(new methods - sub path way) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1/methods', + ngx.HTTP_PATCH, + '["POST"]', + [[{ + "value": { + "methods": [ + "POST" + ] + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: patch route(new uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1/uri', + ngx.HTTP_PATCH, + '"/patch_uri_test"', + [[{ + "value": { + "uri": "/patch_uri_test" + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: patch route(whole) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1/', + ngx.HTTP_PATCH, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "desc": "new route", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } 
+--- response_body +passed + + + +=== TEST 13: multiple hosts +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/index.html", + "hosts": ["foo.com", "*.bar.com"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + }]], + [[{ + "value": { + "hosts": ["foo.com", "*.bar.com"] + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: enable hosts and host together +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/index.html", + "host": "xxx.com", + "hosts": ["foo.com", "*.bar.com"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"only one of host or hosts is allowed"} + + + +=== TEST 15: multiple remote_addrs +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/index.html", + "remote_addrs": ["127.0.0.1", "192.0.0.1/8", "::1", "fe80::/32"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + }]], + [[{ + "value": { + "remote_addrs": ["127.0.0.1", "192.0.0.1/8", "::1", "fe80::/32"] + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: multiple vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/index.html", + "vars": [["arg_name", "==", "json"], ["arg_age", ">", 18]], + "upstream": { + 
"nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + }]=], + [=[{ + "value": { + "vars": [["arg_name", "==", "json"], ["arg_age", ">", 18]] + } + }]=] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: filter function +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/index.html", + "filter_func": "function(vars) return vars.arg_name == 'json' end", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]=], + [=[{ + "value": { + "filter_func": "function(vars) return vars.arg_name == 'json' end" + } + }]=] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: filter function (invalid) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/index.html", + "filter_func": "function(vars) ", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]=] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to load 'filter_func' string: [string \"return function(vars) \"]:1: 'end' expected near ''"} + + + +=== TEST 19: Support for multiple URIs +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uris": ["/index.html","/index2.html"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]=] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: set route(id: 1, parameters with boolean values) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local 
code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/index.html", + "enable_websocket": true, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080":1 + } + } + }]]) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: patch route(modify the boolean value of parameters to false) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1/enable_websocket', + ngx.HTTP_PATCH, + 'false', + [[{ + "value": { + "enable_websocket": false + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: patch route(modify the boolean value of parameters to true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1/enable_websocket', + ngx.HTTP_PATCH, + 'true', + [[{ + "value": { + "enable_websocket": true + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/routes4.t b/CloudronPackages/APISIX/apisix-source/t/admin/routes4.t new file mode 100644 index 0000000..0bab450 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/routes4.t @@ -0,0 +1,795 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route with ttl +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local core = require("apisix.core") + -- set + local code, body, res = t('/apisix/admin/routes/1?ttl=1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + -- get + code, body = t('/apisix/admin/routes/1?ttl=1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "uri": "/index.html" + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.say("code: ", code) + ngx.say(body) + + -- etcd v3 would still get the value at 2s, don't know why yet + ngx.sleep(2.5) + + -- get again + code, body, res = t('/apisix/admin/routes/1', ngx.HTTP_GET) + + ngx.say("code: ", code) + ngx.say("message: ", core.json.decode(body).message) + } +} +--- response_body +code: 200 +passed +code: 404 +message: Key not found +--- timeout: 5 + + + +=== TEST 2: post route with ttl +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local core = require("apisix.core") + + local 
code, body, res = t('/apisix/admin/routes?ttl=1', + ngx.HTTP_POST, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]], + [[{}]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say("[push] succ: ", body) + ngx.sleep(2.5) + + local id = string.sub(res.key, #"/apisix/routes/" + 1) + code, body = t('/apisix/admin/routes/' .. id, ngx.HTTP_GET) + + ngx.say("code: ", code) + ngx.say("message: ", core.json.decode(body).message) + } +} +--- response_body +[push] succ: passed +code: 404 +message: Key not found +--- timeout: 5 + + + +=== TEST 3: invalid argument: ttl +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body, res = t('/apisix/admin/routes?ttl=xxx', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + ngx.say("[push] succ: ", body) + } +} +--- error_code: 400 +--- response_body +{"error_msg":"invalid argument ttl: should be a number"} + + + +=== TEST 4: set route(id: 1, check priority) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]], + [[{ + "value": { + "priority": 0 + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: set route(id: 1 + priority: 0) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { 
+ "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html", + "priority": 1 + }]], + [[{ + "value": { + "priority": 1 + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: set route(id: 1) and upstream(type:chash, default hash_on: vars, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash" + }, + "desc": "new route", + "uri": "/index.html" + }]]) + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 7: set route(id: 1) and upstream(type:chash, hash_on: header, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on":"header" + }, + "desc": "new route", + "uri": "/index.html" + }]]) + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 8: set route(id: 1) and upstream(type:chash, hash_on: cookie, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on":"cookie" + }, + "desc": "new route", + "uri": "/index.html" + }]]) + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 9: set route(id: 1) and upstream(type:chash, hash_on: consumer, missing key 
is ok) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on":"consumer" + }, + "desc": "new route", + "uri": "/index.html" + }]]) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: set route(id: 1 + name: test name) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "name": "test name", + "uri": "/index.html" + }]], + [[{ + "value": { + "name": "test name" + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: string id(delete) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/a-b-c-ABC_0123', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: invalid string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/*invalid', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": 
"/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 400 + + + +=== TEST 14: Verify Response Content-Type=application/json +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local httpc = http.new() + httpc:set_timeout(500) + httpc:connect(ngx.var.server_addr, ngx.var.server_port) + local res, err = httpc:request( + { + path = '/apisix/admin/routes/1?ttl=1', + method = "GET", + } + ) + + ngx.header["Content-Type"] = res.headers["Content-Type"] + ngx.status = 200 + ngx.say("passed") + } + } +--- response_headers +Content-Type: application/json + + + +=== TEST 15: set route with size 36k (temporary file to store request body) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local core = require("apisix.core") + local s = string.rep("a", 1024 * 35) + local req_body = [[{ + "upstream": { + "nodes": { + "]] .. s .. [[": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, req_body) + + if code >= 300 then + ngx.status = code + end + + ngx.say("req size: ", #req_body) + ngx.say(body) + } + } +--- response_body +req size: 36066 +passed +--- error_log +a client request body is buffered to a temporary file + + + +=== TEST 16: route size more than 1.5 MiB +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local s = string.rep( "a", 1024 * 1024 * 1.6 ) + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "]] .. s .. 
[[", + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid request body: request size 1678025 is greater than the maximum size 1572864 allowed"} +--- error_log +failed to read request body: request size 1678025 is greater than the maximum size 1572864 allowed + + + +=== TEST 17: uri + plugins + script failed +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "script": "local _M = {} \n function _M.access(api_ctx) \n ngx.log(ngx.INFO,\"hit access phase\") \n end \nreturn _M", + "uri": "/index.html" + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + } + } +--- error_code: 400 +--- response_body_like +{"error_msg":"invalid configuration: value wasn't supposed to match schema"} + + + +=== TEST 18: invalid route: multi nodes with `node` mode to pass host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "GET"], + "upstream": { + "nodes": { + "apisix.com:8080": 1, + "test.com:8080": 1 + }, + "type": "roundrobin", + "pass_host": "node" + }, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 + + + +=== TEST 19: set route(with labels) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "build": "16", + "env": "production", + "version": "v2" + }, 
+ + "uri": "/index.html" + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "build": "16", + "env": "production", + "version": "v2" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: patch route(change labels) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + [[{ + "labels": { + "build": "17" + } + }]], + [[{ + "value": { + "methods": [ + "GET" + ], + "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "env": "production", + "version": "v2", + "build": "17" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: invalid format of label value: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/index.html", + "labels": { + "env": ["production", "release"] + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"labels\" validation failed: failed to validate env (matching \".*\"): wrong type: expected string, got table"} + + + +=== TEST 22: create route with create_time and update_time(id : 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html", + "create_time": 1602883670, + "update_time": 1602893670 + }]], + [[{ + "value": { 
+ "uri": "/index.html", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "create_time": 1602883670, + "update_time": 1602893670 + }, + "key": "/apisix/routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/routes_request_body.t b/CloudronPackages/APISIX/apisix-source/t/admin/routes_request_body.t new file mode 100644 index 0000000..4c4cb71 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/routes_request_body.t @@ -0,0 +1,274 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route in request body vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "vars": [ + [ + ["post_arg.model","==", "deepseek"] + ] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "vars": [ + [ + ["post_arg.model","==","openai"] + ] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: send request with model == deepseek +--- request +POST /hello +{ "model":"deepseek", "messages": [ { "role": "system", "content": "You are a mathematician" }] } +--- more_headers +Content-Type: application/json +--- error_code: 404 + + + +=== TEST 3: send request with model == openai and content-type == application/json +--- request +POST /hello +{ "model":"openai", "messages": [ { "role": "system", "content": "You are a mathematician" }] } +--- more_headers +Content-Type: application/json +--- error_code: 200 + + + +=== TEST 4: send request with model == openai and content-type == application/x-www-form-urlencoded +--- request +POST /hello +model=openai&messages[0][role]=system&messages[0][content]=You%20are%20a%20mathematician +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 200 + + + +=== TEST 5: multipart/form-data with model=openai +--- request +POST /hello +--testboundary +Content-Disposition: form-data; name="model" + 
+openai +--testboundary-- +--- more_headers +Content-Type: multipart/form-data; boundary=testboundary +--- error_code: 200 + + + +=== TEST 6: no match without content type +--- request +POST /hello +--testboundary +Content-Disposition: form-data; name="model" + +openai +--testboundary-- +--- error_code: 404 +--- error_log +unsupported content-type in header: + + + +=== TEST 7: use array in request body vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "vars": [ + [ + ["post_arg.messages[*].content[*].type","has","image_url"] + ] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: send request with type not image_url +--- request +POST /hello +{ "model":"deepseek", "messages": [ { "role": "system", "content": [{"text":"You are a mathematician","type":"text"}] }] } +--- more_headers +Content-Type: application/json +--- error_code: 404 + + + +=== TEST 9: send request with type has image_url +--- request +POST /hello +{ "model":"deepseek", "messages": [ { "role": "system", "content": [{"text":"You are a mathematician","type":"text"},{"text":"You are a mathematician","type":"image_url"}] }] } +--- more_headers +Content-Type: application/json +--- error_code: 200 + + + +=== TEST 10: use invalid jsonpath input +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "vars": [ + [ + ["post_arg.messages[.content[*].type","has","image_url"] + ] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body eval 
+qr/.*failed to validate the 'vars' expression: invalid expression.*/ +--- error_code: 400 + + + +=== TEST 11: use non array in request body vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "vars": [ + [ + ["post_arg.model.name","==","deepseek"] + ] + ], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: send request +--- request +POST /hello +{ "model":{"name": "deepseek"}, "messages": [ { "role": "system", "content": [{"text":"You are a mathematician","type":"text"},{"text":"You are a mathematician","type":"image_url"}] }] } +--- more_headers +Content-Type: application/json +--- error_code: 200 diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/schema-validate.t b/CloudronPackages/APISIX/apisix-source/t/admin/schema-validate.t new file mode 100644 index 0000000..81698b2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/schema-validate.t @@ -0,0 +1,441 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("warn"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: validate ok +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uri": "/httpbin/*", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 200 + + + +=== TEST 2: validate failed, wrong uri type +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uri": 666, + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg": {"property \"uri\" validation failed: wrong type: expected string, got number"}} + + + +=== TEST 3: validate failed, length limit +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uri": "", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uri\" validation failed: string too short, expected at least 1, got 0"} + + + +=== TEST 4: validate failed, array type expected +--- config +location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uris": "foobar", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uris\" validation failed: wrong type: expected array, got string"} + + + +=== TEST 5: validate failed, array size limit +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uris": [], + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uris\" validation failed: expect array to have at least 1 items"} + + + +=== TEST 6: validate failed, array unique items +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uris": ["/foo", "/foo"], + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uris\" validation failed: expected unique items but items 1 and 2 are equal"} + + + +=== TEST 7: validate failed, uri or uris is mandatory +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then 
+ ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"allOf 1 failed: value should match only one schema, but matches none"} + + + +=== TEST 8: validate failed, enum check +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "status": 3, + "uri": "/foo", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"status\" validation failed: matches none of the enum values"} + + + +=== TEST 9: validate failed, wrong combination +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "script": "xxxxxxxxxxxxxxxxxxxxx", + "plugin_config_id": "foo" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"allOf 1 failed: value should match only one schema, but matches none"} + + + +=== TEST 10: validate failed, id_schema check +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "plugin_config_id": "@@@@@@@@@@@@@@@@", + "uri": "/foo", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"plugin_config_id\" validation failed: object matches none of the required"} + + + +=== TEST 11: upstream ok +--- config +location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/upstreams', + ngx.HTTP_POST, + [[{ + "nodes":{ + "nghttp2.org":100 + }, + "type":"roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 200 + + + +=== TEST 12: upstream failed, wrong nodes format +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/upstreams', + ngx.HTTP_POST, + [[{ + "nodes":[ + "nghttp2.org" + ], + "type":"roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"allOf 1 failed: value should match only one schema, but matches none"} + + + +=== TEST 13: Check node_schema optional port +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes', + ngx.HTTP_POST, + { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + { host = "127.0.0.1:1980", weight = 1,} + } + }, + methods = {"GET"}, + } + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: Test route upstream +--- request +GET /hello +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/schema.t b/CloudronPackages/APISIX/apisix-source/t/admin/schema.t new file mode 100644 index 0000000..7853add --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/schema.t @@ -0,0 +1,250 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: get route schema +--- request +GET /apisix/admin/schema/route +--- response_body eval +qr/"plugins":\{"type":"object"}/ + + + +=== TEST 2: get service schema and check if it contains `anyOf` +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, _, res_body = t('/apisix/admin/schema/service', ngx.HTTP_GET) + local res_data = core.json.decode(res_body) + if res_data["anyOf"] then + ngx.say("found `anyOf`") + return + end + + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: get not exist schema +--- request +GET /apisix/admin/schema/noexits +--- error_code: 400 + + + +=== TEST 4: wrong method +--- request +PUT /apisix/admin/schema/service +--- error_code: 404 + + + +=== TEST 5: wrong method +--- request +POST /apisix/admin/schema/service +--- error_code: 404 + + + +=== TEST 6: ssl +--- config +location /t { + content_by_lua_block { + local ssl = require("apisix.schema_def").ssl + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/ssl', + ngx.HTTP_GET, + nil, + ssl + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== 
TEST 7: get plugin's schema +--- request +GET /apisix/admin/schema/plugins/limit-count +--- response_body eval +qr/"required":\["count","time_window"\]/ + + + +=== TEST 8: get not exist plugin +--- request +GET /apisix/admin/schema/plugins/no-exist +--- error_code: 404 + + + +=== TEST 9: serverless-pre-function +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/serverless-pre-function', + ngx.HTTP_GET, + nil, + [[{ + "properties": { + "phase": { + "enum": ["rewrite", "access", "header_filter", "body_filter", "log", "before_proxy"], + "type": "string" + }, + "functions": { + "minItems": 1, + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["functions"], + "type": "object" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: serverless-post-function +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/serverless-post-function', + ngx.HTTP_GET, + nil, + [[{ + "properties": { + "phase": { + "enum": ["rewrite", "access", "header_filter", "body_filter", "log", "before_proxy"], + "type": "string" + }, + "functions": { + "minItems": 1, + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": ["functions"], + "type": "object" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: get plugin udp-logger schema +--- request +GET /apisix/admin/schema/plugins/udp-logger +--- response_body eval +qr/"properties":/ + + + +=== TEST 12: get plugin grpc-transcode schema +--- request +GET /apisix/admin/schema/plugins/grpc-transcode +--- response_body eval +qr/("proto_id".*additionalProperties|additionalProperties.*"proto_id")/ + + + +=== TEST 13: get plugin prometheus schema +--- request +GET 
/apisix/admin/schema/plugins/prometheus +--- response_body eval +qr/"disable":\{"type":"boolean"\}/ + + + +=== TEST 14: get plugin node-status schema +--- extra_yaml_config +plugins: + - node-status +--- request +GET /apisix/admin/schema/plugins/node-status +--- response_body eval +qr/"disable":\{"type":"boolean"\}/ + + + +=== TEST 15: get global_rule schema to check if it contains `create_time` and `update_time` +--- request +GET /apisix/admin/schema/global_rule +--- response_body eval +qr/("update_time":\{"type":"integer"\}.*"create_time":\{"type":"integer"\}|"create_time":\{"type":"integer"\}.*"update_time":\{"type":"integer"\})/ + + + +=== TEST 16: get proto schema to check if it contains `create_time` and `update_time` +--- request +GET /apisix/admin/schema/proto +--- response_body eval +qr/("update_time":\{"type":"integer"\}.*"create_time":\{"type":"integer"\}|"create_time":\{"type":"integer"\}.*"update_time":\{"type":"integer"\})/ + + + +=== TEST 17: get stream_route schema to check if it contains `create_time` and `update_time` +--- request +GET /apisix/admin/schema/stream_route +--- response_body eval +qr/("update_time":\{"type":"integer"\}.*"create_time":\{"type":"integer"\}|"create_time":\{"type":"integer"\}.*"update_time":\{"type":"integer"\})/ diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/secrets.t b/CloudronPackages/APISIX/apisix-source/t/admin/secrets.t new file mode 100644 index 0000000..79402ab --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/secrets.t @@ -0,0 +1,279 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: PUT +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "apisix" + }]], + [[{ + "value": { + "uri": "http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "apisix" + }, + "key": "/apisix/secrets/vault/test1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/secrets/vault/test1')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed + + + +=== TEST 2: GET +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "uri": "http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "apisix" + }, + "key": "/apisix/secrets/vault/test1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: GET all +--- config + location 
/t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/secrets', + ngx.HTTP_GET, + nil, + [[{ + "total": 1, + "list": [ + { + "key": "/apisix/secrets/vault/test1", + "value": { + "uri": "http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "apisix" + } + } + ] + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: PATCH on path +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/secrets/vault/test1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local prev_update_time = res.body.node.value.update_time + assert(prev_update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/secrets/vault/test1/token', + ngx.HTTP_PATCH, + [["unknown"]], + [[{ + "value": { + "uri": "http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "unknown" + }, + "key": "/apisix/secrets/vault/test1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/secrets/vault/test1')) + assert(res.body.node.value.token == "unknown") + } + } +--- response_body +passed + + + +=== TEST 5: PATCH +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/secrets/vault/test1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local prev_update_time = res.body.node.value.update_time + assert(prev_update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PATCH, + [[{ + "uri": "http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "apisix" + }]], + [[{ + "value": { + "uri": 
"http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "apisix" + }, + "key": "/apisix/secrets/vault/test1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/secrets/vault/test1')) + assert(res.body.node.value.token == "apisix") + } + } +--- response_body +passed + + + +=== TEST 6: PATCH without id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/secrets/vault', + ngx.HTTP_PATCH, + [[{}]], + [[{}]] + ) + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"no secret id"} + + + +=== TEST 7: DELETE +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_DELETE + ) + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: PUT with invalid format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "/get", + "prefix" : "apisix", + "token" : "apisix" + }]], + [[{ + "value": { + "uri": "http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "apisix" + }, + "key": "/apisix/secrets/vault/test1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- error_code: 400 +--- response_body eval +qr/validation failed: failed to match pattern/ diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/services-array-nodes.t b/CloudronPackages/APISIX/apisix-source/t/admin/services-array-nodes.t new file mode 100644 index 0000000..12dbbef --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/services-array-nodes.t @@ -0,0 +1,105 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }, + "desc": "new service" + }]], + [[{ + "value": { + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff 
--git a/CloudronPackages/APISIX/apisix-source/t/admin/services-force-delete.t b/CloudronPackages/APISIX/apisix-source/t/admin/services-force-delete.t new file mode 100644 index 0000000..439b44e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/services-force-delete.t @@ -0,0 +1,156 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": 1, + "uri": "/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete service(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this service directly, route [1] is still using it now"} + + + +=== TEST 4: delete service(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: 
{"error_msg":"can not delete this service directly, route [1] is still using it now"} + + + +=== TEST 5: delete service(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete route +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/services-string-id.t b/CloudronPackages/APISIX/apisix-source/t/admin/services-string-id.t new file mode 100644 index 0000000..bef6b1d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/services-string-id.t @@ -0,0 +1,745 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set service(id: 5eeb3dc90f747328b2930b0b) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get service(id: 5eeb3dc90f747328b2930b0b) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: delete service(id: 5eeb3dc90f747328b2930b0b) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 4: delete service(id: not_found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/services/not_found', ngx.HTTP_DELETE) + + ngx.say("[delete] code: ", code) + } + 
} +--- request +GET /t +--- response_body +[delete] code: 404 + + + +=== TEST 5: post service + delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services', + ngx.HTTP_POST, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + + local id = string.sub(res.key, #"/apisix/services/" + 1) + code, message = t('/apisix/admin/services/' .. id, ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +[delete] code: 200 message: passed + + + +=== TEST 6: uri + upstream +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed + + + +=== TEST 7: uri + plugins +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + 
"time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed + + + +=== TEST 8: invalid service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/*invalid_id$', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + + ngx.exit(code) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 9: invalid id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "id": "3", + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong service id"} + + + +=== TEST 10: id in the rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "5eeb3dc90f747328b2930b0b", + "plugins": {} + }]], + [[{ + "value": { + "plugins": {} + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: integer id less than 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + 
[[{ + "id": -100, + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the required"} + + + +=== TEST 12: invalid service id: contains symbols value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "*invalid_id$", + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the required"} + + + +=== TEST 13: invalid upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "5eeb3dc90f747328b2930b0b", + "upstream_id": "invalid$" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream_id\" validation failed: object matches none of the required"} + + + +=== TEST 14: not exist upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "5eeb3dc90f747328b2930b0b", + "upstream_id": "9999999999" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to fetch upstream info by upstream id [9999999999], response code: 404"} + + + +=== TEST 15: wrong service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_POST, + [[{ + 
"plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong service id, do not need it"} + + + +=== TEST 16: wrong service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_POST, + [[{ + "id": "5eeb3dc90f747328b2930b0b", + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong service id, do not need it"} + + + +=== TEST 17: patch service(whole) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PATCH, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 20 service" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 20 service" + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: patch service(new desc) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PATCH, + [[{ + "desc": "new 19 service" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 19 service" + }, + "key": "/apisix/services/5eeb3dc90f747328b2930b0b" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: patch service(new nodes) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PATCH, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" + } + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, default hash_on: vars, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash" + }, + "desc": "new service" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 21: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, hash_on: header, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "header" + }, + "desc": "new service" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 22: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, hash_on: cookie, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": 
"cookie" + }, + "desc": "new service" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 23: set service(id: 5eeb3dc90f747328b2930b0b) and upstream(type:chash, hash_on: consumer, missing key is ok) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/5eeb3dc90f747328b2930b0b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "consumer" + }, + "desc": "new service" + }]] + ) + + ngx.status = code + ngx.say(code .. " " .. body) + } + } +--- request +GET /t +--- response_body +200 passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/services.t b/CloudronPackages/APISIX/apisix-source/t/admin/services.t new file mode 100644 index 0000000..90a5e92 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/services.t @@ -0,0 +1,1281 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/services/1')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: delete service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message,res = t('/apisix/admin/services/1', ngx.HTTP_DELETE) + + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 4: delete service(id: not_found) +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code = t('/apisix/admin/services/not_found', ngx.HTTP_DELETE) + + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 + + + +=== TEST 5: post service + delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, message, res = t('/apisix/admin/services', + ngx.HTTP_POST, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + + local id = string.sub(res.key, #"/apisix/services/" + 1) + local res = assert(etcd.get('/services/' .. id)) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + + code, message = t('/apisix/admin/services/' .. 
id, ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +[delete] code: 200 message: passed + + + +=== TEST 6: uri + upstream +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed + + + +=== TEST 7: uri + plugins +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]], + [[{ + "value": { + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed + + + +=== TEST 8: invalid service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/invalid_id$', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + + ngx.exit(code) + } + 
} +--- request +GET /t +--- error_code: 400 + + + +=== TEST 9: invalid id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "id": 3, + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong service id"} + + + +=== TEST 10: id in the rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "1", + "plugins": {} + }]], + [[{ + "value": { + "plugins": {} + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: integer id less than 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": -100, + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the required"} + + + +=== TEST 12: invalid service id: string value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": "invalid_id$", + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the required"} + + + +=== TEST 13: invalid upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + 
[[{ + "id": 1, + "upstream_id": "invalid$" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream_id\" validation failed: object matches none of the required"} + + + +=== TEST 14: not exist upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_PUT, + [[{ + "id": 1, + "upstream_id": "9999999999" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to fetch upstream info by upstream id [9999999999], response code: 404"} + + + +=== TEST 15: wrong service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_POST, + [[{ + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong service id, do not need it"} + + + +=== TEST 16: wrong service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_POST, + [[{ + "id": 1, + "plugins": {} + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong service id, do not need it"} + + + +=== TEST 17: patch service(whole) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + + local id = 1 + local res = assert(etcd.get('/services/' .. 
id)) + local prev_create_time = res.body.node.value.create_time + local prev_update_time = res.body.node.value.update_time + ngx.sleep(1) + + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PATCH, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 20 service" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 20 service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/services/' .. id)) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: patch service(new desc) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PATCH, + [[{ + "desc": "new 19 service" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 19 service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: patch service(new nodes) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PATCH, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" + } + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin" + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 20: patch service(whole - sub path) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1/', + ngx.HTTP_PATCH, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 22 service" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 22 service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: patch service(new desc - sub path) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1/desc', + ngx.HTTP_PATCH, + '"new 23 service"', + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new 23 service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: patch service(new nodes - sub path) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1/upstream', + ngx.HTTP_PATCH, + [[{ + "nodes": { + "127.0.0.2:8081": 3, + "127.0.0.3:8082": 4 + }, + "type": "roundrobin" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.2:8081": 3, + "127.0.0.3:8082": 4 + }, + "type": "roundrobin" + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 23: set service(id: 1) and upstream(type:chash, default hash_on: vars, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + 
"nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash" + }, + "desc": "new service" + }]]) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 24: set service(id: 1) and upstream(type:chash, hash_on: header, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "header" + }, + "desc": "new service" + }]]) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 25: set service(id: 1) and upstream(type:chash, hash_on: cookie, missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "cookie" + }, + "desc": "new service" + }]]) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 26: set service(id: 1) and upstream(type:chash, hash_on: consumer, missing key is ok) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "consumer" + }, + "desc": "new service" + }]]) + + ngx.status = code + ngx.say(code .. " " .. 
body) + } + } +--- request +GET /t +--- response_body +200 passed + + + +=== TEST 27: set service(id: 1 + test service name) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "name": "test service name" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "name": "test service name" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 28: invalid string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/*invalid', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 29: set empty service. (id: 1)(allow empty `service` object) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + '{}', + [[{ + "value": { + "id":"1" + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 30: patch content to the empty service. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PATCH, + [[{ + "desc": "empty service", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:80": 1 + } + } + }]], + [[{ + "value":{ + "desc":"empty service", + "plugins":{ + "limit-count":{ + "time_window":60, + "count":2, + "rejected_code":503, + "key":"remote_addr", + "policy":"local" + } + }, + "upstream":{ + "type":"roundrobin", + "nodes":{ + "127.0.0.1:80":1 + }, + "hash_on":"vars", + "pass_host":"pass" + }, + "id":"1" + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 31: set service(with labels) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "build":"16", + "env":"production", + "version":"v2" + }, + "desc": "new service" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "build": "16", + "env": "production", + "version": "v2" + }, + "desc": "new service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 32: patch service(change labels) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PATCH, + [[{ + "labels": { + "build": "17" + } + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "build": "17", + "env": "production", + 
"version": "v2" + }, + "desc": "new service" + }, + "key": "/apisix/services/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 33: invalid format of label value: set service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "labels": { + "env": ["production", "release"] + }, + "desc": "new service" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"labels\" validation failed: failed to validate env (matching \".*\"): wrong type: expected string, got table"} + + + +=== TEST 34: create service with create_time and update_time(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "create_time": 1602883670, + "update_time": 1602893670 + }]]) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ + + + +=== TEST 35: create service and the built-in resource with create_time and update_time(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8080": 1 + }, + "create_time": 1602883670, + "update_time": 1602893670 + } + }]]) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ + + + 
+=== TEST 36: limit the length of service's name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', ngx.HTTP_PUT, + require("toolkit.json").encode({name = ("1"):rep(101)})) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"name\" validation failed: string too long, expected at most 100, got 101"} + + + +=== TEST 37: allow dot in the id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/a.b', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }]], + [[{ + "value": { + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new service" + }, + "key": "/apisix/services/a.b" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/services2.t b/CloudronPackages/APISIX/apisix-source/t/admin/services2.t new file mode 100644 index 0000000..a47592b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/services2.t @@ -0,0 +1,300 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: not unwanted data, POST +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services', + ngx.HTTP_POST, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"value":{"upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} + + + +=== TEST 2: not unwanted data, PUT +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 
then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.value.create_time = nil + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} + + + +=== TEST 3: not unwanted data, PATCH +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/1', + ngx.HTTP_PATCH, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + res.value.create_time = nil + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} + + + +=== TEST 4: not unwanted data, GET +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/1', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/services/1","value":{"id":"1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} + + + +=== TEST 5: not unwanted data, DELETE +--- config + 
location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/services/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"deleted":"1","key":"/apisix/services/1"} + + + +=== TEST 6: set service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": 1, + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: delete service(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this service directly, route [1] is still using it now"} + + + +=== TEST 9: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 10: delete service(id: 1) +--- config + location /t 
{ + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 200 message: passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/ssl.t b/CloudronPackages/APISIX/apisix-source/t/admin/ssl.t new file mode 100644 index 0000000..24a2c99 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/ssl.t @@ -0,0 +1,802 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: set ssl(id: 1) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local etcd = require("apisix.core.etcd") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/ssls/1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get ssl(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/ssls/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "sni": "test.com", + "key": null + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: delete ssl(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 4: delete ssl(id: 99999999999999) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/ssls/99999999999999', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code) + } + } +--- request +GET 
/t +--- response_body +[delete] code: 404 + + + +=== TEST 5: push ssl + delete +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "foo.com"} + + local code, message, res = t.test('/apisix/admin/ssls', + ngx.HTTP_POST, + core.json.encode(data), + [[{ + "value": { + "sni": "foo.com" + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + + local id = string.sub(res.key, #"/apisix/ssls/" + 1) + code, message = t.test('/apisix/admin/ssls/' .. id, ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +[delete] code: 200 message: passed + + + +=== TEST 6: missing certificate information +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {sni = "foo.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "foo.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: then clause did not match"} + + + +=== TEST 7: wildcard host name +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "*.foo.com"} + + local 
code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "*.foo.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: store sni in `snis` +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = { + cert = ssl_cert, key = ssl_key, + snis = {"*.foo.com", "bar.com"}, + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "snis": ["*.foo.com", "bar.com"] + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: string id +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/a-b-c-ABC_0123', + ngx.HTTP_PUT, + core.json.encode(data) + ) + if code > 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: string id(delete) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/a-b-c-ABC_0123', + ngx.HTTP_DELETE + ) + if code > 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 11: invalid id +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/*invalid', + ngx.HTTP_PUT, + core.json.encode(data) + ) + if code > 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 12: set ssl with multicerts(id: 1) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local ssl_ecc_cert = t.read_file("t/certs/apisix_ecc.crt") + local ssl_ecc_key = t.read_file("t/certs/apisix_ecc.key") + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + certs = {ssl_ecc_cert}, + keys = {ssl_ecc_key} + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: mismatched certs and keys +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_ecc_cert = t.read_file("t/certs/apisix_ecc.crt") + + local data = { + sni = "test.com", + certs = { ssl_ecc_cert }, + keys = {}, + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body 
+{"error_msg":"invalid configuration: then clause did not match"} + + + +=== TEST 14: set ssl(with labels) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", labels = { version = "v2", build = "16", env = "production"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "labels": { + "version": "v2", + "build": "16", + "env": "production" + } + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: invalid format of label value: set ssl +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", labels = { env = {"production", "release"}}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "labels": { + "env": ["production", "release"] + } + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"labels\" validation failed: failed to validate env (matching \".*\"): wrong type: expected string, got table"} + + + +=== TEST 16: create ssl with manage fields(id: 1) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = 
t.read_file("t/certs/apisix.key") + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com" + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: delete test ssl(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 18: create/patch ssl +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local etcd = require("apisix.core.etcd") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body, res = t.test('/apisix/admin/ssls', + ngx.HTTP_POST, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(body) + return + end + + local id = string.sub(res.key, #"/apisix/ssls/" + 1) + local res = assert(etcd.get('/ssls/' .. id)) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + + local code, body = t.test('/apisix/admin/ssls/' .. id, + ngx.HTTP_PATCH, + core.json.encode({create_time = 0, update_time = 1}) + ) + + if code ~= 200 then + ngx.status = code + ngx.say(body) + return + end + + local res = assert(etcd.get('/ssls/' .. 
id)) + local create_time = res.body.node.value.create_time + assert(create_time == 0, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(update_time == 1, "update_time mismatched") + + -- clean up + local code, body = t.test('/apisix/admin/ssls/' .. id, ngx.HTTP_DELETE) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: missing sni information +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: then clause did not match"} + + + +=== TEST 20: type client, missing sni information +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {type = "client", cert = ssl_cert, key = ssl_key} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- response_body chomp +passed + + + +=== TEST 21: set ssl with secret +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = { + sni = "test.com", + cert = "$secret://vault/test/ssl/test.com.crt", + key = "$secret://vault/test/ssl/test.com.key", + certs = 
{"$secret://vault/test/ssl/test.com.2.crt"}, + keys = {"$secret://vault/test/ssl/test.com.2.key"} + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "cert": "$secret://vault/test/ssl/test.com.crt", + "key": "$secret://vault/test/ssl/test.com.key", + "certs": ["$secret://vault/test/ssl/test.com.2.crt"], + "keys": ["$secret://vault/test/ssl/test.com.2.key"] + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: set ssl with env, and prefix is all uppercase or lowercase +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = { + sni = "test.com", + cert = "$ENV://APISIX_TEST_SSL_CERT", + key = "$env://APISIX_TEST_SSL_KEY", + certs = {"$env://APISIX_TEST_SSL_CERTS"}, + keys = {"$ENV://APISIX_TEST_SSL_KEYS"}, + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "cert": "$ENV://APISIX_TEST_SSL_CERT", + "key": "$env://APISIX_TEST_SSL_KEY", + "certs": ["$env://APISIX_TEST_SSL_CERTS"], + "keys": ["$ENV://APISIX_TEST_SSL_KEYS"] + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 23: set ssl with invalid prefix +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = { + sni = "test.com", + cert = "$ENV://APISIX_TEST_SSL_CERT", + key = "$env://APISIX_TEST_SSL_KEY", + certs = {"https://APISIX_TEST_SSL_CERTS"}, + keys = {"$ENV://APISIX_TEST_SSL_KEYS"}, + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + 
+ ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"certs\" validation failed: failed to validate item 1: value should match only one schema, but matches none"} diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/ssl2.t b/CloudronPackages/APISIX/apisix-source/t/admin/ssl2.t new file mode 100644 index 0000000..15abf37 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/ssl2.t @@ -0,0 +1,496 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: not unwanted data, POST +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "not-unwanted-post.com"} + local code, message, res = t.test('/apisix/admin/ssls', + ngx.HTTP_POST, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.key ~= nil) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + assert(res.value.key ~= nil) + res.value.key = "" + assert(res.value.id ~= nil) + res.value.id = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"value":{"cert":"","key":"","sni":"not-unwanted-post.com","status":1,"type":"server"}} + + + +=== TEST 2: not unwanted data, PUT +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.value.create_time ~= nil) 
+ res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + assert(res.value.key ~= nil) + res.value.key = "" + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/ssls/1","value":{"cert":"","id":"1","key":"","sni":"test.com","status":1,"type":"server"}} + + + +=== TEST 3: not unwanted data, PATCH +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "t.com"} + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + assert(res.value.key ~= nil) + res.value.key = "" + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/ssls/1","value":{"cert":"","id":"1","key":"","sni":"t.com","status":1,"type":"server"}} + + + +=== TEST 4: not unwanted data, GET +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.cert ~= nil) + res.value.cert = "" + 
assert(res.value.key == nil) + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/ssls/1","value":{"cert":"","id":"1","sni":"t.com","status":1,"type":"server"}} + + + +=== TEST 5: not unwanted data, DELETE +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"deleted":"1","key":"/apisix/ssls/1"} + + + +=== TEST 6: bad cert +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = [[-----BEGIN CERTIFICATE----- +MIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G +U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= +-----END CERTIFICATE----- + ]], key = ssl_key, sni = "test.com"} + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(res) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to parse cert: PEM_read_bio_X509_AUX() failed"} + + + +=== TEST 7: bad key +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local data = {cert = ssl_cert, key = [[ +-----BEGIN RSA PRIVATE KEY----- +MIIG5AIBAAKCAYEAyCM0rqJecvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5 
+jhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo +wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== +-----END RSA PRIVATE KEY-----]], sni = "test.com"} + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(res) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to parse key: PEM_read_bio_PrivateKey() failed"} + + + +=== TEST 8: bad certs +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "t.com", + certs = { + [[-----BEGIN CERTIFICATE----- +MIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G +U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= +-----END CERTIFICATE-----]] + }, + keys = {ssl_key} + } + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(res) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to handle cert-key pair[1]: failed to parse cert: PEM_read_bio_X509_AUX() failed"} + + + +=== TEST 9: bad keys +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "t.com", + certs = {ssl_cert}, + keys = {[[-----BEGIN RSA PRIVATE KEY----- +MIIG5AIBAAKCAYEAyCM0rqJecvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5 +jhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo 
+wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== +-----END RSA PRIVATE KEY-----]]} + } + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(res) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to handle cert-key pair[1]: failed to parse key: PEM_read_bio_PrivateKey() failed"} + + + +=== TEST 10: empty snis +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, snis = {}} + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(res) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"snis\" validation failed: expect array to have at least 1 items"} + + + +=== TEST 11: update snis, PATCH with sub path +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, snis = {"test.com"}} + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + + local data = {"update1.com", "update2.com"} + local code, message, res = t.test('/apisix/admin/ssls/1/snis', + ngx.HTTP_PATCH, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + ngx.say(res) + } + } +--- response_body_like eval +qr/"snis":\["update1.com","update2.com"\]/ + 
+ + +=== TEST 12: PATCH encrypt ssl key +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: "qeddd145sfvddff3" +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, certs = {ssl_cert}, keys = {ssl_key}} + local code, message, res = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(res.value.key == ssl_key) + ngx.say(res.value.keys[1] == ssl_key) + } + } +--- response_body +false +false + + + +=== TEST 13: PATCH encrypt ssl key, sub_path +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: "qeddd145sfvddff3" +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local ssl_key = t.read_file("t/certs/apisix.key") + local code, message, res = t.test('/apisix/admin/ssls/1/keys', + ngx.HTTP_PATCH, + json.encode({ssl_key}) + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(res.value.keys[1] == ssl_key) + } + } +--- response_body +false diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/ssl3.t b/CloudronPackages/APISIX/apisix-source/t/admin/ssl3.t new file mode 100644 index 0000000..f9c1cd0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/ssl3.t @@ -0,0 +1,63 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: list empty resources +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/ssls', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"list":[],"total":0} diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/ssl4.t b/CloudronPackages/APISIX/apisix-source/t/admin/ssl4.t new file mode 100644 index 0000000..c9de90d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/ssl4.t @@ -0,0 +1,510 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +no_root_location(); + +add_block_preprocessor( sub{ + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + my $TEST_NGINX_HTML_DIR ||= html_dir(); + + my $config = <<_EOC_; +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test.com", true) + if not sess then + sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + sess, err = sock:sslhandshake(nil, "www.test.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + end + + ngx.say("ssl handshake: ", sess ~= nil) + + local req = "GET /hello HTTP/1.0\\r\\nHost: www.test.com\\r\\nConnection: close\\r\\n\\r\\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + ngx.say("sent http request: ", bytes, " bytes.") + + while true do 
+ local line, err = sock:receive() + if not line then + break + end + + ngx.say("received: ", line) + end + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +_EOC_ + + if (!$block->config) { + $block->set_value("config", $config) + } +} + +); + + +run_tests; + +__DATA__ + +=== TEST 1: set ssl(sni: www.test.com), encrypt with the first keyring +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: + - edd1c9f0985e76a1 + - qeddd145sfvddff3 +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: set route(id: 1) +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: "edd1c9f0985e76a1" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: client request with the old style keyring +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: "edd1c9f0985e76a1" +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. 
+received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? +received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 4: client request with the new style keyring +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: + - edd1c9f0985e76a1 +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? +received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 5: client request failed with the wrong keyring +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: + - qeddd145sfvddff3 +--- error_log +decrypt ssl key failed + + + +=== TEST 6: client request successfully, use the two keyring to decrypt in turn +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: + - qeddd145sfvddff3 + - edd1c9f0985e76a1 +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? 
+received: \nreceived: hello world +close: 1 nil} +--- ignore_error_log + + + +=== TEST 7: remove test ssl certs +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: + - edd1c9f0985e76a1 +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + } +} + + + +=== TEST 8: set ssl(sni: www.test.com), do not encrypt +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: null +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 9: client request without keyring +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: null +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? 
+received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 10: remove test ssl certs +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: null +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + } +} + + + +=== TEST 11: set ssl(sni: www.test.com) with long label +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: null +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com", + labels = {secret = "js-design-test-bigdata-data-app-service-router-my-secret-number-123456"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com", + "labels": { + "secret": "js-design-test-bigdata-data-app-service-router-my-secret-number-123456" + }, + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 12: set ssl(sni: www.test.com), encrypt with the first keyring +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: + - edd1c9f0985e76a1 +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + 
ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 13: update encrypt keyring, and set ssl(sni: test2.com) +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: + - qeddd145sfvddff3 + - edd1c9f0985e76a1 +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test2.com"} + + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test2.com" + }, + "key": "/apisix/ssls/2" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 14: Successfully access test.com +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: + - qeddd145sfvddff3 + - edd1c9f0985e76a1 +--- exec +curl -k -s --resolve "test2.com:1994:127.0.0.1" https://test2.com:1994/hello 2>&1 | cat +--- response_body +hello world + + + +=== TEST 15: Successfully access test2.com +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: + - qeddd145sfvddff3 + - edd1c9f0985e76a1 +--- exec +curl -k -s --resolve "test2.com:1994:127.0.0.1" https://test2.com:1994/hello 2>&1 | cat +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/ssl5.t b/CloudronPackages/APISIX/apisix-source/t/admin/ssl5.t new file mode 100644 index 0000000..c9bd7b1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/ssl5.t @@ -0,0 +1,86 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: Not supported set TLSv1.0 for ssl_protocols +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", ssl_protocols = {"TLSv1.0", "TLSv1.2"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"ssl_protocols\" validation failed: failed to validate item 1: matches none of the enum values"} + + + +=== TEST 2: The default value for the ssl_protocols is null +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "ssl_protocols": null, + }, + "key": 
"/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/ssls.t b/CloudronPackages/APISIX/apisix-source/t/admin/ssls.t new file mode 100644 index 0000000..6752756 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/ssls.t @@ -0,0 +1,75 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: test /apisix/admin/ssls/{id} +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local etcd = require("apisix.core.etcd") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/ssls/1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/standalone-healthcheck.t b/CloudronPackages/APISIX/apisix-source/t/admin/standalone-healthcheck.t new file mode 100644 index 0000000..50d5a0e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/standalone-healthcheck.t @@ -0,0 +1,128 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + # restarts cause the memory cache to be emptied, don't do this + $ENV{TEST_NGINX_FORCE_RESTART_ON_TEST} = 0; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +use_hup(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", <<'EOF'); +deployment: + role: traditional + role_traditional: + config_provider: yaml + admin: + admin_key: + - name: admin + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin +EOF + } + + if (!defined $block->no_error_log) { + $block->set_value("no_error_log", ""); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: test +--- timeout: 15 +--- max_size: 204800 +--- exec +cd t && pnpm test admin/standalone.spec.ts 2>&1 +--- no_error_log +failed to execute the script with status +--- response_body eval +qr/PASS admin\/standalone.spec.ts/ + + + +=== TEST 2: send /healthcheck should fail because config is not loaded yet +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + local shared_dict = ngx.shared["standalone-config"] + shared_dict:delete("config") +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local healthcheck_uri = "http://127.0.0.1:7085" .. 
"/status/ready" + local httpc = http.new() + local res, _ = httpc:request_uri(healthcheck_uri, {method = "GET", keepalive = false}) + ngx.status = res.status + } + } +--- request +GET /t +--- error_code: 503 + + + +=== TEST 3: configure route and send /healthcheck should pass +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/configs', + ngx.HTTP_PUT, + [[{"routes":[{"id":"r1","uri":"/r1","upstream":{"nodes":{"127.0.0.1:1980":1},"type":"roundrobin"},"plugins":{"proxy-rewrite":{"uri":"/hello"}}}]}]], + nil, + { + ["X-API-KEY"] = "edd1c9f034335f136f87ad84b625c8f1" + } + ) + + if code >= 300 then + ngx.status = code + end + local code, body = t('/apisix/admin/configs', + ngx.HTTP_PUT, + [[{"routes":[{"id":"r1","uri":"/r1","upstream":{"nodes":{"127.0.0.1:1980":1},"type":"roundrobin"},"plugins":{"proxy-rewrite":{"uri":"/hello"}}}]}]], + nil, + { + ["X-API-KEY"] = "edd1c9f034335f136f87ad84b625c8f1" + } + ) + + if code >= 300 then + ngx.status = code + end + ngx.sleep(1) + local http = require("resty.http") + local healthcheck_uri = "http://127.0.0.1:7085" .. "/status/ready" + local httpc = http.new() + local res, _ = httpc:request_uri(healthcheck_uri, {method = "GET", keepalive = false}) + ngx.status = res.status + } + } +--- request +GET /t +--- error_code: 200 diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/standalone.spec.ts b/CloudronPackages/APISIX/apisix-source/t/admin/standalone.spec.ts new file mode 100644 index 0000000..fdfc828 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/standalone.spec.ts @@ -0,0 +1,442 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import axios from "axios"; +import YAML from "yaml"; + +const ENDPOINT = "/apisix/admin/configs"; +const clientConfig = { + baseURL: "http://localhost:1984", + headers: { + "X-API-KEY": "edd1c9f034335f136f87ad84b625c8f1", + }, +}; +const config1 = { + routes: [ + { + id: "r1", + uri: "/r1", + upstream: { + nodes: { "127.0.0.1:1980": 1 }, + type: "roundrobin", + }, + plugins: { "proxy-rewrite": { uri: "/hello" } }, + }, + ], +}; +const config2 = { + routes: [ + { + id: "r2", + uri: "/r2", + upstream: { + nodes: { "127.0.0.1:1980": 1 }, + type: "roundrobin", + }, + plugins: { "proxy-rewrite": { uri: "/hello" } }, + }, + ], +}; +const invalidConfVersionConfig1 = { + routes_conf_version: -1, +}; +const invalidConfVersionConfig2 = { + routes_conf_version: "adc", +}; +const routeWithModifiedIndex = { + routes: [ + { + id: "r1", + uri: "/r1", + modifiedIndex: 1, + upstream: { + nodes: { "127.0.0.1:1980": 1 }, + type: "roundrobin", + }, + plugins: { "proxy-rewrite": { uri: "/hello" } }, + }, + ], +}; +const routeWithKeyAuth = { + routes: [ + { + id: "r1", + uri: "/r1", + upstream: { + nodes: { "127.0.0.1:1980": 1 }, + type: "roundrobin", + }, + plugins: { + "proxy-rewrite": { uri: "/hello" }, + "key-auth": {}, + }, + }, + ] +} +const consumerWithModifiedIndex = { + routes: routeWithKeyAuth.routes, + consumers: [ + { + modifiedIndex: 10, + username: "jack", + plugins: { + "key-auth": { + 
key: "jack-key", + } + }, + }, + ], +} +const credential1 = { + routes: routeWithKeyAuth.routes, + consumers: [ + { + "username": "john_1" + }, + { + "id": "john_1/credentials/john-a", + "plugins": { + "key-auth": { + "key": "auth-a" + } + } + }, + { + "id": "john_1/credentials/john-b", + "plugins": { + "key-auth": { + "key": "auth-b" + } + } + } + ] +} + +describe("Admin - Standalone", () => { + const client = axios.create(clientConfig); + client.interceptors.response.use((response) => { + const contentType = response.headers["content-type"] || ""; + if ( + contentType.includes("application/yaml") && + typeof response.data === "string" && + response.config.responseType !== "text" + ) + response.data = YAML.parse(response.data); + return response; + }); + + describe("Normal", () => { + it("dump empty config (default json format)", async () => { + const resp = await client.get(ENDPOINT); + expect(resp.status).toEqual(200); + expect(resp.data.routes_conf_version).toEqual(0); + expect(resp.data.ssls_conf_version).toEqual(0); + expect(resp.data.services_conf_version).toEqual(0); + expect(resp.data.upstreams_conf_version).toEqual(0); + expect(resp.data.consumers_conf_version).toEqual(0); + }); + + it("dump empty config (yaml format)", async () => { + const resp = await client.get(ENDPOINT, { + headers: { Accept: "application/yaml" }, + }); + expect(resp.status).toEqual(200); + expect(resp.headers["content-type"]).toEqual("application/yaml"); + expect(resp.data.routes_conf_version).toEqual(0); + expect(resp.data.ssls_conf_version).toEqual(0); + expect(resp.data.services_conf_version).toEqual(0); + expect(resp.data.upstreams_conf_version).toEqual(0); + expect(resp.data.consumers_conf_version).toEqual(0); + }); + + it("update config (add routes, by json)", async () => { + const resp = await client.put(ENDPOINT, config1); + expect(resp.status).toEqual(202); + }); + + it("dump config (json format)", async () => { + const resp = await client.get(ENDPOINT); + 
expect(resp.status).toEqual(200); + expect(resp.data.routes_conf_version).toEqual(1); + expect(resp.data.ssls_conf_version).toEqual(1); + expect(resp.data.services_conf_version).toEqual(1); + expect(resp.data.upstreams_conf_version).toEqual(1); + expect(resp.data.consumers_conf_version).toEqual(1); + }); + + it("check default value", async () => { + const resp = await client.get(ENDPOINT); + expect(resp.status).toEqual(200); + expect(resp.data.routes).toEqual(config1.routes); + }); + + it("dump config (yaml format)", async () => { + const resp = await client.get(ENDPOINT, { + headers: { Accept: "application/yaml" }, + responseType: 'text', + }); + expect(resp.status).toEqual(200); + expect(resp.data).toContain("routes:") + expect(resp.data).toContain("id: r1") + expect(resp.data.startsWith('---')).toBe(false); + expect(resp.data.endsWith('...')).toBe(false); + }); + + it('check route "r1"', async () => { + const resp = await client.get("/r1"); + expect(resp.status).toEqual(200); + expect(resp.data).toEqual("hello world\n"); + }); + + it("update config (add routes, by yaml)", async () => { + const resp = await client.put( + ENDPOINT, + YAML.stringify(config2), + { + headers: { "Content-Type": "application/yaml" }, + } + ); + expect(resp.status).toEqual(202); + }); + + it("dump config (json format)", async () => { + const resp = await client.get(ENDPOINT); + expect(resp.status).toEqual(200); + expect(resp.data.routes_conf_version).toEqual(2); + expect(resp.data.ssls_conf_version).toEqual(2); + expect(resp.data.services_conf_version).toEqual(2); + expect(resp.data.upstreams_conf_version).toEqual(2); + expect(resp.data.consumers_conf_version).toEqual(2); + }); + + it('check route "r1"', () => + expect(client.get("/r1")).rejects.toThrow( + "Request failed with status code 404" + )); + + it('check route "r2"', async () => { + const resp = await client.get("/r2"); + expect(resp.status).toEqual(200); + expect(resp.data).toEqual("hello world\n"); + }); + + it("update config 
(delete routes)", async () => { + const resp = await client.put( + ENDPOINT, + {}, + { params: { conf_version: 3 } } + ); + expect(resp.status).toEqual(202); + }); + + it('check route "r2"', () => + expect(client.get("/r2")).rejects.toThrow( + "Request failed with status code 404" + )); + + it("only set routes_conf_version", async () => { + const resp = await client.put( + ENDPOINT, + YAML.stringify({ routes_conf_version: 15 }), + { + headers: { "Content-Type": "application/yaml" }, + }); + expect(resp.status).toEqual(202); + + const resp_1 = await client.get(ENDPOINT); + expect(resp_1.status).toEqual(200); + expect(resp_1.data.routes_conf_version).toEqual(15); + expect(resp_1.data.ssls_conf_version).toEqual(4); + expect(resp_1.data.services_conf_version).toEqual(4); + expect(resp_1.data.upstreams_conf_version).toEqual(4); + expect(resp_1.data.consumers_conf_version).toEqual(4); + + const resp2 = await client.put( + ENDPOINT, + YAML.stringify({ routes_conf_version: 17 }), + { + headers: { "Content-Type": "application/yaml" }, + }); + expect(resp2.status).toEqual(202); + + const resp2_1 = await client.get(ENDPOINT); + expect(resp2_1.status).toEqual(200); + expect(resp2_1.data.routes_conf_version).toEqual(17); + expect(resp2_1.data.ssls_conf_version).toEqual(5); + expect(resp2_1.data.services_conf_version).toEqual(5); + expect(resp2_1.data.upstreams_conf_version).toEqual(5); + expect(resp2_1.data.consumers_conf_version).toEqual(5); + }); + + it("control resource changes using modifiedIndex", async () => { + const c1 = structuredClone(routeWithModifiedIndex); + c1.routes[0].modifiedIndex = 1; + + const c2 = structuredClone(c1); + c2.routes[0].uri = "/r2"; + + const c3 = structuredClone(c2); + c3.routes[0].modifiedIndex = 2; + + // Update with c1 + const resp = await client.put(ENDPOINT, c1); + expect(resp.status).toEqual(202); + + // Check route /r1 exists + const resp_1 = await client.get("/r1"); + expect(resp_1.status).toEqual(200); + + // Update with c2 + const 
resp2 = await client.put(ENDPOINT, c2); + expect(resp2.status).toEqual(202); + + // Check route /r1 exists + // But it is not applied because the modifiedIndex is the same as the old value + const resp2_2 = await client.get("/r1"); + expect(resp2_2.status).toEqual(200); + + // Check route /r2 not exists + const resp2_1 = await client.get("/r2").catch((err) => err.response); + expect(resp2_1.status).toEqual(404); + + // Update with c3 + const resp3 = await client.put(ENDPOINT, c3); + expect(resp3.status).toEqual(202); + + // Check route /r1 not exists + const resp3_1 = await client.get("/r1").catch((err) => err.response); + expect(resp3_1.status).toEqual(404); + + // Check route /r2 exists + const resp3_2 = await client.get("/r2"); + expect(resp3_2.status).toEqual(200); + }); + + it("apply consumer with modifiedIndex", async () => { + const resp = await client.put(ENDPOINT, consumerWithModifiedIndex); + expect(resp.status).toEqual(202); + + const resp_1 = await client.get("/r1", { headers: { "apikey": "invalid-key" } }).catch((err) => err.response); + expect(resp_1.status).toEqual(401); + const resp_2 = await client.get("/r1", { headers: { "apikey": "jack-key" } }); + expect(resp_2.status).toEqual(200); + + const updatedConsumer = structuredClone(consumerWithModifiedIndex); + + // update key of key-auth plugin, but modifiedIndex is not changed + updatedConsumer.consumers[0].plugins["key-auth"] = { "key": "jack-key-updated" }; + const resp2 = await client.put(ENDPOINT, updatedConsumer); + expect(resp2.status).toEqual(202); + + const resp2_1 = await client.get("/r1", { headers: { "apikey": "jack-key-updated" } }).catch((err) => err.response); + expect(resp2_1.status).toEqual(401); + const resp2_2 = await client.get("/r1", { headers: { "apikey": "jack-key" } }); + expect(resp2_2.status).toEqual(200); + + // update key of key-auth plugin, and modifiedIndex is changed + updatedConsumer.consumers[0].modifiedIndex++; + const resp3 = await client.put(ENDPOINT, 
updatedConsumer); + const resp3_1 = await client.get("/r1", { headers: { "apikey": "jack-key-updated" } }); + expect(resp3_1.status).toEqual(200); + const resp3_2 = await client.get("/r1", { headers: { "apikey": "jack-key" } }).catch((err) => err.response); + expect(resp3_2.status).toEqual(401); + }); + + it("apply consumer with credentials", async () => { + const resp = await client.put(ENDPOINT, credential1); + expect(resp.status).toEqual(202); + + const resp_1 = await client.get("/r1", { headers: { "apikey": "auth-a" } }); + expect(resp_1.status).toEqual(200); + const resp_2 = await client.get("/r1", { headers: { "apikey": "auth-b" } }); + expect(resp_2.status).toEqual(200); + const resp_3 = await client.get("/r1", { headers: { "apikey": "invalid-key" } }).catch((err) => err.response); + expect(resp_3.status).toEqual(401); + }); + }); + + describe("Exceptions", () => { + const clientException = axios.create({ + ...clientConfig, + validateStatus: () => true, + }); + + it("update config (lower conf_version)", async () => { + const resp = await clientException.put( + ENDPOINT, + { routes_conf_version: 100 }, + { headers: { "Content-Type": "application/yaml" } } + ); + const resp2 = await clientException.put( + ENDPOINT, + YAML.stringify(invalidConfVersionConfig1), + { headers: { "Content-Type": "application/yaml" } } + ); + expect(resp2.status).toEqual(400); + expect(resp2.data).toEqual({ + error_msg: + "routes_conf_version must be greater than or equal to (100)", + }); + }); + + it("update config (invalid conf_version)", async () => { + const resp = await clientException.put( + ENDPOINT, + YAML.stringify(invalidConfVersionConfig2), + { + headers: { + "Content-Type": "application/yaml", + }, + } + ); + expect(resp.status).toEqual(400); + expect(resp.data).toEqual({ + error_msg: "routes_conf_version must be a number", + }); + }); + + it("update config (invalid json format)", async () => { + const resp = await clientException.put(ENDPOINT, "{abcd", { + params: { 
conf_version: 4 }, + }); + expect(resp.status).toEqual(400); + expect(resp.data).toEqual({ + error_msg: + "invalid request body: Expected object key string but found invalid token at character 2", + }); + }); + + it("update config (not compliant with jsonschema)", async () => { + const data = structuredClone(config1); + (data.routes[0].uri as unknown) = 123; + const resp = await clientException.put(ENDPOINT, data); + expect(resp.status).toEqual(400); + expect(resp.data).toMatchObject({ + error_msg: + 'invalid routes at index 0, err: property "uri" validation failed: wrong type: expected string, got number', + }); + }); + + it("update config (empty request body)", async () => { + const resp = await clientException.put(ENDPOINT, ""); + expect(resp.status).toEqual(400); + expect(resp.data).toEqual({ + error_msg: "invalid request body: empty request body", + }); + }); + }); +}); diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/standalone.t b/CloudronPackages/APISIX/apisix-source/t/admin/standalone.t new file mode 100644 index 0000000..24d6e79 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/standalone.t @@ -0,0 +1,258 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + # restarts cause the memory cache to be emptied, don't do this + $ENV{TEST_NGINX_FORCE_RESTART_ON_TEST} = 0; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +use_hup(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", <<'EOF'); +deployment: + role: traditional + role_traditional: + config_provider: yaml + admin: + admin_key: + - name: admin + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin +EOF + } + + if (!defined $block->no_error_log) { + $block->set_value("no_error_log", ""); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: test +--- timeout: 15 +--- max_size: 204800 +--- exec +cd t && pnpm test admin/standalone.spec.ts 2>&1 +--- no_error_log +failed to execute the script with status +--- response_body eval +qr/PASS admin\/standalone.spec.ts/ + + + +=== TEST 2: init conf_version +--- config + location /t {} # force the worker to restart by changing the configuration +--- request +PUT /apisix/admin/configs +{ + "consumer_groups_conf_version": 1000, + "consumers_conf_version": 1000, + "global_rules_conf_version": 1000, + "plugin_configs_conf_version": 1000, + "plugin_metadata_conf_version": 1000, + "protos_conf_version": 1000, + "routes_conf_version": 1000, + "secrets_conf_version": 1000, + "services_conf_version": 1000, + "ssls_conf_version": 1000, + "upstreams_conf_version": 1000 +} +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- error_code: 202 + + + +=== TEST 3: get config +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body = t.test('/apisix/admin/configs', + ngx.HTTP_GET, + nil, + [[{ + "consumer_groups_conf_version": 1000, + "consumers_conf_version": 1000, + "global_rules_conf_version": 1000, + "plugin_configs_conf_version": 1000, + "plugin_metadata_conf_version": 1000, + "protos_conf_version": 1000, + 
"routes_conf_version": 1000, + "secrets_conf_version": 1000, + "services_conf_version": 1000, + "ssls_conf_version": 1000, + "upstreams_conf_version": 1000 + }]], + { + ["X-API-KEY"] = "edd1c9f034335f136f87ad84b625c8f1" + } + ) + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: configure route +--- config + location /t {} # force the worker to restart by changing the configuration +--- request +PUT /apisix/admin/configs +{"routes":[{"id":"r1","uri":"/r1","upstream":{"nodes":{"127.0.0.1:1980":1},"type":"roundrobin"},"plugins":{"proxy-rewrite":{"uri":"/hello"}}}]} +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- error_code: 202 + + + +=== TEST 5: test route +--- config + location /t1 {} +--- request +GET /r1 +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 6: remove route +--- config + location /t2 {} +--- request +PUT /apisix/admin/configs +{} +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- error_code: 202 + + + +=== TEST 7: test non-exist route +--- config + location /t3 {} +--- request +GET /r1 +--- error_code: 404 + + + +=== TEST 8: route references upstream, but only updates the route +--- config + location /t6 {} +--- pipelined_requests eval +[ + "PUT /apisix/admin/configs\n" . "{\"routes_conf_version\":1060,\"upstreams_conf_version\":1060,\"routes\":[{\"id\":\"r1\",\"uri\":\"/r1\",\"upstream_id\":\"u1\",\"plugins\":{\"proxy-rewrite\":{\"uri\":\"/hello\"}}}],\"upstreams\":[{\"id\":\"u1\",\"nodes\":{\"127.0.0.1:1980\":1},\"type\":\"roundrobin\"}]}", + "PUT /apisix/admin/configs\n" . 
"{\"routes_conf_version\":1062,\"upstreams_conf_version\":1060,\"routes\":[{\"id\":\"r1\",\"uri\":\"/r2\",\"upstream_id\":\"u1\",\"plugins\":{\"proxy-rewrite\":{\"uri\":\"/hello\"}}}],\"upstreams\":[{\"id\":\"u1\",\"nodes\":{\"127.0.0.1:1980\":1},\"type\":\"roundrobin\"}]}" +] +--- more_headers eval +[ + "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1", + "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1\n" . "x-apisix-conf-version-routes: 100", +] +--- error_code eval +[202, 202] + + + +=== TEST 9: hit r2 +--- config + location /t3 {} +--- pipelined_requests eval +["GET /r1", "GET /r2"] +--- error_code eval +[404, 200] + + + +=== TEST 10: routes_conf_version < 1062 is not allowed +--- config + location /t {} +--- request +PUT /apisix/admin/configs +{"routes_conf_version":1,"routes":[{"id":"r1","uri":"/r2","upstream_id":"u1","plugins":{"proxy-rewrite":{"uri":"/hello"}}}]} +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +x-apisix-conf-version-routes: 100 +--- error_code: 400 +--- response_body +{"error_msg":"routes_conf_version must be greater than or equal to (1062)"} + + + +=== TEST 11: duplicate route id found +--- config + location /t11 {} +--- request +PUT /apisix/admin/configs +{"routes_conf_version":1063,"routes":[{"id":"r1","uri":"/r2","upstream_id":"u1","plugins":{"proxy-rewrite":{"uri":"/hello"}}}, +{"id":"r1","uri":"/r2","upstream_id":"u1","plugins":{"proxy-rewrite":{"uri":"/hello"}}}]} +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- error_code: 400 +--- response_body +{"error_msg":"found duplicate id r1 in routes"} + + + +=== TEST 12: duplicate consumer username found +--- config + location /t12 {} +--- request +PUT /apisix/admin/configs +{"consumers_conf_version":1064,"consumers":[{"username":"consumer1","plugins":{"key-auth":{"key":"consumer1"}}}, +{"username":"consumer1","plugins":{"key-auth":{"key":"consumer1"}}}]} +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- error_code: 400 +--- response_body 
+{"error_msg":"found duplicate username consumer1 in consumers"} + + + +=== TEST 13: duplicate consumer credential id found +--- config + location /t13 {} +--- request +PUT /apisix/admin/configs +{"consumers_conf_version":1065,"consumers":[ + {"username": "john_1"}, + {"id":"john_1/credentials/john-a","plugins":{"key-auth":{"key":"auth-a"}}}, + {"id":"john_1/credentials/john-a","plugins":{"key-auth":{"key":"auth-a"}}} +]} +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- error_code: 400 +--- response_body +{"error_msg":"found duplicate credential id john_1/credentials/john-a in consumers"} diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/stream-routes-disable.t b/CloudronPackages/APISIX/apisix-source/t/admin/stream-routes-disable.t new file mode 100644 index 0000000..d834d2b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/stream-routes-disable.t @@ -0,0 +1,66 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; +use Cwd qw(cwd); + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +_EOC_ + + $block->set_value("yaml_config", $user_yaml_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route(disabled stream model) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/stream-routes.t b/CloudronPackages/APISIX/apisix-source/t/admin/stream-routes.t new file mode 100644 index 0000000..77a6d5b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/stream-routes.t @@ -0,0 +1,653 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "desc": "test-desc", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + }]], + [[{ + "value": { + "remote_addr": "127.0.0.1", + "desc": "test-desc", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + }, + "key": "/apisix/stream_routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/stream_routes/1')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + }, + "key": "/apisix/stream_routes/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/stream_routes/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 
message: passed + + + +=== TEST 4: post route + delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, message, res = t('/apisix/admin/stream_routes', + ngx.HTTP_POST, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + }]], + [[{ + "value": { + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route" + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + + local id = string.sub(res.key, #"/apisix/stream_routes/" + 1) + + local ret = assert(etcd.get('/stream_routes/' .. id)) + local create_time = ret.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = ret.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + id = ret.body.node.value.id + assert(id ~= nil, "id is nil") + + code, message = t('/apisix/admin/stream_routes/' .. 
id, ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +[delete] code: 200 message: passed + + + +=== TEST 5: set route with plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: set route with server_addr and server_port +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_addr": "127.0.0.1", + "server_port": 1982, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/stream_routes/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 8: string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/stream_routes/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: string id(delete) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/a-b-c-ABC_0123', ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: invalid string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/*invalid', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 11: not unwanted data, POST +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/stream_routes', + ngx.HTTP_POST, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + -- clean data + local id = string.sub(res.key, #"/apisix/stream_routes/" + 1) + local code, message = t('/apisix/admin/stream_routes/' .. 
id, + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + assert(res.key ~= nil) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"value":{"remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +--- request +GET /t + + + +=== TEST 12: not unwanted data, PUT +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +--- request +GET /t + + + +=== TEST 13: not unwanted data, GET +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/stream_routes/1', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + 
res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/stream_routes/1","value":{"id":"1","remote_addr":"127.0.0.1","upstream":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}}} +--- request +GET /t + + + +=== TEST 14: not unwanted data, DELETE +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/stream_routes/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"deleted":"1","key":"/apisix/stream_routes/1"} +--- request +GET /t + + + +=== TEST 15: set route with unknown plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "plugins": { + "mqttt-proxy": { + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"unknown plugin [mqttt-proxy]"} + + + +=== TEST 16: validate protocol +--- extra_yaml_config +xrpc: + protocols: + - name: pingpong +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, case in ipairs({ + {input = { + name = "xxx", + }}, + {input = { + name = "pingpong", + }}, + {input = { + name = "pingpong", + conf = { + faults = "a", + } + }}, + }) do + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = case.input, + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin" + } + } + ) + if code > 300 then + ngx.print(body) + else + ngx.say(body) + end + end + } + 
} +--- request +GET /t +--- response_body +{"error_msg":"unknown protocol [xxx]"} +passed +{"error_msg":"property \"faults\" validation failed: wrong type: expected array, got string"} + + + +=== TEST 17: set route with remote_addr and server_addr in IPV6 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "::1", + "server_addr": "::1", + "server_port": 1982, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/token.t b/CloudronPackages/APISIX/apisix-source/t/admin/token.t new file mode 100644 index 0000000..1ab9942 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/token.t @@ -0,0 +1,179 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; +use Cwd qw(cwd); + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +deployment: + admin: + admin_key: + - name: admin + role: admin + key: edd1c9f034335f136f87ad84b625c8f1 + +apisix: + node_listen: 1984 +_EOC_ + + $block->set_value("yaml_config", $user_yaml_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route without token +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").req_self_with_http + local res, err = t('/apisix/admin/routes/1', + "PUT", + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + ngx.status = res.status + ngx.print(res.body) + } + } +--- request +GET /t +--- error_code: 401 + + + +=== TEST 2: set route with wrong token +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").req_self_with_http + local res, err = t( + '/apisix/admin/routes/1', + "PUT", + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]], + {apikey = "wrong_key"} + ) + + ngx.status = res.status + ngx.print(res.body) + } + } +--- request +GET /t +--- error_code: 401 + + + +=== TEST 3: set route with correct token +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").req_self_with_http + local res, err = t( + '/apisix/admin/routes/1', + "PUT", + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/index.html" + }]], + {x_api_key = "edd1c9f034335f136f87ad84b625c8f1"} + ) + + if res.status > 299 then + ngx.status = res.status + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 4: get plugins name +--- request +GET /apisix/admin/plugins/list +--- error_code: 401 + + + +=== TEST 5: 
reload plugins +--- request +PUT /apisix/admin/plugins/reload +--- error_code: 401 + + + +=== TEST 6: reload plugins with api key(arguments) +--- request +PUT /apisix/admin/plugins/reload?api_key=edd1c9f034335f136f87ad84b625c8f1 +--- error_code: 200 + + + +=== TEST 7: reload plugins with api key(cookie) +--- request +PUT /apisix/admin/plugins/reload +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- error_code: 200 + + + +=== TEST 8: reload plugins with api key(viewer role) +--- request +PUT /apisix/admin/plugins/reload?api_key=4054f7cf07e344346cd3f287985e76a2 +--- error_code: 401 + + + +=== TEST 9: fetch with api key(viewer role) +--- request +GET /apisix/admin/routes??api_key=4054f7cf07e344346cd3f287985e76a2 +--- error_code: 401 diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/upstream-array-nodes.t b/CloudronPackages/APISIX/apisix-source/t/admin/upstream-array-nodes.t new file mode 100644 index 0000000..b02a759 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/upstream-array-nodes.t @@ -0,0 +1,435 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + }]], + [[{ + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: delete upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 4: delete upstream(id: not_found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/upstreams/not_found', ngx.HTTP_DELETE) + + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 + + + +=== TEST 5: push upstream + delete +--- config + location /t { 
+ content_by_lua_block { + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/upstreams', + ngx.HTTP_POST, + [[{ + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + }]], + [[{ + "value": { + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin" + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + + local id = string.sub(res.key, #"/apisix/upstreams/" + 1) + code, message = t('/apisix/admin/upstreams/' .. id, ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +[delete] code: 200 message: passed + + + +=== TEST 6: empty nodes +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [], + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(message) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: refer to empty nodes upstream +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream_id": "1", + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(message) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit empty nodes upstream +--- request +GET /index.html +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 9: additional properties is invalid +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": 1 + }], + "type": "roundrobin", + "_service_name": "xyz", + "_discovery_type": "nacos" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid configuration: additional properties forbidden, found .*"\}/ + + + +=== TEST 10: invalid weight of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": "1" + }], + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the required"} + + + +=== TEST 11: invalid weight of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.0.0.1", + "port": 8080, + "weight": -100 + }], + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the required"} + + + +=== TEST 12: invalid port of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.0.0.1", + "port": 0, + "weight": 1 + }], + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- 
error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the required"} + + + +=== TEST 13: invalid host of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": [{ + "host": "127.#.%.1", + "port": 8080, + "weight": 1 + }], + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the required"} + + + +=== TEST 14: nodes host include ipv6 addr +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": [ + { + "host":"[::1]", + "port":8082, + "weight":1 + } + ], + "type": "roundrobin" + }, + "uri": "/index.html" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/upstream-force-delete.t b/CloudronPackages/APISIX/apisix-source/t/admin/upstream-force-delete.t new file mode 100644 index 0000000..6d834b1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/upstream-force-delete.t @@ -0,0 +1,154 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream_id": 1, + "uri": "/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete upstream(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this upstream, route [1] is still 
using it now"} + + + +=== TEST 4: delete upstream(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this upstream, route [1] is still using it now"} + + + +=== TEST 5: delete upstream(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete route +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/upstream.t b/CloudronPackages/APISIX/apisix-source/t/admin/upstream.t new file mode 100644 index 0000000..b92a2f4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/upstream.t @@ -0,0 +1,725 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: set upstream (use an id can't be referred by other route +so that we can delete it later) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/upstreams/admin_up', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }, + "key": "/apisix/upstreams/admin_up" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/upstreams/admin_up')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: get upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/admin_up', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }, + "key": "/apisix/upstreams/admin_up" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: delete 
upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/admin_up', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 4: delete upstream(id: not_found) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/upstreams/not_found', ngx.HTTP_DELETE) + + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 + + + +=== TEST 5: push upstream + delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, message, res = t('/apisix/admin/upstreams', + ngx.HTTP_POST, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code ~= 200 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("[push] code: ", code, " message: ", message) + + local id = string.sub(res.key, #"/apisix/upstreams/" + 1) + local res = assert(etcd.get('/upstreams/' .. id)) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + + code, message = t('/apisix/admin/upstreams/' .. 
id, ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[push] code: 200 message: passed +[delete] code: 200 message: passed + + + +=== TEST 6: invalid upstream id in uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/invalid_id$', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + ngx.exit(code) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 7: different id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "id": 3, + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong upstream id"} + + + +=== TEST 8: id in the rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": "1", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: integer id less than 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": -100, + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches 
none of the required"} + + + +=== TEST 10: invalid upstream id: string value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": "invalid_id$", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the required"} + + + +=== TEST 11: additional properties is invalid +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "_service_name": "xyz", + "_discovery_type": "nacos" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid configuration: additional properties forbidden, found .*"\}/ + + + +=== TEST 12: set upstream(type: chash) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "remote_addr", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash" + }]], + [[{ + "value": { + "key": "remote_addr", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash" + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: unknown type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "unknown" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } 
+--- request +GET /t +--- response_body chomp +passed + + + +=== TEST 14: invalid weight of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": { + "127.0.0.1:8080": "1" + }, + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the required"} + + + +=== TEST 15: invalid weight of node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_PUT, + [[{ + "id": 1, + "nodes": { + "127.0.0.1:8080": -100 + }, + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: object matches none of the required"} + + + +=== TEST 16: set upstream (missing key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"missing key"} + + + +=== TEST 17: wrong upstream id, do not need it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_POST, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong upstream id, do not need it"} + + + +=== TEST 18: wrong 
upstream id, do not need it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams', + ngx.HTTP_POST, + [[{ + "id": 1, + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"wrong upstream id, do not need it"} + + + +=== TEST 19: client_cert/client_key and client_cert_id cannot appear at the same time +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert_id = 1, + client_cert = ssl_cert, + client_key = ssl_key + } + } + local code, body = t.test('/apisix/admin/upstreams', + ngx.HTTP_POST, + core.json.encode(data) + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/{"error_msg":"invalid configuration: property \\\"tls\\\" validation failed: failed to validate dependent schema for \\\"client_cert|client_key\\\": value wasn't supposed to match schema"}/ + + + +=== TEST 20: tls.client_cert_id does not exist +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert_id = 9999999 + } + } + local code, body = t.test('/apisix/admin/upstreams', + ngx.HTTP_POST, + core.json.encode(data) + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to fetch ssl info by ssl id [9999999], response code: 404"} + + + +=== TEST 21: tls.client_cert_id exist 
with wrong ssl type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + sni = "test.com", + cert = ssl_cert, + key = ssl_key + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1 + }, + tls = { + client_cert_id = 1 + } + }, + uri = "/hello" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to fetch ssl info by ssl id [1], wrong ssl type"} + + + +=== TEST 22: type with default vale +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/upstreams/admin_up', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "desc": "new upstream" + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }, + "key": "/apisix/upstreams/admin_up" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/upstream2.t b/CloudronPackages/APISIX/apisix-source/t/admin/upstream2.t new file mode 100644 index 0000000..618861c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/upstream2.t @@ -0,0 +1,295 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: not unwanted data, POST +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/upstreams', + ngx.HTTP_POST, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.key ~= nil) + res.key = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + assert(res.value.id ~= nil) + res.value.id = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"value":{"hash_on":"vars","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} + + + +=== TEST 2: not unwanted data, PUT +--- config + location /t { + 
content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/upstreams/unwanted', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} + + + +=== TEST 3: not unwanted data, PATCH +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/upstreams/unwanted', + ngx.HTTP_PATCH, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} + + + +=== TEST 4: not unwanted data, GET +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/upstreams/unwanted', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + assert(res.createdIndex ~= nil) + res.createdIndex = nil + assert(res.modifiedIndex ~= nil) + 
res.modifiedIndex = nil + assert(res.value.create_time ~= nil) + res.value.create_time = nil + assert(res.value.update_time ~= nil) + res.value.update_time = nil + ngx.say(json.encode(res)) + } + } +--- response_body +{"key":"/apisix/upstreams/unwanted","value":{"hash_on":"vars","id":"unwanted","nodes":{"127.0.0.1:8080":1},"pass_host":"pass","scheme":"http","type":"roundrobin"}} + + + +=== TEST 5: not unwanted data, DELETE +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, message, res = t('/apisix/admin/upstreams/unwanted', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"deleted":"1","key":"/apisix/upstreams/unwanted"} + + + +=== TEST 6: empty nodes +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": {}, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 7: refer to empty nodes upstream +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream_id": "1", + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 8: hit empty nodes upstream +--- request +GET /index.html +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 9: upstream timeouts equal to zero +--- config + location /t { + content_by_lua_block { + local 
t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "timeout": { + "connect": 0, + "send": 0, + "read": 0 + } + }]] + ) + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/{"error_msg":"invalid configuration: property \\\"timeout\\\" validation failed: property \\\"(connect|send|read)\\\" validation failed: expected 0 to be greater than 0"}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/upstream3.t b/CloudronPackages/APISIX/apisix-source/t/admin/upstream3.t new file mode 100644 index 0000000..335bbfa --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/upstream3.t @@ -0,0 +1,768 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: list empty resources +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/apisix/admin/upstreams', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + ngx.say(json.encode(res)) + } + } +--- response_body +{"list":[],"total":0} + + + +=== TEST 2: retry_timeout is -1 (INVALID) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8090": 1 + }, + "retry_timeout": -1, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"retry_timeout\" validation failed: expected -1 to be at least 0"} + + + +=== TEST 3: provide upstream for patch +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8090": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } + + + +=== TEST 4: patch upstream(whole) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + + local id = 1 + local 
res = assert(etcd.get('/upstreams/' .. id)) + local prev_create_time = res.body.node.value.create_time + local prev_update_time = res.body.node.value.update_time + ngx.sleep(1) + + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PATCH, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/upstreams/' .. id)) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } +--- response_body +passed + + + +=== TEST 5: patch upstream(new desc) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PATCH, + [[{ + "desc": "new 21 upstream" + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new 21 upstream" + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: patch upstream(new nodes) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PATCH, + [[{ + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + } + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 4 + }, + "type": "roundrobin", + "desc": "new 21 upstream" + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: patch upstream(weight is 0) +--- config + location /t { + content_by_lua_block { + 
local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PATCH, + [[{ + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 0 + } + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8081": 3, + "127.0.0.1:8082": 0 + }, + "type": "roundrobin", + "desc": "new 21 upstream" + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: patch upstream(whole - sub path) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1/', + ngx.HTTP_PATCH, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream 24" + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream 24" + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: patch upstream(new desc - sub path) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1/desc', + ngx.HTTP_PATCH, + '"new 25 upstream"', + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new 25 upstream" + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: patch upstream(new nodes) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1/nodes', + ngx.HTTP_PATCH, + [[{ + "127.0.0.6:8081": 3, + "127.0.0.7:8082": 4 + }]], + [[{ + "value": { + "nodes": { + "127.0.0.6:8081": 3, + "127.0.0.7:8082": 4 + }, + "type": "roundrobin", + "desc": "new 25 upstream" + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: patch upstream(weight is 0 - sub path) 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1/nodes', + ngx.HTTP_PATCH, + [[{ + "127.0.0.7:8081": 0, + "127.0.0.8:8082": 4 + }]], + [[{ + "value": { + "nodes": { + "127.0.0.7:8081": 0, + "127.0.0.8:8082": 4 + }, + "type": "roundrobin", + "desc": "new 25 upstream" + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: set upstream(type: chash) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "server_name", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: wrong upstream key, hash_on default vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 2 + }, + "type": "chash", + "key": "not_support", + "desc": "new upstream" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: failed to match pattern \"^((uri|server_name|server_addr|request_uri|remote_port|remote_addr|query_string|host|hostname|mqtt_client_id)|arg_[0-9a-zA-z_-]+)$\" with \"not_support\""} + + + +=== TEST 14: set upstream with args(type: chash) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "arg_device_id", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: set upstream(type: chash) +--- config + 
location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "server_name", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: wrong upstream key, hash_on default vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8081": 2 + }, + "type": "chash", + "key": "not_support", + "desc": "new upstream" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: failed to match pattern \"^((uri|server_name|server_addr|request_uri|remote_port|remote_addr|query_string|host|hostname|mqtt_client_id)|arg_[0-9a-zA-z_-]+)$\" with \"not_support\""} + + + +=== TEST 17: set upstream with args(type: chash) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "arg_device_id", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: type chash, hash_on: vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "arg_device_id", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "vars", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: type chash, hash_on: header, header name with '_', underscores_in_headers on +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "custom_header", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "header", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: type chash, hash_on: header, header name with invalid character +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "$#^@", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "header", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: failed to match pattern \"^[a-zA-Z0-9-_]+$\" with \"$#^@\""} + + + +=== TEST 21: type chash, hash_on: cookie +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "custom_cookie", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "cookie", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: type chash, hash_on: cookie, cookie name with invalid character +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "$#^@abc", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "cookie", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: failed to match pattern \"^[a-zA-Z0-9-_]+$\" with \"$#^@abc\""} + + + +=== TEST 23: type 
chash, hash_on: consumer, do not need upstream key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "consumer", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 24: type chash, hash_on: consumer, set key but invalid +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "consumer", + "key": "invalid-key", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: type chash, invalid hash_on type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "key": "dsadas", + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "chash", + "hash_on": "aabbcc", + "desc": "new chash upstream" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"hash_on\" validation failed: matches none of the enum values"} diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/upstream4.t b/CloudronPackages/APISIX/apisix-source/t/admin/upstream4.t new file mode 100644 index 0000000..4112524 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/upstream4.t @@ -0,0 +1,668 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set upstream(id: 1 + name: test name) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "name": "test upstream name" + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "name": "test upstream name" + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: string id(delete) +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: invalid string id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/*invalid', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"id\" validation failed: object matches none of the required"} + + + +=== TEST 5: retries is 0 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8090": 1 + }, + "retries": 0, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: retries is -1 (INVALID) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/a-b-c-ABC_0123', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1, + "127.0.0.1:8090": 1 + }, + "retries": -1, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"retries\" validation failed: expected -1 to be at least 0"} + + + +=== TEST 7: invalid route: empty `upstream_host` when `pass_host` is `rewrite` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + 
[[{ + "nodes": { + "apisix.com:8080": 1, + "test.com:8080": 1 + }, + "type": "roundrobin", + "pass_host": "rewrite", + "upstream_host": "" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 + + + +=== TEST 8: set upstream(with labels) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "labels": { + "build":"16", + "env":"production", + "version":"v2" + } + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "labels": { + "build":"16", + "env":"production", + "version":"v2" + } + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: get upstream(with labels) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_GET, + nil, + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "labels": { + "version":"v2", + "build":"16", + "env":"production" + } + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: patch upstream(only labels) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PATCH, + [[{ + "labels": { + "build": "17" + } + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "labels": { + "version":"v2", + "build":"17", + "env":"production" + } + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: invalid format of label value: set upstream +--- config + location /t { + content_by_lua_block { + 
local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "labels": { + "env": ["production", "release"] + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"labels\" validation failed: failed to validate env (matching \".*\"): wrong type: expected string, got table"} + + + +=== TEST 12: patch upstream(whole, create_time) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PATCH, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream", + "create_time": 1705252779 + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream", + "create_time": 1705252779 + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + if code >= 300 then + return + end + + local res = assert(etcd.get('/upstreams/1')) + local create_time = res.body.node.value.create_time + assert(create_time == 1705252779, "create_time mismatched") + } + } +--- response_body +passed + + + +=== TEST 13: patch upstream(whole, update_time) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PATCH, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream", + "update_time": 1705252779 + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "desc": "new upstream", + "create_time": 1705252779 + }, + "key": "/apisix/upstreams/1" + }]] + ) + + ngx.status = code + ngx.say(body) + + if code >= 300 then + return + 
end + + local res = assert(etcd.get('/upstreams/1')) + local update_time = res.body.node.value.update_time + assert(update_time == 1705252779, "update_time mismatched") + } + } +--- response_body +passed + + + +=== TEST 14: create upstream with create_time and update_time +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/up_create_update_time', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "create_time": 1602883670, + "update_time": 1602893670 + }]], + [[{ + "value": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin", + "create_time": 1602883670, + "update_time": 1602893670 + }, + "key": "/apisix/upstreams/up_create_update_time" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"the property is forbidden:.*"\}/ + + + +=== TEST 15: patch upstream with sub_path, the data is number +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, message = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": {}, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + local id = 1 + local res = assert(etcd.get('/upstreams/' .. id)) + local prev_create_time = res.body.node.value.create_time + local prev_update_time = res.body.node.value.update_time + ngx.sleep(1) + + local code, message = t('/apisix/admin/upstreams/1/retries', + ngx.HTTP_PATCH, + json.encode(1) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(message) + local res = assert(etcd.get('/upstreams/' .. 
id)) + local create_time = res.body.node.value.create_time + assert(prev_create_time == create_time, "create_time mismatched") + local update_time = res.body.node.value.update_time + assert(prev_update_time ~= update_time, "update_time should be changed") + } + } + + + +=== TEST 16: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: set service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream_id": 1, + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: delete upstream(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', ngx.HTTP_DELETE) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this upstream, route [1] is still using it now"} + + + +=== TEST 20: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " 
message: ", message) + } + } +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 21: delete service(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 22: delete upstream(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', ngx.HTTP_DELETE) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 200 message: passed diff --git a/CloudronPackages/APISIX/apisix-source/t/admin/upstream5.t b/CloudronPackages/APISIX/apisix-source/t/admin/upstream5.t new file mode 100644 index 0000000..572d292 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/admin/upstream5.t @@ -0,0 +1,599 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set upstream(kafka scheme) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local code, body = t.test("/apisix/admin/upstreams/kafka", ngx.HTTP_PUT, [[{ + "nodes": { + "127.0.0.1:9092": 1 + }, + "type": "none", + "scheme": "kafka" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: set upstream(empty tls) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local code, body = t.test("/apisix/admin/upstreams/kafka", ngx.HTTP_PUT, [[{ + "nodes": { + "127.0.0.1:9092": 1 + }, + "type": "none", + "scheme": "kafka", + "tls": {} + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: set upstream(tls without verify) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local code, body = t.test("/apisix/admin/upstreams/kafka", ngx.HTTP_PUT, [[{ + "nodes": { + "127.0.0.1:9092": 1 + }, + "type": "none", + "scheme": "kafka", + "tls": { + "verify": false + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: prepare upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_PUT, [[{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + } + } 
+--- response_body +passed + + + +=== TEST 5: prepare route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/routes/1", ngx.HTTP_PUT, [[{ + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + } + } +--- response_body +passed + + + +=== TEST 6: delete upstream when plugin in route still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in route [1] is still using it now"} + + + +=== TEST 7: delete route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/routes/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: prepare service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/services/1", ngx.HTTP_PUT, [[{ + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: delete upstream when plugin in service still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in service [1] is still using it now"} + + + +=== TEST 10: delete service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/services/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: prepare global_rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t("/apisix/admin/global_rules/1", ngx.HTTP_PUT, [[{ + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: delete upstream when plugin in global_rule still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in global_rules [1] is still using it now"} + + + +=== TEST 13: delete global_rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/global_rules/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: prepare plugin_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t("/apisix/admin/plugin_configs/1", 
ngx.HTTP_PUT, [[{ + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: delete upstream when plugin in plugin_config still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in plugin_config [1] is still using it now"} + + + +=== TEST 16: delete plugin_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/plugin_configs/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: prepare consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t("/apisix/admin/consumers", ngx.HTTP_PUT, [[{ + "username": "test", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: delete upstream when plugin in consumer still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin 
in consumer [test] is still using it now"} + + + +=== TEST 19: delete consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/consumers/test", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: prepare consumer_group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t("/apisix/admin/consumer_groups/1", ngx.HTTP_PUT, [[{ + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: delete upstream when plugin in consumer_group still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in consumer_group [1] is still using it now"} + + + +=== TEST 22: delete consumer_group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/consumer_groups/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: delete upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git 
a/CloudronPackages/APISIX/apisix-source/t/apisix.luacov b/CloudronPackages/APISIX/apisix-source/t/apisix.luacov new file mode 100644 index 0000000..0694c2b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/apisix.luacov @@ -0,0 +1,38 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +return { + modules = { + ["lua.*"] = "lua", + ["apisix/*"] = "apisix", + ["apisix/admin/*"] = "admin", + ["apisix/core/*"] = "core", + ["apisix/http/*"] = "http", + ["apisix/http/router/*"] = "http/router", + ["apisix/plugins/*"] = "plugins", + ["apisix/plugins/grpc-transcode/*"] = "plugins/grpc-transcode", + ["apisix/plugins/limit-count/*"] = "plugins/limit-count", + ["apisix/plugins/prometheus/*"] = "plugins/prometheus", + ["apisix/plugins/zipkin/*"] = "plugins/zipkin", + ["apisix/utils/*"] = "utils", + ["apisix/discovery/*"] = "discovery", + + -- can not enable both at http and stream, will fix it later. 
+ -- ["apisix/stream/*"] = "stream", + -- ["apisix/stream/plugins/*"] = "stream/plugins", + -- ["apisix/stream/router/*"] = "stream/router", + }, +} diff --git a/CloudronPackages/APISIX/apisix-source/t/assets/ai-proxy-response.json b/CloudronPackages/APISIX/apisix-source/t/assets/ai-proxy-response.json new file mode 100644 index 0000000..94665e5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/assets/ai-proxy-response.json @@ -0,0 +1,15 @@ +{ + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { "content": "1 + 1 = 2.", "role": "assistant" } + } + ], + "created": 1723780938, + "id": "chatcmpl-9wiSIg5LYrrpxwsr2PubSQnbtod1P", + "model": "gpt-4o-2024-05-13", + "object": "chat.completion", + "system_fingerprint": "fp_abc28019ad", + "usage": { "completion_tokens": 8, "prompt_tokens": 23, "total_tokens": 31 } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/assets/content-moderation-responses.json b/CloudronPackages/APISIX/apisix-source/t/assets/content-moderation-responses.json new file mode 100644 index 0000000..e10c3d0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/assets/content-moderation-responses.json @@ -0,0 +1,224 @@ +{ + "good_request": { + "ResultList": [ + { + "Toxicity": 0.02150000333786, + "Labels": [ + { + "Name": "PROFANITY", + "Score": 0.00589999556541 + }, + { + "Name": "HATE_SPEECH", + "Score": 0.01729999780655 + }, + { + "Name": "INSULT", + "Score": 0.00519999861717 + }, + { + "Name": "GRAPHIC", + "Score": 0.00520000338554 + }, + { + "Name": "HARASSMENT_OR_ABUSE", + "Score": 0.00090001106262 + }, + { + "Name": "SEXUAL", + "Score": 0.00810000061989 + }, + { + "Name": "VIOLENCE_OR_THREAT", + "Score": 0.00570000290871 + } + ] + } + ] + }, + "profane": { + "ResultList": [ + { + "Toxicity": 0.62150000333786, + "Labels": [ + { + "Name": "PROFANITY", + "Score": 0.55589999556541 + }, + { + "Name": "HATE_SPEECH", + "Score": 0.21729999780655 + }, + { + "Name": "INSULT", + "Score": 0.25519999861717 + }, + { + 
"Name": "GRAPHIC", + "Score": 0.12520000338554 + }, + { + "Name": "HARASSMENT_OR_ABUSE", + "Score": 0.27090001106262 + }, + { + "Name": "SEXUAL", + "Score": 0.44810000061989 + }, + { + "Name": "VIOLENCE_OR_THREAT", + "Score": 0.27570000290871 + } + ] + } + ] + }, + "profane_but_not_toxic": { + "ResultList": [ + { + "Toxicity": 0.12150000333786, + "Labels": [ + { + "Name": "PROFANITY", + "Score": 0.55589999556541 + }, + { + "Name": "HATE_SPEECH", + "Score": 0.21729999780655 + }, + { + "Name": "INSULT", + "Score": 0.25519999861717 + }, + { + "Name": "GRAPHIC", + "Score": 0.12520000338554 + }, + { + "Name": "HARASSMENT_OR_ABUSE", + "Score": 0.27090001106262 + }, + { + "Name": "SEXUAL", + "Score": 0.44810000061989 + }, + { + "Name": "VIOLENCE_OR_THREAT", + "Score": 0.27570000290871 + } + ] + } + ] + }, + "very_profane": { + "ResultList": [ + { + "Toxicity": 0.72150000333786, + "Labels": [ + { + "Name": "PROFANITY", + "Score": 0.85589999556541 + }, + { + "Name": "HATE_SPEECH", + "Score": 0.21729999780655 + }, + { + "Name": "INSULT", + "Score": 0.25519999861717 + }, + { + "Name": "GRAPHIC", + "Score": 0.12520000338554 + }, + { + "Name": "HARASSMENT_OR_ABUSE", + "Score": 0.27090001106262 + }, + { + "Name": "SEXUAL", + "Score": 0.94810000061989 + }, + { + "Name": "VIOLENCE_OR_THREAT", + "Score": 0.27570000290871 + } + ] + } + ] + }, + "toxic": { + "ResultList": [ + { + "Toxicity": 0.72150000333786, + "Labels": [ + { + "Name": "PROFANITY", + "Score": 0.25589999556541 + }, + { + "Name": "HATE_SPEECH", + "Score": 0.21729999780655 + }, + { + "Name": "INSULT", + "Score": 0.75519999861717 + }, + { + "Name": "GRAPHIC", + "Score": 0.12520000338554 + }, + { + "Name": "HARASSMENT_OR_ABUSE", + "Score": 0.27090001106262 + }, + { + "Name": "SEXUAL", + "Score": 0.64810000061989 + }, + { + "Name": "VIOLENCE_OR_THREAT", + "Score": 0.27570000290871 + } + ] + } + ] + }, + "very_toxic": { + "ResultList": [ + { + "Toxicity": 0.92150000333786, + "Labels": [ + { + "Name": "PROFANITY", + 
"Score": 0.25589999556541 + }, + { + "Name": "HATE_SPEECH", + "Score": 0.21729999780655 + }, + { + "Name": "INSULT", + "Score": 0.25519999861717 + }, + { + "Name": "GRAPHIC", + "Score": 0.12520000338554 + }, + { + "Name": "HARASSMENT_OR_ABUSE", + "Score": 0.27090001106262 + }, + { + "Name": "SEXUAL", + "Score": 0.44810000061989 + }, + { + "Name": "VIOLENCE_OR_THREAT", + "Score": 0.27570000290871 + } + ] + } + ] + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/assets/embeddings.json b/CloudronPackages/APISIX/apisix-source/t/assets/embeddings.json new file mode 100644 index 0000000..2baa330 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/assets/embeddings.json @@ -0,0 +1,25 @@ +{ + "object": "list", + "data": [ + { + "object": "embedding", + "index": 0, + "embedding": [ + 123456789, + 0.01902593, + 0.008967914, + -0.013226582, + -0.026961878, + -0.017892223, + -0.0007785152, + -0.011031842, + 0.0068531134 + ] + } + ], + "model": "text-embedding-3-small", + "usage": { + "prompt_tokens": 4, + "total_tokens": 4 + } + } diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/apisix.crt b/CloudronPackages/APISIX/apisix-source/t/certs/apisix.crt new file mode 100644 index 0000000..503f277 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/apisix.crt @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G +A1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa +GA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n +RG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM +CHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe +cvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb +VDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR +2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr 
+abf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2 +WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/ +Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1 +/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh +/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj +cTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ +tSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl +c3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC +tC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY +1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl +PYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob +rJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy +hme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1 +7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y +IJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve +U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/apisix.key b/CloudronPackages/APISIX/apisix-source/t/certs/apisix.key new file mode 100644 index 0000000..7105067 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/apisix.key @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG5AIBAAKCAYEAyCM0rqJecvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5 +jhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo +eLj0efMiOepOSZflj9Ob4yKR2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5s +mPtW1Oc/BV5terhscJdOgmRrabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt +6iMWEGeQU6mwPENgvj1olji2WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiy +Vt1TmtMWn1ztk6FfLRqwJWR/Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1o +npRVeXhrBajbCRDRBMwaNw/1/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2 +fzaqpIfyUbPST4GdqNG9NyIh/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI +1cGrGwyXbrieNp63AgMBAAECggGBAJM8g0duoHmIYoAJzbmKe4ew0C5fZtFUQNmu 
+O2xJITUiLT3ga4LCkRYsdBnY+nkK8PCnViAb10KtIT+bKipoLsNWI9Xcq4Cg4G3t +11XQMgPPgxYXA6m8t+73ldhxrcKqgvI6xVZmWlKDPn+CY/Wqj5PA476B5wEmYbNC +GIcd1FLl3E9Qm4g4b/sVXOHARF6iSvTR+6ol4nfWKlaXSlx2gNkHuG8RVpyDsp9c +z9zUqAdZ3QyFQhKcWWEcL6u9DLBpB/gUjyB3qWhDMe7jcCBZR1ALyRyEjmDwZzv2 +jlv8qlLFfn9R29UI0pbuL1eRAz97scFOFme1s9oSU9a12YHfEd2wJOM9bqiKju8y +DZzePhEYuTZ8qxwiPJGy7XvRYTGHAs8+iDlG4vVpA0qD++1FTpv06cg/fOdnwshE +OJlEC0ozMvnM2rZ2oYejdG3aAnUHmSNa5tkJwXnmj/EMw1TEXf+H6+xknAkw05nh +zsxXrbuFUe7VRfgB5ElMA/V4NsScgQKBwQDmMRtnS32UZjw4A8DsHOKFzugfWzJ8 +Gc+3sTgs+4dNIAvo0sjibQ3xl01h0BB2Pr1KtkgBYB8LJW/FuYdCRS/KlXH7PHgX +84gYWImhNhcNOL3coO8NXvd6+m+a/Z7xghbQtaraui6cDWPiCNd/sdLMZQ/7LopM +RbM32nrgBKMOJpMok1Z6zsPzT83SjkcSxjVzgULNYEp03uf1PWmHuvjO1yELwX9/ +goACViF+jst12RUEiEQIYwr4y637GQBy+9cCgcEA3pN9W5OjSPDVsTcVERig8++O +BFURiUa7nXRHzKp2wT6jlMVcu8Pb2fjclxRyaMGYKZBRuXDlc/RNO3uTytGYNdC2 +IptU5N4M7iZHXj190xtDxRnYQWWo/PR6EcJj3f/tc3Itm1rX0JfuI3JzJQgDb9Z2 +s/9/ub8RRvmQV9LM/utgyOwNdf5dyVoPcTY2739X4ZzXNH+CybfNa+LWpiJIVEs2 +txXbgZrhmlaWzwA525nZ0UlKdfktdcXeqke9eBghAoHARVTHFy6CjV7ZhlmDEtqE +U58FBOS36O7xRDdpXwsHLnCXhbFu9du41mom0W4UdzjgVI9gUqG71+SXrKr7lTc3 +dMHcSbplxXkBJawND/Q1rzLG5JvIRHO1AGJLmRgIdl8jNgtxgV2QSkoyKlNVbM2H +Wy6ZSKM03lIj74+rcKuU3N87dX4jDuwV0sPXjzJxL7NpR/fHwgndgyPcI14y2cGz +zMC44EyQdTw+B/YfMnoZx83xaaMNMqV6GYNnTHi0TO2TAoHBAKmdrh9WkE2qsr59 +IoHHygh7Wzez+Ewr6hfgoEK4+QzlBlX+XV/9rxIaE0jS3Sk1txadk5oFDebimuSk +lQkv1pXUOqh+xSAwk5v88dBAfh2dnnSa8HFN3oz+ZfQYtnBcc4DR1y2X+fVNgr3i +nxruU2gsAIPFRnmvwKPc1YIH9A6kIzqaoNt1f9VM243D6fNzkO4uztWEApBkkJgR +4s/yOjp6ovS9JG1NMXWjXQPcwTq3sQVLnAHxZRJmOvx69UmK4QKBwFYXXjeXiU3d +bcrPfe6qNGjfzK+BkhWznuFUMbuxyZWDYQD5yb6ukUosrj7pmZv3BxKcKCvmONU+ +CHgIXB+hG+R9S2mCcH1qBQoP/RSm+TUzS/Bl2UeuhnFZh2jSZQy3OwryUi6nhF0u +LDzMI/6aO1ggsI23Ri0Y9ZtqVKczTkxzdQKR9xvoNBUufjimRlS80sJCEB3Qm20S +wzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg== +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/apisix_admin_ssl.crt 
b/CloudronPackages/APISIX/apisix-source/t/certs/apisix_admin_ssl.crt new file mode 100644 index 0000000..82d7fc3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/apisix_admin_ssl.crt @@ -0,0 +1,33 @@ +-----BEGIN CERTIFICATE----- +MIIFsTCCA5mgAwIBAgIUODyT8W4gAxf8uwMNmtj5M1ANoUwwDQYJKoZIhvcNAQEL +BQAwVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMQ0wCwYDVQQKDARhcGk3MRMwEQYDVQQDDAphcGlzaXguZGV2MCAXDTIw +MDYwNDAzMzc1MFoYDzIxMjAwNTExMDMzNzUwWjBWMQswCQYDVQQGEwJDTjESMBAG +A1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDTALBgNVBAoMBGFwaTcx +EzARBgNVBAMMCmFwaXNpeC5kZXYwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQDQveSdplH49Lr+LsLWpGJbNRhf2En0V4SuFKpzGFP7mXaI7rMnpdH3BUVY +S3juMgPOdNh6ho4BeSbGZGfU3lG1NwIOXiPNA1mrTWGNGV97crJDVZeWTuDpqNHJ +4ATrnF6RnRbg0en8rjVtce6LBMrDJVyGbi9VAqBUPrCmzT/l0V1jPL6KNSN8mQog +ladrJuzUanfhWM9K9xyM+/SUt1MNUYFLNsVHasPzsi5/YDRBiwuzTtiT56O6yge2 +lvrdPFvULrCxlGteyvhtrFJwqjN//YtnQFooNR0CXBfXs0a7WGgMjawupuP1JKiY +t9KEcGHWGZDeLfsGGKgQ9G+PaP4y+gHjLr5xQvwt68otpoafGy+BpOoHZZFoLBpx +TtJKA3qnwyZg9zr7lrtqr8CISO/SEyh6xkAOUzb7yc2nHu9UpruzVIR7xI7pjc7f +2T6WyCVy6gFYQwzFLwkN/3O+ZJkioxXsnwaYWDj61k3d9ozVDkVkTuxmNJjXV8Ta +htGRAHo0/uHmpFTcaQfDf5o+iWi4z9B5kgfA/A1XWFQlCH1kl3mHKg7JNCN9qGF8 +rG+YzdiLQfo5OqJSvzGHRXbdGI2JQe/zyJHsMO7d0AhwXuPOWGTTAODOPlaBCxNB +AgjuUgt+3saqCrK4eaOo8sPt055AYJhZlaTH4EeD4sv7rJGm7wIDAQABo3UwczAd +BgNVHQ4EFgQUPS1LXZMqgQvH/zQHHzgTzrd7PIIwHwYDVR0jBBgwFoAUPS1LXZMq +gQvH/zQHHzgTzrd7PIIwDAYDVR0TBAUwAwEB/zAjBgNVHREEHDAaggphcGlzaXgu +ZGV2ggwqLmFwaXNpeC5kZXYwDQYJKoZIhvcNAQELBQADggIBAMlwNS8uo3JkkshI +rpYobdjCZfr74PBl+LhoihvzHs25/in3+CxETRA8cYo5pRotqdA63po3wiCCPs6a +mZiELQxyGHhFcqoYxnoURR4nyogRZLA6jjLGkbG4H+CA4ApmZmvGnP3X5uQW4v5q +IdqIXL3BvoUBln8GMEC7Rz5SGUjWG03JPkl6MdeziFyHkwdBCOrtK5m7icRncvq+ +iL8CMUx024LLI6A5hTBPwfVfgbWJTSv7tEu85q54ZZoYQhiD8dde4D7g5/noPvXM +ZyA9C3Sl981+pUhhazad9j9k8DCcqf9e8yH9lPY26tjiEcShv4YnwbErWzJU1F9s +ZI5Z6nj5PU66upnBWAWV7fWCOrlouB4GjNaznSNrmpn4Bb2+FinDK3t4AfWDPS5s 
+ljQBGQNXOd30DC7BdNAF5dQAUhVfz1EgQGqYa+frMQLiv8rNMs7h6gKQEqU+jC/1 +jbGe4/iwc0UeTtSgTPHMofqjqc99/R/ZqtJ3qFPJmoWpyu0NlNINw2KWRQaMoGLo +WgDCS0YA5/hNXVFcWnZ73jY62yrVSoj+sFbkUpGWhEFnO+uSmBv8uwY3UeCOQDih +X7Yazs3TZRqEPU+25QATf0kbxyzlWbGkwvyRD8x+n3ZHs5Ilhrc6jWHqM/S3ir7i +m9GcWiwg++EbusQsqs3w3uKAHAdT +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/apisix_admin_ssl.key b/CloudronPackages/APISIX/apisix-source/t/certs/apisix_admin_ssl.key new file mode 100644 index 0000000..ec88905 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/apisix_admin_ssl.key @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEA0L3knaZR+PS6/i7C1qRiWzUYX9hJ9FeErhSqcxhT+5l2iO6z +J6XR9wVFWEt47jIDznTYeoaOAXkmxmRn1N5RtTcCDl4jzQNZq01hjRlfe3KyQ1WX +lk7g6ajRyeAE65xekZ0W4NHp/K41bXHuiwTKwyVchm4vVQKgVD6wps0/5dFdYzy+ +ijUjfJkKIJWnaybs1Gp34VjPSvccjPv0lLdTDVGBSzbFR2rD87Iuf2A0QYsLs07Y +k+ejusoHtpb63Txb1C6wsZRrXsr4baxScKozf/2LZ0BaKDUdAlwX17NGu1hoDI2s +Lqbj9SSomLfShHBh1hmQ3i37BhioEPRvj2j+MvoB4y6+cUL8LevKLaaGnxsvgaTq +B2WRaCwacU7SSgN6p8MmYPc6+5a7aq/AiEjv0hMoesZADlM2+8nNpx7vVKa7s1SE +e8SO6Y3O39k+lsglcuoBWEMMxS8JDf9zvmSZIqMV7J8GmFg4+tZN3faM1Q5FZE7s +ZjSY11fE2obRkQB6NP7h5qRU3GkHw3+aPolouM/QeZIHwPwNV1hUJQh9ZJd5hyoO +yTQjfahhfKxvmM3Yi0H6OTqiUr8xh0V23RiNiUHv88iR7DDu3dAIcF7jzlhk0wDg +zj5WgQsTQQII7lILft7GqgqyuHmjqPLD7dOeQGCYWZWkx+BHg+LL+6yRpu8CAwEA +AQKCAgBNsbBLAWHXYPfMrgj1LUAypIOLAQ0dtgl7ZdO/fRmdNxSIiRgDtNN+tuaF +o6nCNrl1+cWtbTGj2L0W8L442/rbkTrhsCZxI0MX4HhjtUL1xs4VA+GlH3zVW3Gi +SxBpxczpM+gVC+ykkQ7vyo04DzONCPX0T0Ssxop4cND9dL3Iw3GYAz8EYBzyPmAn +mqwy1M0nju1J4e1eALYOv6TcSZPPDDwsi5lIKLQAm5x06pDoqGFVfw5blsc5OgM+ +8dkzyUiApFQ99Hk2UiO/ZnlU1/TNOcjOSISGHKbMfwycy2yTRKeNrJmez51fXCKo +nRrtEotHzkI+gCzDqx+7F9ACN9kM4f4JO5ca0/My6tCY+mH8TA/nVzMnUpL7329w +NobuNTpyA6x5nmB3QqElrzQCRtTj7Nw5ytMdRbByJhXww9C5tajUysdq8oGoZdz5 +94kXr6qCC5Qm3CkgyF2RjqZyg9tHUEEdaFKouHgziiqG9P2Nk1SHk7Jd7bF4rleI +i93u/f0fdVK7aMksofgUbOmfhnS+o1NxerVcbdX+E/iv6yfkrYDb46y3//4dcpwk 
+TeUEMCjc7ShwvYPq350q3jmzgwxeTK8ZdXwJymdJ7MaGcnMXPqd9A43evYM6nG6f +i3l2tYhH4cp6misGChnGORR68qsRkY8ssvSFNFzjcFHhnPyoCQKCAQEA8isIC1IJ +Iq9kB4mDVh0QdiuoBneNOEHy/8fASeZsqedu0OZPyoXU96iOhXuqf8sQ33ydvPef +iRwasLLkgw8sDeWILUjS36ZzwGP2QNxWfrapCFS8VfKl7hTPMVp0Wzxh8qqpGLSh +O0W7EEAJCgzzULagfupaO0Chmb3LZqXRp8m5oubnmE+9z0b5GrCIT1S8Yay2mEw9 +jxqZJGBhV7QnupyC2DIxLXlGmQk7Qs1+1mCCFwyfugHXclWYa+fet/79SkkADK0/ +ysxfy+FdZgGT/Ba5odsEpt1zH+tw4WXioJsX9mU3zAHbpPqtcfuVU+2xyKfQYrRG +NSm9MMNmart0wwKCAQEA3Koaj/0gNxLLslLIES50KmmagzU8CkEmCa/WLoVy02xr +qp42hvj+PzBTf3rIno3KEpRhMmnAtswozbV3P4l/VSZdfY+pwWsx7/5+Cf1R9nAP +vp6YCjGcLcbASazYNOWf0FRInt3pxdgT9DWjJDi99FGKA+UbI2yxHwzE+cE8r9Od +Iy42uhzCjJBqdg+an+q63k6yrOwv18KP69LlU/4vknhw4g3WxF4yTwVmXU8WKmux +aOrJv2ED8pfA7k+zwv0rPyN+F2nOySxoChaFfeu6ntBCX7zK/nV0DsMQImOycfzO +yN8WB9lRZTJVzU2r6PaGAI359uLHEmURy0069g+yZQKCAQAbECwJ99UFh0xKe1eu +G/lm+2H/twSVMOmTJCOdHp8uLar4tYRdQa+XLcMfr75SIcN09lw6bgHqNLXW4Wcg +LmXh97DMPsMyM0vkSEeQ4A7agldJkw6pHEDm5nRxM4alW44mrGPRWv5ZvWU2X7Gi +6eeXMZGmHVKQJJzqrYc5pXZUpfqU9fET2HWB4JCeJvRUyUd0MvUE+CA5CePraMn4 +Hy4BcNQ+jP1p84+sMpfo00ZFduuS39pJ00LciCxMgtElBt4PmzDiOcpTQ5vBESJ6 +79o15eRA7lUKwNzIyGsJBXXaNPrskks2BU8ilNElV9RMWNfxcK+dGEBwWIXIGU4s +x145AoIBAQCst9R8udNaaDLaTGNe126DuA8B/kwVdrLwSBqsZTXgeO+5J4dklEZl +bU0d7hxTxoXRjySZEh+OtTSG9y/0oonxO0tYOXfU9jOrNxaueQKLk2EvgfFdoUEu +r2/Y+xpsJQO3TBFfkDEn856Cuu0MMAG214/gxpY8XxowRI11NCRtN4S6gbTCbjp1 +TaCW8lXEMDW+Rfki0ugLyLVgD74CxWW1DuLEfbKKF3TnV0GtbXbbE1pU1dm+G5C8 +dL3FissYp5MPI5fRebcqzcBNjR1F15pGLpqVVy/IhmSmHVZmpISLJicxITScRiSo +wgJY5R/XBAcVLgvmi9Dn/AY2jCfHa7flAoIBAQCbnZ6ivZg81g6/X9qdo9J61hX0 +Y7Fn7bLvcs1L0ARGTsfXMvegA806XyZThqjpY47nHpQtoz4z62kiTTsdpAZUeA3z +9HUWr0b3YEpsvZpgyMNHgwq1vRDPjw4AWz0pBoDWMxx8Ck5nP1A//c1zyu9pgYEU +R+OutDeCJ+0VAc6JSH9WMA08utGPGs3t02Zhtyt2sszE9vzz4hTi5340/AYG72p7 +YGlikUxvbyylYh9wR4YUYa/klikvKLHEML1P0BCr8Vex+wLSGS1h1F5tW1Xr2CZQ +dVxFmfGmPDmwWbCQR6Rvt6FHRwNMpMrLr011h2RBcHBpdQl7XpUENDoopIh0 +-----END RSA PRIVATE KEY----- diff --git 
a/CloudronPackages/APISIX/apisix-source/t/certs/apisix_ecc.crt b/CloudronPackages/APISIX/apisix-source/t/certs/apisix_ecc.crt new file mode 100644 index 0000000..b40240e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/apisix_ecc.crt @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB8jCCAZmgAwIBAgIJALwxr+GMOgSKMAoGCCqGSM49BAMCMFYxCzAJBgNVBAYT +AkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0GA1UE +CgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAeFw0yMDA4MTgwODI0MzdaFw0y +MTA4MTgwODI0MzdaMFYxCzAJBgNVBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0Rvbmcx +DzANBgNVBAcMBlpodUhhaTEPMA0GA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0 +LmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABEJACTxb5qYd4v9VaNKlv2fe +XlZSTDYe+0fZwT4l9sifmPzmpwjiVTB2wLiCYYzy+BPrb29r5ubgtXIflsWKRBKj +UDBOMB0GA1UdDgQWBBQPXxOYAHfboUjsoo1xm6/GJ1qHijAfBgNVHSMEGDAWgBQP +XxOYAHfboUjsoo1xm6/GJ1qHijAMBgNVHRMEBTADAQH/MAoGCCqGSM49BAMCA0cA +MEQCICQ70LiJ+Z2lv9ZF+FQL+VEdVQ938rz6RGXBmnl2oEvkAiBY2eeTl//JanNX +GsSV104WrpHjcBjcY24jb11Y1H3R9g== +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/apisix_ecc.key b/CloudronPackages/APISIX/apisix-source/t/certs/apisix_ecc.key new file mode 100644 index 0000000..984f3ea --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/apisix_ecc.key @@ -0,0 +1,8 @@ +-----BEGIN EC PARAMETERS----- +BggqhkjOPQMBBw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIBlQqVD3bK6CQT5puOOngrb50+3K66MKJdhtpWQoUw2poAoGCCqGSM49 +AwEHoUQDQgAEQkAJPFvmph3i/1Vo0qW/Z95eVlJMNh77R9nBPiX2yJ+Y/OanCOJV +MHbAuIJhjPL4E+tvb2vm5uC1ch+WxYpEEg== +-----END EC PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/client_enc.crt b/CloudronPackages/APISIX/apisix-source/t/certs/client_enc.crt new file mode 100644 index 0000000..4e5bc78 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/client_enc.crt @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIB2TCCAX6gAwIBAgIBBTAKBggqgRzPVQGDdTBFMQswCQYDVQQGEwJBQTELMAkG 
+A1UECAwCQkIxCzAJBgNVBAoMAkNDMQswCQYDVQQLDAJERDEPMA0GA1UEAwwGc3Vi +IGNhMB4XDTIyMTEwMjAzMTkzNloXDTMyMTAzMDAzMTkzNlowSTELMAkGA1UEBhMC +QUExCzAJBgNVBAgMAkJCMQswCQYDVQQKDAJDQzELMAkGA1UECwwCREQxEzARBgNV +BAMMCmNsaWVudCBlbmMwWjAUBggqgRzPVQGCLQYIKoEcz1UBgi0DQgAEYYuPPz5e +0QMSGPeBfVbK02GwYhSieSCuc12WsNw+ZQEiaN3NJ2Mh0EAH95eWVutKAeMwKwQZ +q7QgnSoo3io8hKNaMFgwCQYDVR0TBAIwADALBgNVHQ8EBAMCAzgwHQYDVR0OBBYE +FEL0AwvahirH+kdK5Poq+e0yhii1MB8GA1UdIwQYMBaAFCTrpmbUig3JfveqAIGJ +6n+vAk2AMAoGCCqBHM9VAYN1A0kAMEYCIQDx+KxdaJ7YX5gR492EgiGn7//HsjOU +B7+jyTVvkNzN2AIhAIbDKNJQ2i5Edcw/nDIWJQLec7NZui3QfC/gr9AuCfHN +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/client_enc.key b/CloudronPackages/APISIX/apisix-source/t/certs/client_enc.key new file mode 100644 index 0000000..154aba1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/client_enc.key @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqBHM9VAYItBG0wawIBAQQgq7+Y1ql10Uvv2vTf +AHR26o4B3RJTJO5XTh0BNLdtB7OhRANCAARhi48/Pl7RAxIY94F9VsrTYbBiFKJ5 +IK5zXZaw3D5lASJo3c0nYyHQQAf3l5ZW60oB4zArBBmrtCCdKijeKjyE +-----END PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/client_sign.crt b/CloudronPackages/APISIX/apisix-source/t/certs/client_sign.crt new file mode 100644 index 0000000..562742b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/client_sign.crt @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIB2TCCAX+gAwIBAgIBBDAKBggqgRzPVQGDdTBFMQswCQYDVQQGEwJBQTELMAkG +A1UECAwCQkIxCzAJBgNVBAoMAkNDMQswCQYDVQQLDAJERDEPMA0GA1UEAwwGc3Vi +IGNhMB4XDTIyMTEwMjAzMTkzNloXDTMyMTAzMDAzMTkzNlowSjELMAkGA1UEBhMC +QUExCzAJBgNVBAgMAkJCMQswCQYDVQQKDAJDQzELMAkGA1UECwwCREQxFDASBgNV +BAMMC2NsaWVudCBzaWduMFowFAYIKoEcz1UBgi0GCCqBHM9VAYItA0IABFZcc94m +hTZRKis639AnlAbS0cKQv73GP5RzBdNlLpAaUwi4hqAh0ZUIcTH/5ZbOTal9MvHA +gOLjVxv197o+fNejWjBYMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgbAMB0GA1UdDgQW +BBTkdzyphRCxqD6m6j/AUhMSEBASRDAfBgNVHSMEGDAWgBQk66Zm1IoNyX73qgCB 
+iep/rwJNgDAKBggqgRzPVQGDdQNIADBFAiB2rcm1UI84yPYT5q6vjucBNPw01cHM +3/Hc9fhiuYPyHwIhAIiPPPj4XI2l98C+DBaqoZtSiwDK2IA6Q8lf9SmHdFPN +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/client_sign.key b/CloudronPackages/APISIX/apisix-source/t/certs/client_sign.key new file mode 100644 index 0000000..22daee0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/client_sign.key @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqBHM9VAYItBG0wawIBAQQgIsAr+s4TL7K4AQWO +PbXrJfHoO5yE2V7oYQxUBsieOQOhRANCAARWXHPeJoU2USorOt/QJ5QG0tHCkL+9 +xj+UcwXTZS6QGlMIuIagIdGVCHEx/+WWzk2pfTLxwIDi41cb9fe6PnzX +-----END PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/etcd.key b/CloudronPackages/APISIX/apisix-source/t/certs/etcd.key new file mode 100644 index 0000000..c2bd651 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/etcd.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCf6sMQke4OUrPf +lZXqKZC4uaQLMxJB2DgacBf4Q3kXcVNCNHtc2U3U5cMD8tr3/Jt5MKKun5jQrbXV +FF4eVr4Iv9jgPBwQc2kBUC9QL/alsMhEuXMeqdGQcCK3F0CLJdV3zUlKUDU0kg+O +Exnbl1CHXrIbpD7zLy1i3s8p39v1pYFYf4WlrQxvfa/xo97gXY5dJv8RryryLzRc +uhHYBvX5MHCGpbrY61JxpfZqBo8CmLuHl1tmbeXpdHdQB11LKiuL6HtKflNjc6rg +5r8bXl1nZbM/KOZEE+muA1LVoaTyHzY/aGXz0bNy4QRUO+De9JFcTDgnXnNZVG5x +cyyDBpc9AgMBAAECggEAatcEtehZPJaCeClPPF/Cwbe9YoIfe4BCk186lHI3z7K1 +5nB7zt+bwVY0AUpagv3wvXoB5lrYVOsJpa9y5iAb3GqYMc/XDCKfD/KLea5hwfcn +BctEn0LjsPVKLDrLs2t2gBDWG2EU+udunwQh7XTdp2Nb6V3FdOGbGAg2LgrSwP1g +0r4z14F70oWGYyTQ5N8UGuyryVrzQH525OYl38Yt7R6zJ/44FVi/2TvdfHM5ss39 +SXWi00Q30fzaBEf4AdHVwVCRKctwSbrIOyM53kiScFDmBGRblCWOxXbiFV+d3bjX +gf2zxs7QYZrFOzOO7kLtHGua4itEB02497v+1oKDwQKBgQDOBvCVGRe2WpItOLnj +SF8iz7Sm+jJGQz0D9FhWyGPvrN7IXGrsXavA1kKRz22dsU8xdKk0yciOB13Wb5y6 +yLsr/fPBjAhPb4h543VHFjpAQcxpsH51DE0b2oYOWMmz+rXGB5Jy8EkP7Q4njIsc +2wLod1dps8OT8zFx1jX3Us6iUQKBgQDGtKkfsvWi3HkwjFTR+/Y0oMz7bSruE5Z8 
+g0VOHPkSr4XiYgLpQxjbNjq8fwsa/jTt1B57+By4xLpZYD0BTFuf5po+igSZhH8s +QS5XnUnbM7d6Xr/da7ZkhSmUbEaMeHONSIVpYNgtRo4bB9Mh0l1HWdoevw/w5Ryt +L/OQiPhfLQKBgQCh1iG1fPh7bbnVe/HI71iL58xoPbCwMLEFIjMiOFcINirqCG6V +LR91Ytj34JCihl1G4/TmWnsH1hGIGDRtJLCiZeHL70u32kzCMkI1jOhFAWqoutMa +7obDkmwraONIVW/kFp6bWtSJhhTQTD4adI9cPCKWDXdcCHSWj0Xk+U8HgQKBgBng +t1HYhaLzIZlP/U/nh3XtJyTrX7bnuCZ5FhKJNWrYjxAfgY+NXHRYCKg5x2F5j70V +be7pLhxmCnrPTMKZhik56AaTBOxVVBaYWoewhUjV4GRAaK5Wc8d9jB+3RizPFwVk +V3OU2DJ1SNZ+W2HBOsKrEfwFF/dgby6i2w6MuAP1AoGBAIxvxUygeT/6P0fHN22P +zAHFI4v2925wYdb7H//D8DIADyBwv18N6YH8uH7L+USZN7e4p2k8MGGyvTXeC6aX +IeVtU6fH57Ddn59VPbF20m8RCSkmBvSdcbyBmqlZSBE+fKwCliKl6u/GH0BNAWKz +r8yiEiskqRmy7P7MY9hDmEbG +-----END PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/etcd.pem b/CloudronPackages/APISIX/apisix-source/t/certs/etcd.pem new file mode 100644 index 0000000..2f878bd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/etcd.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIJAI3Meu/gJVTLMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV +BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL +BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl +ci5sb2NhbDAeFw0yMDEwMjgwMzMzMDJaFw0yMTEwMjgwMzMzMDJaMG4xCzAJBgNV +BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL +BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl +ci5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/qwxCR7g5S +s9+VleopkLi5pAszEkHYOBpwF/hDeRdxU0I0e1zZTdTlwwPy2vf8m3kwoq6fmNCt +tdUUXh5Wvgi/2OA8HBBzaQFQL1Av9qWwyES5cx6p0ZBwIrcXQIsl1XfNSUpQNTSS +D44TGduXUIdeshukPvMvLWLezynf2/WlgVh/haWtDG99r/Gj3uBdjl0m/xGvKvIv +NFy6EdgG9fkwcIalutjrUnGl9moGjwKYu4eXW2Zt5el0d1AHXUsqK4voe0p+U2Nz +quDmvxteXWdlsz8o5kQT6a4DUtWhpPIfNj9oZfPRs3LhBFQ74N70kVxMOCdec1lU +bnFzLIMGlz0CAwEAAaNQME4wHQYDVR0OBBYEFFHeljijrr+SPxlH5fjHRPcC7bv2 +MB8GA1UdIwQYMBaAFFHeljijrr+SPxlH5fjHRPcC7bv2MAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQELBQADggEBAG6NNTK7sl9nJxeewVuogCdMtkcdnx9onGtCOeiQ 
+qvh5Xwn9akZtoLMVEdceU0ihO4wILlcom3OqHs9WOd6VbgW5a19Thh2toxKidHz5 +rAaBMyZsQbFb6+vFshZwoCtOLZI/eIZfUUMFqMXlEPrKru1nSddNdai2+zi5rEnM +HCot43+3XYuqkvWlOjoi9cP+C4epFYrxpykVbcrtbd7TK+wZNiK3xtDPnVzjdNWL +geAEl9xrrk0ss4nO/EreTQgS46gVU+tLC+b23m2dU7dcKZ7RDoiA9bdVc4a2IsaS +2MvLL4NZ2nUh8hAEHiLtGMAV3C6xNbEyM07hEpDW6vk6tqk= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/gm_ca.crt b/CloudronPackages/APISIX/apisix-source/t/certs/gm_ca.crt new file mode 100644 index 0000000..1e7216e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/gm_ca.crt @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIB3zCCAYWgAwIBAgIBADAKBggqgRzPVQGDdTBGMQswCQYDVQQGEwJBQTELMAkG +A1UECAwCQkIxCzAJBgNVBAoMAkNDMQswCQYDVQQLDAJERDEQMA4GA1UEAwwHcm9v +dCBjYTAeFw0yMjExMDIwMzE5MzZaFw0zMjEwMzAwMzE5MzZaMEYxCzAJBgNVBAYT +AkFBMQswCQYDVQQIDAJCQjELMAkGA1UECgwCQ0MxCzAJBgNVBAsMAkREMRAwDgYD +VQQDDAdyb290IGNhMFowFAYIKoEcz1UBgi0GCCqBHM9VAYItA0IABB+V1+bwQsP4 +IMZEVCu3LSekz9SIhxWVVtlqdQYZG55S46PmAqICzrO3KFJ/IPtMx9wKn3L6V5M8 +hAc/UwOAnKCjYzBhMB0GA1UdDgQWBBRV7bTJ6vT1fliZR42/+E+fEBfTSjAfBgNV +HSMEGDAWgBRV7bTJ6vT1fliZR42/+E+fEBfTSjAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBhjAKBggqgRzPVQGDdQNIADBFAiEA1SGACV1wj158Spgh+HOW +oOr7rTO2fR4cK9Zx7eUvmAECIHbKbsw5szaC/EH7CdsHdFgrj2tWaXiQUnx/rxM/ +upAQ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIBATAKBggqgRzPVQGDdTBGMQswCQYDVQQGEwJBQTELMAkG +A1UECAwCQkIxCzAJBgNVBAoMAkNDMQswCQYDVQQLDAJERDEQMA4GA1UEAwwHcm9v +dCBjYTAeFw0yMjExMDIwMzE5MzZaFw0zMjEwMzAwMzE5MzZaMEUxCzAJBgNVBAYT +AkFBMQswCQYDVQQIDAJCQjELMAkGA1UECgwCQ0MxCzAJBgNVBAsMAkREMQ8wDQYD +VQQDDAZzdWIgY2EwWjAUBggqgRzPVQGCLQYIKoEcz1UBgi0DQgAElA5ey1dYWNkT +zfvwcKEhX1vHL+Kjil+egM6QssbNrts2S0M07L77XDe1q2zPpHjo0MR05x862/tZ +j87OgmEE0KNmMGQwHQYDVR0OBBYEFCTrpmbUig3JfveqAIGJ6n+vAk2AMB8GA1Ud +IwQYMBaAFFXttMnq9PV+WJlHjb/4T58QF9NKMBIGA1UdEwEB/wQIMAYBAf8CAQAw +DgYDVR0PAQH/BAQDAgGGMAoGCCqBHM9VAYN1A0gAMEUCIArKNHWYLmd3thQmJv89 
+o0wr6O2q26WJuy6y7Eu14rdFAiEA7XNZ0JGMXPKiG5Hl7nmL8ooTrVhrmRrvs9No +Y8rH88Y= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/incorrect.crt b/CloudronPackages/APISIX/apisix-source/t/certs/incorrect.crt new file mode 100644 index 0000000..5e758f8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/incorrect.crt @@ -0,0 +1,12 @@ +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt +test not base64 encoded crt diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/incorrect.key b/CloudronPackages/APISIX/apisix-source/t/certs/incorrect.key new file mode 100644 index 0000000..8b950f0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/incorrect.key @@ -0,0 +1,12 @@ +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key +test not base64 encoded key diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/localhost_slapd_cert.pem b/CloudronPackages/APISIX/apisix-source/t/certs/localhost_slapd_cert.pem new file mode 100644 index 0000000..0830e61 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/localhost_slapd_cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDcjCCAdoCFCS4ndwl6lusO7yj4zxbngp17nNUMA0GCSqGSIb3DQEBCwUAMFYx +CzAJBgNVBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhh +aTEPMA0GA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0yMzA4MDMw +NjM3NDhaGA8yMTA1MDkyMjA2Mzc0OFowEzERMA8GA1UEAwwIdGVzdC5jb20wggEi 
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC4mQik/vxL1bdjXcXWNT6DBVGL +A87CeVYleNE5Fx5KROU5Y388h80VgSTmV3ytu9ZuNEB8hcZqmAttZXcipMyNm9PI +vTXaDFaQVclYNJ27hy7rpaJ29WkeLKlUx/pRUpOCg3lbafSmURf5C234LZL1qe5O +0CIvY3uAzkGWMUE8ABYnef+ucULZPLa2+Y9wIx76oP5tfmcM/pQhDXt+GK/bZyat +1sUmEHCVC2gjvHoZO8T7n4ccpi5v06Klj8BKVxRlGVkO2w4hlDbNyh6FWKK31nF8 +BLu/TKF70xSOzX4OFNT6/GJ8R9AyeK52f6OzNmlNUY3UsMEeX8Y2qL4hNrFhAgMB +AAEwDQYJKoZIhvcNAQELBQADggGBAL6g7NZfVTEVklJPmTFSqwQfuu0YXmIvUQIF +jvbNOmwC+nHSq6yuJFC+83R/on/IPWrkP489bEVwqaBQXnZnIMOTjIk8k9elBm/N +1BBpzWUiKNp+HqRjPCanvRCIPUZ9hWpmJy6uzG6VodEtxZgJ7lsArj0AlOYlkHHa +Ctmnl5g6H4m8sNACZaAixesb9wM8Slvn1zhpAeIYZsvIaBZOZnWuwHD1R7seh4ob +BDhDaUfXOYb0VJaKNWnJ5ItPxh4/YMSuS7mG5o2ELnzWN6OeDEQrqKFW17qWLXko +DXEfyrQnODDI+fXvasJhQ62hH33rQF/Q4yJQOEEr7gQUxtMYCxtGCumx2/5MFTuB +E8sf8FykV5jGjwdwMHhPGAmhpMJwM6i65P9GwZguqVmeFv2l4eSTmMinURlkwaAw +cx+LrigAxSKOCcnnnC6Uza1VShyDAuj+XKPglwwJd99UJlk1VG/9TXp3WZTOvSt+ +KttglpiMHyqzCYcMDTGbjPm/UsjFTw== +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/localhost_slapd_key.pem b/CloudronPackages/APISIX/apisix-source/t/certs/localhost_slapd_key.pem new file mode 100644 index 0000000..2b23ac4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/localhost_slapd_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4mQik/vxL1bdj +XcXWNT6DBVGLA87CeVYleNE5Fx5KROU5Y388h80VgSTmV3ytu9ZuNEB8hcZqmAtt +ZXcipMyNm9PIvTXaDFaQVclYNJ27hy7rpaJ29WkeLKlUx/pRUpOCg3lbafSmURf5 +C234LZL1qe5O0CIvY3uAzkGWMUE8ABYnef+ucULZPLa2+Y9wIx76oP5tfmcM/pQh +DXt+GK/bZyat1sUmEHCVC2gjvHoZO8T7n4ccpi5v06Klj8BKVxRlGVkO2w4hlDbN +yh6FWKK31nF8BLu/TKF70xSOzX4OFNT6/GJ8R9AyeK52f6OzNmlNUY3UsMEeX8Y2 +qL4hNrFhAgMBAAECggEAKO9euGAHCLYS3ckcpvzVqO5T/9TPU9beYJ7jHprez69p +eYlz3LNsqhkiWqYJ8ujVi0ixCCwOLPMcjZzTh24uIjTtCPXUbE8SHx228YVxePVo +VT88wM55CgTzY+aYvtHl/iozji733q3a+BItx7wre6i8POPwwLt51r1mU+0GP0yQ ++spwGR3POjRZeXiWYKSwhfu/STBpQXLANHCpQOmYFCbjTVpCzJ03msQUfYJNmETd 
+DqyLGbE4aBoPmekrk8GQa/gn04SIsOi8WZeNhsUT9WXyeLFV0DEwnx0sv6IwOk2o +Fqymr71fKNMIvTpCt8wB0Q/rmvPzrprC+hHIZyX5AQKBgQDol5SYzTijZQasV+2d +DqO5IxE8xl8z10bLgsExKcHC6xyhk+el0XFO1fWs0SJ9ptNuKH32C8IlfEr6M3EE +XovQcRfT4crtnWhLmGPAFYKo91f5fiKy1YWggEtsnY+OODo34RCtORtKD6+iC+XE +LFbLMNQA6sHXVONthiEQ6fhIkQKBgQDLLPHgFFL8kSj+Tt/7ctRnwA7qPTUtAwWG +b2QzDMEpifu4O9DJWoE3sfMYkQhyz2g6S+O5XXm56CDtkV+Miazt29z0dM2Jz7uV +NLtymba/s6wBiWFUggHA4Dro1vYa4MJ94ampqi+XPJaJP/j6WoYIu/JhKQDIBtlP +ARaG0O3D0QKBgQCYMyB8wMXQPfqY6kzFml/OdUEBWPnRTYy4/U34Is/1Aa7RmJxb +6GrR4LaLqKp+OJ1gF0UdrWIU73mMsf7BkjDBbE/gSX9l77vgw856UlkWwgwiacTA +63IureUtJQlcUjTefftQru7JjuwqCMkIjs8Y1VHVa8j+ZEESWVPn4oKi0QKBgQCT +4YnHlGN2q91PhG9ooILTZSo1+gj7UyixWeBve8gYiPMrfHYSKIrG1AHhjqa8khQF +4njE0bGoy7kz0UzfiNHSauYfE+kKdqXNCw2ocxNd4tO+ZpTuIpZOIacfFF8a3x8Q +6rBH6rQq+xGCooqBBmRqdQoNCAAmlz2SUHNp+yYkEQKBgQDNBbH7WPjiggahF0TI +oweS86hQ9tDlUFMg/On2eMOY0juSU1yPsPA/JsIglSTDWGRasjFDhd29FVrnGGY5 +5GHy/Gh/6ZZUdrJVsypGw/Dy9amLgmkKTJU4SWDYOb6s1ocGvNPFSYgw0yFe56nx +TU+2zHJo/t2FXssGfnbFWrQAxA== +-----END PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/mtls_ca.crt b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_ca.crt new file mode 100644 index 0000000..f5895b6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNzCCAh8CFHIjXWyzoKYAEb8cJgTxRYdhZDu0MA0GCSqGSIb3DQEBDQUAMFgx +CzAJBgNVBAYTAmNuMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhh +aTEWMBQGA1UEAwwNY2EuYXBpc2l4LmRldjEMMAoGA1UECwwDb3BzMB4XDTIyMTIw +MTEwMTY0OFoXDTQyMTIwMzEwMTY0OFowWDELMAkGA1UEBhMCY24xEjAQBgNVBAgM +CUd1YW5nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXgu +ZGV2MQwwCgYDVQQLDANvcHMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQC6QRtbK1qZg1gUuaVWn0M8hw73H360hH6EvHtEil8meni3W000HpCsi/JdDSr3 +tD6Z88O7x0IToPCT0NsAWA0B2LK1oshFkiyIXiq4CWgfwM9qd5GIwA71WUzJ6Jkq +kz0r3M3/ogo0+Z7RKoKilvBinqVjTZhRpd63Dg5cLrgPBKFGUBMfRPmKSgwPSWfV 
+V85SuHlzpcgcK09NHgSLu2DlFGK+lXGWTLLsrb4F0GAAwL/lk4kplcHK0IZCxfJJ +puXynmoOmgWKcZcHipgv4+LY6+8K+8Lh9FF6ZXOuW7RLwTY1woLMKK2u40fG4C0I +Wcyh+vzTivCrJ72pSC3rYGX5AgMBAAEwDQYJKoZIhvcNAQENBQADggEBAG6RnAvo +AMQPog3TIAtIrXi1JPfJ5kuI26KOn/yUlVIoUjbqxkeELwmoUF/K4eg5sT6RX/8U +0gFVuBi5FZXfPZUt/JSN+fVRSoMP9d1K8ImVpu5gxdp5UZSINlCLvessMx9vafjQ +EwDcACseel511LIe0rOlVvwHeM2P9pNIMfoexGP0U2U5ubZIO8Ye4ZbNieHYgNCN +UgJpadvBOC8I3eML2hx79di5y4R1niRXhAd1IYnL9eK4xUoHwyMtl/1kXtq/nrXB +0njzpqb0GQg0badsF+7v+QM/zrbSSwDTzriCCTWSrd9ze4HYoRqCV6Dc3DjqmHq2 +j4wg2QntJBQWmSc= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/mtls_ca.key b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_ca.key new file mode 100644 index 0000000..94672d7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAukEbWytamYNYFLmlVp9DPIcO9x9+tIR+hLx7RIpfJnp4t1tN +NB6QrIvyXQ0q97Q+mfPDu8dCE6Dwk9DbAFgNAdiytaLIRZIsiF4quAloH8DPaneR +iMAO9VlMyeiZKpM9K9zN/6IKNPme0SqCopbwYp6lY02YUaXetw4OXC64DwShRlAT +H0T5ikoMD0ln1VfOUrh5c6XIHCtPTR4Ei7tg5RRivpVxlkyy7K2+BdBgAMC/5ZOJ +KZXBytCGQsXySabl8p5qDpoFinGXB4qYL+Pi2OvvCvvC4fRRemVzrlu0S8E2NcKC +zCitruNHxuAtCFnMofr804rwqye9qUgt62Bl+QIDAQABAoIBABfHXiW6mDuHIESt +GuW/OYdNuuRj+foz/C8YHSi3/cPc2PKXznh7+n5883lbyAON2HwxOekMXGxDHNPS +U1Ns6mQ09UPpP2ZabiMO2qdaVBfRtulh0IvD8WTzfLE+Z+eemq2x5/7eAi2XPOZ5 +Zeo6GQCOPpE6A9tQsOlv+vdb45XPCsDdyC2WukeUfB2wsFBFEgMmcW7HLuzMYp9W +JUuQ6OcOf52ePkGnmEuXIK8Wc3RXitivHlkPLO49Y1LG8t4f61HfG9b2pvgNbHn2 +YdkzemCkBMSjzV4oAoIuG56IWBwuqXETRj2xduVcoxd0t2vLK9qlPADBMCa0hcnW +uCBqGM0CgYEA3ZjZQuQqer7XrA0qVbXdOdNXal/efUpqfxdxlwMug5sFqt3rSwVF +S9ZbbtGFeQZp4UAG41o2bWvXuWg59QnKOkKleVQ8zlQdPPjHSOz/7YwXcf/gNYqN +WHSaT8MqaDYY2m0bo3t9SwSMIBxas05guYuBenisOh3jjcaRPYJELucCgYEA1yuY +vlIVksPGN+vx6RBaOyY86B8MuXyvi0jVf96UvnL7soUIZDDWojGZvlq3r7ol8Ljj +LF4Wqdg3MxoOfmUDNWCnSZPjsFo1AUFP/2MJiyCDq75yO0CoVj54QsVYkDTnTosp +at3CKWTueIeOOtXUAKTObMjrWi9A7cF4rS+siB8CgYBOO2UQcX7xwKhhjHBSvBbz 
+EEK/QkNJFlmMrtkiSDRGsBcLILetz5mMUYwMDppBhNsic7k60KGAdd8+DKbRdHhZ +oyfKMswYx6de3DF29HzR/3BThdNA87486UWFPVCeY+LYUka8q58rOdrCh2AaB2Ss +fKzkcO/UwLKSXfTusyuhJwKBgEIEslzSuqPJRawqzJKB3e2AEff2buUKiKHnuvn8 +xQ6aIPfpMWXsRi6FoXJySyGzr6hoUetvAu0h1e3r9L57J7zc5vcAVT/qrZCxBWaK +cIcrdrrfOBVOBVhQ2n1CJ6Y3VTEYKaEMYWJqAXEhxlXu/Zkk9+EQ1IVbMkTAs9IP +apRpAoGBALd3ZhaP9WvAUVVFs489CKFEP14dZi+SRM6pCsqaQ+zxkW7Cc3Vt72rt +d+CwIiUMJlac/Efk2Za8MKn/ctMngvRrym8SxGjxJq9jhGo/jSOkHnTy9W//hE/X +SqlmnJHek5EXl+w0HjgFdukyIo8athDj0jS2ch3OPG4tgavRpKAI +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/mtls_client.crt b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_client.crt new file mode 100644 index 0000000..378041f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_client.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDUzCCAjugAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE8wDQYJKoZIhvcNAQEN +BQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN +MjIxMjAxMTAxOTU3WhcNNDIwODE4MTAxOTU3WjBOMQswCQYDVQQGEwJjbjESMBAG +A1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGjAYBgNVBAMMEWNsaWVu +dC5hcGlzaXguZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzypq +krsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5oIHkQLfeaaLcd4ycFcZw +FTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6WxcOza4VmfcrKqj27oodr +oqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv+e6HaAuw8MvcsEo+MQwu +cTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E0s+uYKzN0Cyef2C6VtBJ +KmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT/FpZSXm4iSy0a5qTYhkF +rFdV1YuYYZL5YGl9aQIDAQABox8wHTAbBgNVHREEFDASghBhZG1pbi5hcGlzaXgu +ZGV2MA0GCSqGSIb3DQEBDQUAA4IBAQBepRpwWdckZ6QdL5EuufYwU7p5SIqkVL/+ +N4/l5YSjPoAZf/M6XkZu/PsLI9/kPZN/PX4oxjZSDH14dU9ON3JjxtSrebizcT8V +aQ13TeW9KSv/i5oT6qBmj+V+RF2YCUhyzXdYokOfsSVtSlA1qMdm+cv0vkjYcImV +l3L9nVHRPq15dY9sbmWEtFBWvOzqNSuQYax+iYG+XEuL9SPaYlwKRC6eS/dbXa1T +PPWDQad2X/WmhxPzEHvjSl2bsZF1u0GEdKyhXWMOLCLiYIJo15G7bMz8cTUvkDN3 
+6WaWBd6bd2g13Ho/OOceARpkR/ND8PU78Y8cq+zHoOSqH+1aly5H +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/mtls_client.key b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_client.key new file mode 100644 index 0000000..d7e381d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAzypqkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5 +oIHkQLfeaaLcd4ycFcZwFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6W +xcOza4VmfcrKqj27oodroqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv ++e6HaAuw8MvcsEo+MQwucTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E +0s+uYKzN0Cyef2C6VtBJKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT +/FpZSXm4iSy0a5qTYhkFrFdV1YuYYZL5YGl9aQIDAQABAoIBAD7tUG//lnZnsj/4 +JXONaORaFj5ROrOpFPuRemS+egzqFCuuaXpC2lV6RHnr+XHq6SKII1WfagTb+lt/ +vs760jfmGQSxf1mAUidtqcP+sKc/Pr1mgi/SUTawz8AYEFWD6PHmlqBSLTYml+La +ckd+0pGtk49wEnYSb9n+cv640hra9AYpm9LXUFaypiFEu+xJhtyKKWkmiVGrt/X9 +3aG6MuYeZplW8Xq1L6jcHsieTOB3T+UBfG3O0bELBgTVexOQYI9O4Ejl9/n5/8WP +AbIw7PaAYc7fBkwOGh7/qYUdHnrm5o9MiRT6dPxrVSf0PZVACmA+JoNjCPv0Typf +3MMkHoECgYEA9+3LYzdP8j9iv1fP5hn5K6XZAobCD1mnzv3my0KmoSMC26XuS71f +vyBhjL7zMxGEComvVTF9SaNMfMYTU4CwOJQxLAuT69PEzW6oVEeBoscE5hwhjj6o +/lr5jMbt807J9HnldSpwllfj7JeiTuqRcCu/cwqKQQ1aB3YBZ7h5pZkCgYEA1ejo +KrR1hN2FMhp4pj0nZ5+Ry2lyIVbN4kIcoteaPhyQ0AQ0zNoi27EBRnleRwVDYECi +XAFrgJU+laKsg1iPjvinHibrB9G2p1uv3BEh6lPl9wPFlENTOjPkqjR6eVVZGP8e +VzxYxIo2x/QLDUeOpxySdG4pdhEHGfvmdGmr2FECgYBeknedzhCR4HnjcTSdmlTA +wI+p9gt6XYG0ZIewCymSl89UR9RBUeh++HQdgw0z8r+CYYjfH3SiLUdU5R2kIZeW +zXiAS55OO8Z7cnWFSI17sRz+RcbLAr3l4IAGoi9MO0awGftcGSc/QiFwM1s3bSSz +PAzYbjHUpKot5Gae0PCeKQKBgQCHfkfRBQ2LY2WDHxFc+0+Ca6jF17zbMUioEIhi +/X5N6XowyPlI6MM7tRrBsQ7unX7X8Rjmfl/ByschsTDk4avNO+NfTfeBtGymBYWX +N6Lr8sivdkwoZZzKOSSWSzdos48ELlThnO/9Ti706Lg3aSQK5iY+aakJiC+fXdfT +1TtsgQKBgQDRYvtK/Cpaq0W6wO3I4R75lHGa7zjEr4HA0Kk/FlwS0YveuTh5xqBj +wQz2YyuQQfJfJs7kbWOITBT3vuBJ8F+pktL2Xq5p7/ooIXOGS8Ib4/JAS1C/wb+t 
+uJHGva12bZ4uizxdL2Q0/n9ziYTiMc/MMh/56o4Je8RMdOMT5lTsRQ== +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/mtls_server.crt b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_server.crt new file mode 100644 index 0000000..9650082 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_server.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDYDCCAkigAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE4wDQYJKoZIhvcNAQEN +BQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN +MjIxMjAxMTAxNzI0WhcNNDIwODE4MTAxNzI0WjBbMQswCQYDVQQGEwJjbjESMBAG +A1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGTAXBgNVBAMMEGFkbWlu +LmFwaXNpeC5kZXYxDDAKBgNVBAsMA29wczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAONlOihn9AtXWay72aPDFbwm2zJOe+5ngV1D3B4f2q+KlkAnjAPx +1GWO1buRDEALL8g5NfbZF3gU9v+wjsEsFK/Sn8ejtziVwJUFVmvRr+PCm/2DEARN +Re1cp0cwRVQJAKLVFpy19ALlSSTQRoCAjLVXC8tEsIxrvN3DVVet9g6AxnPPd4oR +LosDGQ+p+qbriQdx20gg5MHmjZX+/ByZq4BIQkshmQW2LnwxAS3xOpqPmFHmdn56 +RXw8JlyvYS3KRiGU3z59uph4wnIic4r/11Puj1LoGd+YtFJash6ZRU/rM6JSdPS7 +b53m8HdRcjGdBG+EnsqN67qZUWbGBntmu2cCAwEAAaMfMB0wGwYDVR0RBBQwEoIQ +YWRtaW4uYXBpc2l4LmRldjANBgkqhkiG9w0BAQ0FAAOCAQEAMAxCZmKwWEDHuAzW +PHJUyrdK1eos2UExmeq9TY8c7IluIMClTxS8ui3+7+oc6ZqffyrZL4jx7oe8Ua6V +bat75H+OF05f9ziGJjJY5PM3wxOP1QvR7HePSkwBkNPhGOQf3bi7rQEhBuqhpRfr +GfPmzKejaUm9m8IiHnFKxjTfQ7pm3hR8/+P9LKDO21i5Ua3ec+rKv0Y1jsCuv3t/ +APMN7MTDsFqxudqbOG3dufOSe1E7qs16/ygTRvYpIe+kz4rldGWmo0joOrrti43T +Oi1BAGaC3znJe3aaihr08c37NZ/A6WHiX+h5wBEdboOJc4Htytkicd8jBvU2Svjq +dZS3wQ== +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/mtls_server.key b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_server.key new file mode 100644 index 0000000..a6ed448 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/mtls_server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEowIBAAKCAQEA42U6KGf0C1dZrLvZo8MVvCbbMk577meBXUPcHh/ar4qWQCeM +A/HUZY7Vu5EMQAsvyDk19tkXeBT2/7COwSwUr9Kfx6O3OJXAlQVWa9Gv48Kb/YMQ +BE1F7VynRzBFVAkAotUWnLX0AuVJJNBGgICMtVcLy0SwjGu83cNVV632DoDGc893 +ihEuiwMZD6n6puuJB3HbSCDkweaNlf78HJmrgEhCSyGZBbYufDEBLfE6mo+YUeZ2 +fnpFfDwmXK9hLcpGIZTfPn26mHjCciJziv/XU+6PUugZ35i0UlqyHplFT+szolJ0 +9Ltvnebwd1FyMZ0Eb4Seyo3ruplRZsYGe2a7ZwIDAQABAoIBAEgQ8sePenaFrnPh +7O3Li/3fSqS83uYFg6gtM3uQmNv9TfTzE5rEb43oILCbHYjGgtQv3Xxn/Nofus/6 +AqQR9lRqqhy5M/4I58nSsTrmb5n9OTa07MSQQNMjBBi5oZ8qYzs30TzFJZotVGsI +Xu+mzfFCrwgysskt8+NMXqW1CkA50pvipVLtjULZ0p8XQqggV8kQpDGUr4eQ36OH +ekImj4K54GbO4z9IkuiBS/b+J687/hGMPYj5XPS18OU+hQaZnjzWPviAcnsGy8l7 +1dDL9bgUFjGvyVLtK+g4meRYRshymq93e4CdSwssTt/Gnbmc6UxsOhTAW5vzn/e2 +GDShxJECgYEA+1pKXkMdqaj0aIYmGpiaX8FgZvnxruwDXwDIFE4p9zIdrqDAtAk6 +xBBUM6f1+IvvhyqeADOGr78AHFGz8YG0Vcp2+2cw8sRM9ibceIBRwJ/QwW/P1R7q +N0phykACW+fcGYhb/gyu6HWT9B8NSPBBGggL0LA745E3rSnsIeXGvJUCgYEA55mK +mwuBzU1yusfb24cYJQqBmb9YleWouEDerrHbFoYCHFi3/E3OyGbuBy2n2wYTS0k5 +PTX8tOWmqM4TsH2JawwMdtNJ+6B8qvAYHSacwLIr4GcZzyJd/ikF3ujXm0da0NA0 +f7rz20kRj8GhcksQTMtWtOXCXJyonNNxZgrQ3QsCgYBjaBcnZoXRtpdKy1tAg3/y +ROlacJlr472FkiqPFUa1k+V3Te5IhanvJsIWV+QIs1c87tbkH3yx/ukNSibPacun +blZWIT6TlJ0XcNEa+yzZ8JrAFfdtQzfAPDOmqGAGdxFuK6auN9fo6a9lCe7YHOSy +ZeI+W6Sj4KfTXVQdJ+HMbQKBgE8DlEU3VM6NSMIuo3SvD267ueGRZZCmbLyH7TEe +nsd9asTvA75BcXXvn++1BNp1pSl/TtbyT0gMPaLDw/XnrnVmA+6aQVhmtYHALgnr +/XjEkLGbmzOO3xByQH1/ZOemHXa2QeL+DmpW8HXiMsmCkIoSqX9ID9p23BO9E6gj +soRnAoGBAPkmGcoz96/pb52QyMKcEo1pBK0sqRsfZW1Cpz2/hg2hA3BLLaZIBCEj +gtXcknib9CLwh4DxBiew3/41pMq1fq1aGTWwf0c9PjolOB4E+Z2NQsqEydTso7jP +B6M4+3xrWaHkrHFOhOT4hoBgyUJPzQ1fOiSn/0mXHxjfJ2pRF3Xk +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/ecc_good.crt b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/ecc_good.crt new file mode 100644 index 0000000..da1b9c0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/ecc_good.crt @@ -0,0 +1,45 @@ +-----BEGIN CERTIFICATE----- 
+MIIC+jCCAWKgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjES +MBAGA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDzANBgNVBAoMBmly +ZXN0eTERMA8GA1UEAwwIdGVzdC5jb20wHhcNMjQwMTEyMTQ1OTUwWhcNMzQwMTA5 +MTQ1OTUwWjA9MQswCQYDVQQGEwJDTjEWMBQGA1UECwwNQXBhY2hlIEFQSVNJWDEW +MBQGA1UEAwwNb2NzcC50ZXN0LmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IA +BIuZCmq/vmX5LqFrpa9ot5SboEhNyS/r5UGT7akjIOAXBVwZkn1vm/EsQp9VMF8y +rWZkGKFmElo0ZAXAyhjn9D6jNzA1MDMGCCsGAQUFBwEBBCcwJTAjBggrBgEFBQcw +AYYXaHR0cDovLzEyNy4wLjAuMToxMTQ1MS8wDQYJKoZIhvcNAQELBQADggGBAIEm +LKKS+eGBazPpSRvq2agnqmjM+PHVWRB/O/+LNOO69Lji3wRtq6T2zNHPZQXw1OMA +3C9HcIwaawTyb+hm+vX8yBr5mgS1UOtmDYzbnlpERjJBjxmPXTZLDbzogHshbabp +227p/IAjWm/2F2VPXjiX+aV1pYrhCcO7zUtBEu9KaoG3Amxg8T2WVamTV+J6r0SL +fkvYItZwbawSfwQlZ+22H4Mttu/bd2USTusT4zLAflv9UFh20bA1PizvcKK1brWS +IH2rxxSLCvu2wmrGsrLVn+9yD6xNsn4m6DyCWx9S/Tas7KLub8BjnCzP8YEvrVpV +fotefEMY5h0waj9Zc32l+6gk8Ntyp2ozWi+iu4eo0Y5SUqHlPjuGUXOivp5o/6b0 +gF5M9jtkXvbH2ffrOiz9YUo4fVwk6ws5OQTr9WsildEHZH4ADOW6HqPYkOnuxhdM +p6JP0LmnO/S60/k/ZH8nMTcSUfE+qcDg3LlH5ay2fv6IKz5BaVkyHPNreRi9qg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G +A1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa +GA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n +RG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM +CHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe +cvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb +VDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR +2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr +abf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2 +WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/ +Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1 +/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh 
+/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj +cTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ +tSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl +c3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC +tC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY +1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl +PYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob +rJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy +hme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1 +7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y +IJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve +U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/ecc_good.key b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/ecc_good.key new file mode 100644 index 0000000..23d09c3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/ecc_good.key @@ -0,0 +1,8 @@ +-----BEGIN EC PARAMETERS----- +BggqhkjOPQMBBw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIHMwGqSAcIFnsy8Sa6NxlSmGuOXV13SbZbZVIobN+3xboAoGCCqGSM49 +AwEHoUQDQgAEi5kKar++ZfkuoWulr2i3lJugSE3JL+vlQZPtqSMg4BcFXBmSfW+b +8SxCn1UwXzKtZmQYoWYSWjRkBcDKGOf0Pg== +-----END EC PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/index.txt b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/index.txt new file mode 100644 index 0000000..4156b28 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/index.txt @@ -0,0 +1,4 @@ +V 340109124821Z 1 unknown /C=CN/OU=Apache APISIX/CN=ocsp.test.com1 +V 340109125024Z 2 unknown /C=CN/OU=Apache APISIX/CN=ocsp.test.com2 +R 340109125151Z 240109125151Z 3 unknown /C=CN/OU=Apache APISIX/CN=ocsp-revoked.test.com +V 340109125746Z 5 unknown /C=CN/OU=Apache APISIX/CN=ocsp test CA signer diff --git 
a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_good.crt b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_good.crt new file mode 100644 index 0000000..9f31e55 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_good.crt @@ -0,0 +1,50 @@ +-----BEGIN CERTIFICATE----- +MIIDxTCCAi2gAwIBAgIBATANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjES +MBAGA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDzANBgNVBAoMBmly +ZXN0eTERMA8GA1UEAwwIdGVzdC5jb20wHhcNMjQwMTEyMTQ1OTA4WhcNMzQwMTA5 +MTQ1OTA4WjA9MQswCQYDVQQGEwJDTjEWMBQGA1UECwwNQXBhY2hlIEFQSVNJWDEW +MBQGA1UEAwwNb2NzcC50ZXN0LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAPPXWs5Bb+OuZv8D1dxReaZYiNN3gTCi6QuC7ABKq9ScufBFhdvT5K3I +6QbzphGJSetio9AS46ztX4tp2vXA20Wyw/R97reVt1lYRNCSaDqVqmdLqBDeG8RP +loLHAA+wq5fCJSm4DTHKPgN2t/KPb/D6f7x4mmNBQ7wdlxa0i4r9lWESWGC7maq1 +Skf9W6iMyYxP4OhDaOekMaHDA3zDijq4tO10JTHG+4L5WsZ6qFxfqJmrfsr9uwgG +zuX4m3PEau3KqSWcgPosm2vxSOJJWUja5PQi6W0xbCOtrRyF/HEWBaxBJB9CayXC +hFvmTrViaazW7OuBNcqSeIoLktQqGOMCAwEAAaM3MDUwMwYIKwYBBQUHAQEEJzAl +MCMGCCsGAQUFBzABhhdodHRwOi8vMTI3LjAuMC4xOjExNDUxLzANBgkqhkiG9w0B +AQsFAAOCAYEAkHi0FLFQbPANUxXIIYjR0dVt5xb0SoAet0TxAAzoJaO1v7jrXok+ +DBZu9dOftDIX5jB8vne7JSKCl1ibpsYqpOW9AjFjUTtKkirXcsKQKs+sW1Vue0uu +xPx1IAbI475X8emIB3vH5S/eqe8ep31pJkFxoiWSafKB9gpXzpD6NEteLr6oK67F +bdIZHEdxIuiu1SQEeN8ShSoIWcVkWavsP5ziXhi+PxK4CKYQoHyFoBFWk7SXhCCA +mKhnvcOjR9Cq/ZtkAe/G31x9nYQ6blJejRDxHOqgK+eke9+8qPx2oTLwraodPRVv +0O5NpI0SQw8+5KcWpz/vq0NZFHh0SqSh82/IJvgxSab51VLdU2lxNxsllTNpDN9F +LtXT5SRgRy/gXs6bOq6tszHTNE7t6hlCGlWfaRNUHfRsdyOfim0JwOpusmE0yR7R +v6jYCk8LyJM+1oppp71cUtzrMxWEn/bC0M9TQuwb9fHFgEFU1VWjJrcaSfFGf61m +uzgYQtn5uERq +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G +A1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa +GA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n 
+RG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM +CHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe +cvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb +VDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR +2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr +abf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2 +WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/ +Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1 +/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh +/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj +cTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ +tSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl +c3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC +tC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY +1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl +PYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob +rJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy +hme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1 +7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y +IJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve +U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_good.key b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_good.key new file mode 100644 index 0000000..7c2f3b5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_good.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA89dazkFv465m/wPV3FF5pliI03eBMKLpC4LsAEqr1Jy58EWF +29PkrcjpBvOmEYlJ62Kj0BLjrO1fi2na9cDbRbLD9H3ut5W3WVhE0JJoOpWqZ0uo +EN4bxE+WgscAD7Crl8IlKbgNMco+A3a38o9v8Pp/vHiaY0FDvB2XFrSLiv2VYRJY +YLuZqrVKR/1bqIzJjE/g6ENo56QxocMDfMOKOri07XQlMcb7gvlaxnqoXF+omat+ 
+yv27CAbO5fibc8Rq7cqpJZyA+iyba/FI4klZSNrk9CLpbTFsI62tHIX8cRYFrEEk +H0JrJcKEW+ZOtWJprNbs64E1ypJ4iguS1CoY4wIDAQABAoIBABV8oZzROVnX0W2h +WeQLLewRmyT/P9wYTu7bv44bBl863Eum5K/FUT5bGOWq7LRY47GhRIweTf+7/xJa +5peHQgs3QHs36aQ1xi1SUOYMMLEQ5S4rBYlO+SVoWfv2KzQ2vjgmPH4boNYFW0eU +24q9RwD2IfFqszgR1TUralfu2ukJWWi1zuNN4jJZfXZWcPLV26wMAdQrGhf7CnQ7 +1QN6SFNBKxpiKIXCs7ki2VmzAVg01FpmVsHkuRvn80dZ1FkkfXXaAYqNEulisAq0 ++Zv+QWXrK7IpmRpJQszr7VBGLlBrMTp9wFq+ep+Bui9e26pGXYETPnchUqtBFk5k +yylxduECgYEA/v8cJEX/W8H2//Dl2tOq+F6RKwxX56dlrs6LBSpYh9982CXrALG4 +Xuq8VmnjYYpdVxSyxMkex+Dih06BpQMmQci3mzHPGAPzkiFSxsogQBlB1MF4RphD +4UutZNdsmQb+l2NTGIyQR9TgLXjwKfEesia91HQGCTDPFzku/Xg0WBECgYEA9M0B +s9sLb4DC6LDoGlSlmIfKmJgZgbwjRhUI3Foplpzzrc+A23MaYbuhwVszch7S0Tib +OtEtJWTjryGAG5a/eCsVHtyAnJWxJiHV8yJ+xN1MTXIh9T6Xlablip8cBaHKZxLC +vhu5ZEIyGddYa2B6hG/x1ydMoz5pdMFGjKkFNbMCgYEA55HNgLOAn1eac/vVAdDP +pxZaRvnCqsE+em1fmqVGGL5AphppPAwpHymVN/SZZe89rONDJapvpZz4m2AUJEKj +74HUG8A0Dd8ox0Az6AuPFibZvdik3ZdRrbwID1gDa0UK13h/8f9U16benu0BTVWH +RsogAlwLTzVgG/r2TYFoJ8ECgYBa/u14Fp88lmddKY1NZFOdzDQh3r/0eqO+BEmj +5xv4cWUfIbfrWvDejWmGP0lzTUPeI6WICoM2mDcOPWyqVLHdkF4sd5iTHA2aeA9Y +bmUi9oPLcfZvfBHKvhwrGBPJgCeFgvLCyfly7CxFcMfcOiOwoRALgv842xVGIiYA +WT+ngwKBgAk1xBsENJlEGz6aeoby6ELx3QP9cQX5GG7YtPrrl9BziRIPc5YNJZfA +guw8rBxO72ilChrJIMfE8PZx6L4LJ+N1VTRgZ9T8F9ZGopqUtquc4OErmAYR0rH2 +ll/i9QPgHzUYm4L7kN2J/cejJnzhANnBiJbgE5wHUHsjT5sv3trO +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_revoked.crt b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_revoked.crt new file mode 100644 index 0000000..72ef51d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_revoked.crt @@ -0,0 +1,50 @@ +-----BEGIN CERTIFICATE----- +MIIDzTCCAjWgAwIBAgIBAzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjES +MBAGA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDzANBgNVBAoMBmly +ZXN0eTERMA8GA1UEAwwIdGVzdC5jb20wHhcNMjQwMTEyMTUwMDA5WhcNMzQwMTA5 +MTUwMDA5WjBFMQswCQYDVQQGEwJDTjEWMBQGA1UECwwNQXBhY2hlIEFQSVNJWDEe 
+MBwGA1UEAwwVb2NzcC1yZXZva2VkLnRlc3QuY29tMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtjAaX6JtUhjXkxqtOrwvpehS53gXxRioiY9sbklUKflH +63efdi4BY0IBs7IVLMo4JUbQPe4FFWFTBMDSOoyaECSSw7AJear3XZCB9H+b41rG +EXi574WNnMmRITwXDr7fJvA9QIvZK1rotUXqNNHwb5tP1VvJJh3jCRj9mzv51MQb +8HtleMntGSRGLSxEdo2bL/u33FwALV5Ayqgg6bf0gPPGc4Lqc2xwFO0HvF6ooiLj +lQjmsqkI4/jE0ElCJSHuRcbq4H0Q1riCROnmSrLRiSdEFnAUT/cf2sR5NZKsTAzO +VwIkKjigdUjyEUN4xWberGtGzwxgfG4iiSlVht4pRQIDAQABozcwNTAzBggrBgEF +BQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6MTE0NTEvMA0G +CSqGSIb3DQEBCwUAA4IBgQBBbfOcWr20MQLWjijsOPiQGWa6Z+z8qMkgSB1Ukhqj +qhuYA/bVcMcftqCCcjeAW4fgN+xBuLnHwS+iIdCjUSV8kbN19KGJNR27kDI/ShgY +HiFNqEjDOq46jo/CMJap3VO4ZdvH8+3Dk3KuOCNBej/Oe3XD5Aw4jedHxHgfGWqt +FD3nA+lZOvUVe7qgSkOOPtWsyX3xx7cvWziXHFd6TUWhSfcRIORO0ZHMF80ipNgd +KgUe7t2pOuIN8sOx98j2MHNMFQVEPZ+EweznVOvWVqbGzW5wf3pUz/Vbb+uCR1LQ +otNEEbENAEEZQ6sKpZ0pe2xuuHT+KOQ20Ty79Fs2ji9R4maiD0NTaVy2/oqYrs3G +OFA7OrPSJ+HYKCq9QP6Cu/wY5kiG328SeoHNaXGltzCxvqE3DZNzevKh9s88SBjL +pZ1hHUH++Co3pCss+ZPDjkWUFnbg7v8altE37ksdYMXOjY1OStHUzfZ4uYeC9orx +Gm5X8AE3zIgpNdiANrO/ook= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G +A1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa +GA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n +RG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM +CHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe +cvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb +VDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR +2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr +abf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2 +WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/ +Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1 +/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh 
+/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj +cTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ +tSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl +c3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC +tC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY +1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl +PYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob +rJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy +hme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1 +7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y +IJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve +U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_revoked.key b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_revoked.key new file mode 100644 index 0000000..1c47073 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_revoked.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAtjAaX6JtUhjXkxqtOrwvpehS53gXxRioiY9sbklUKflH63ef +di4BY0IBs7IVLMo4JUbQPe4FFWFTBMDSOoyaECSSw7AJear3XZCB9H+b41rGEXi5 +74WNnMmRITwXDr7fJvA9QIvZK1rotUXqNNHwb5tP1VvJJh3jCRj9mzv51MQb8Htl +eMntGSRGLSxEdo2bL/u33FwALV5Ayqgg6bf0gPPGc4Lqc2xwFO0HvF6ooiLjlQjm +sqkI4/jE0ElCJSHuRcbq4H0Q1riCROnmSrLRiSdEFnAUT/cf2sR5NZKsTAzOVwIk +KjigdUjyEUN4xWberGtGzwxgfG4iiSlVht4pRQIDAQABAoIBAQCQ5Sz0hl/ffTZm +Hj9LiUNz9ZOJ1+8/p97SmKiqBdPUFhfm45qFCQ29fU+RNL62gpWov+r6dgTA/khi +bWBFhHE7CXtX+vduNlTJqxZP9/VpGlaQqq1mG5eG7KBqCDpmVdNwSnzMiuzLGGAf +W11raNSKTsFtdLRDhl18bM212jtVxNHVJ6itFZa5Ls0/VrD2KH9PXP0J5jugKYqi +GzHmt9RfmjhOPcAIOTYNP/5n1CiHERj5KytfXjN1BHWc3rx5uakaUntBkdevXWj+ +ZSspRi/ReuVCXWeldmi6Pg9hQSX3MGVndAx8J6ilmtqSrRvaCBRU6x9KLfuTVEz8 +UXap8UkBAoGBAOkjowQb/nWI5oqDs3SduXwVE+0yn8kBCk5x48ee1Vt0eCxtnuRs +qdPuldh6czsBj3ijmJ8Ny6aHQaaVuIlUbPjBDZGf7IVc0hj7sQ6o5Jcu9KmLLOBH 
+62fQDeuzM+EkGOcnH8aPvs31p5bMklhycyyyTXvmZba3hvST+Ske/3olAoGBAMgN +dtFrvAQoZMIV61kNTml3ehhUeWas/ry0WUHa4iaDnVMoJ0fwiMKk7kQDvJH8gxx3 +Cr7dSnI17yBYEn0HkXlTMq0IgjJp+K40temMFwMFOWrTtGUVcbibv1Yjx+8i7m2+ +pWnfGXGd6tolWRHAMe7B68q+x1iJEjQD/Ujx3XihAoGACJg3uj8N8mdJmHGie/oU +jG56fZQQL+jJ6HpqW0GPu/9fLsQbx2/6EsYI4CIjfVlhYKEnTzXC/DCgSvPaCbYD +DmiPh37NyVzSofklXdT8GFayzk1DKkF8fCc/XCEPGI2sHVlj4n4KGq2jr/t6qagO +dudb0+V6enHpl7qcxNdPs8ECgYBYM8+CUAzKjIC4Le/hCIPc7kePuJb6FSYPTzjX +V0lEj9zqkBaZmkzB/PPsWvVmLD4ma7n6Ixkyt+LhkNM9+vtB0dPTBKBa1+xD6ouW +GCUBOOly1zp/IvBL46d9tDLvlagoDNljj3DpbiXg3nyh3epmCWwLrQe5Wl4DPwsK +gVETYQKBgDvVy2JvG51tmrqg3bEkJ+8RWELK3DeJphVmlD+unkA6ONwJTB8gvDIE +mkrxulta1cgg+u3+oJ1Pbo6P19v6xOj9vr1NHmHTd6shpfx8cHVbOYo7tBX0zKcv +cPlhtyGb3LUmaHXc22qI3ooYAKUo6r0bsK6ixBQEQUyxxHOt2fz7 +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_unknown.crt b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_unknown.crt new file mode 100644 index 0000000..d9b6a82 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_unknown.crt @@ -0,0 +1,50 @@ +-----BEGIN CERTIFICATE----- +MIIDzTCCAjWgAwIBAgIBBDANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjES +MBAGA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDzANBgNVBAoMBmly +ZXN0eTERMA8GA1UEAwwIdGVzdC5jb20wHhcNMjQwMTEyMTUwMDM0WhcNMzQwMTA5 +MTUwMDM0WjBFMQswCQYDVQQGEwJDTjEWMBQGA1UECwwNQXBhY2hlIEFQSVNJWDEe +MBwGA1UEAwwVb2NzcC11bmtub3duLnRlc3QuY29tMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEApuHmwZFhVmjPhNRmQtZwDpVa6BesX6Y6mBJknQVRZN52 +5Ac9usp6cGBiYY+2iNV3T5EcC0yTgJhrKK0Oz1WDEAYSH3I3TwMYmFdx4r/cuaHE +sxXZggkGHPaVmzFOX9wSfrB+/TERByO4FIiY1/pTa/vDWy8cNYUYMqZzeJq52zpt +XSk1njVgjQ9pJIyZmTMADqfpc/OTdiG8kXXbrAnMuMYyNExDDcTd6eJtRmks+Vjg +95IbM8x8E/hn+QpXRCb1Nj6Fj7D0t9yC2/bQVdBJNkAxjEofuCbpR3Pn/p82Y84y +OuhORvdXmBWm7RchOf773cxat4R+I1Dec3GU8vpQAwIDAQABozcwNTAzBggrBgEF +BQcBAQQnMCUwIwYIKwYBBQUHMAGGF2h0dHA6Ly8xMjcuMC4wLjE6MTE0NTEvMA0G +CSqGSIb3DQEBCwUAA4IBgQBSK6ACelC4GVbyyaN8QKwtzOkQJtbn5lMyo6YmgqAD 
+00vyrjPIc5GbuA69Jinu9mty3AMMn+UFqudWHfLXGSAl0M1LUWrxg0Qa6llEB+Kh +f6EmDNciWUK7kijuraqPxVxB4G8ZebS+SjaeptgqIW54fMQMOOzmG9DldOvIc5FF +ZMzHYNP5QTHaeGki9KxZmfxt89lTYi6ZvViW7mjpxSbecY5H2DTFWIKD7P8seHVZ +Jp4laPyAWDA157zpIvyK/zTNqnE+85ZJ2c0MrVWFXwL/7InViHASZriIOaOUBs/g +pE6RTrwpU9JhjmdYtv39SgdLAInoaxmoPeNZmr4tefLrXwn9oRHnk6RInQNSffam +vxNxD/ZKNPDZwf40ybWH5JG/SyrQr0UJAT3PWlKxHwbAz/4f6z0E/byR5nJrdFSh +dLTbfJZ5h0vaBrcBeg/NXSvW7znJWtX3NBiUq3Ns3gAJ1y8usKKWwbCsbKzUl9j4 +NoG6Jv5toAlmtCmhVuUkX5g= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV +BAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G +A1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa +GA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n +RG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM +CHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe +cvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb +VDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR +2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr +abf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2 +WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/ +Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1 +/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh +/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj +cTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ +tSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl +c3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC +tC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY +1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl +PYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob +rJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy +hme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1 
+7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y +IJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve +U/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_unknown.key b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_unknown.key new file mode 100644 index 0000000..d1d7df1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/rsa_unknown.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApuHmwZFhVmjPhNRmQtZwDpVa6BesX6Y6mBJknQVRZN525Ac9 +usp6cGBiYY+2iNV3T5EcC0yTgJhrKK0Oz1WDEAYSH3I3TwMYmFdx4r/cuaHEsxXZ +ggkGHPaVmzFOX9wSfrB+/TERByO4FIiY1/pTa/vDWy8cNYUYMqZzeJq52zptXSk1 +njVgjQ9pJIyZmTMADqfpc/OTdiG8kXXbrAnMuMYyNExDDcTd6eJtRmks+Vjg95Ib +M8x8E/hn+QpXRCb1Nj6Fj7D0t9yC2/bQVdBJNkAxjEofuCbpR3Pn/p82Y84yOuhO +RvdXmBWm7RchOf773cxat4R+I1Dec3GU8vpQAwIDAQABAoIBAGVK6siFGKLdPVBv +p55cEGoZp7MGY38vI5OYXm+cgboK+fkQmBxfuA+rwStckrvdbezitDX7hfBhE3H+ +EOYyDjpUpP1nU0DnLS+SrDKoqC4YjY7x7TLrjUVZOpeXRu4SYzt4n6vI83/040+7 +VaKKc8Ywa3RWVPX7UiO0OpRyverdQgXKxaIKukO8v1IhMQ5cshvLStFpJSLlN+Cy +2BO3uGSth7dLHG/OIz8SoGp/m5ofpBUny5FAb2EWZM/eGP/VZN2hVrYfbx5d3dVs +8AtuboH/LRCbh9IE0P5mOIZFc4r1VodozkGP83fMYiEP61pyEVDvTFcvZizJRN6h +LDNlm/kCgYEA0FdQvOjKjpogUHBJkYrH9yuSlFIwzqp+I1ZsPBUORt/czfOQapf4 +HjFtUj6JvqZW3xn7rPiwTnXys9x6KP5xzjbia6or7dL/D1q7bk0oKvqhVF1LiQ2X +545a3zErLpkMmH9fSVVyTC4x+hIfN0PlrhXh0mH2urT2+K3v7j/4408CgYEAzQ64 +tzs1zVGdIgRmiQV0eP9qoB7y3vmcxZq0NKjNyoMbXhuo3muGY33AzCY6qSdFLigI +8LLZms/7o5VYp9ckaYPnuYwABzXrxN4fz1vSAe0y64X8i7P09W+pB8uh07nJVTpJ +rSC+E4fNgvnvnaVp30G0gRj29OmxMrWQt8gvqw0CgYB88mCxastQCo8mrrDwYFLc +oX0fBsvOpeFQQBxZTCdrygYaXeBWjR14vhvaHzds50ViN6sAaYUTCRmtVKTOwQpv +qerQtxXxY4EkLD4MQKm+XOE0P191qnlXncBR6qMDJzaunnT+/ge2OF4wo32lH0s3 +xFfSXH4kKzOSoH4sXKFfcQKBgHeB8+9+B54wyYZQ0D1dO4NlQIwvXVbMXSzhO9NQ +6hbzkBipwCJYwkrruFiCkz+QToZW+NbnNWE/g6XT3YZ8IZGJOZzu1fld2Jm05w8f +sWZECqAvR39YExSTzgxoBllx9r/AJ75Jzd1uET0bUyYqiGiAT6XJmewk4ovuO3iQ 
+qA9lAoGBAKKmSj69vgldBUuLQx5AGOb5ivIou8M4f6yJBKkU/fSuUjxy5EVvAOTe +YYtZVcNymzSdk8SXEg9krCK6QHDa/H6M8bLu/aJHk6pXwPFRWPG2TyQcYDg/ItlV +FK6rvRA+dxwFMi5p1TwZIE4gVkqExSWD77jgxfA8wfbSh2B/BCNi +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/signer.crt b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/signer.crt new file mode 100644 index 0000000..ecbbc04 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/signer.crt @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqzCCAhOgAwIBAgIBBTANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjES +MBAGA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxDzANBgNVBAoMBmly +ZXN0eTERMA8GA1UEAwwIdGVzdC5jb20wHhcNMjQwMTEyMTUwMDU1WhcNMzQwMTA5 +MTUwMDU1WjBDMQswCQYDVQQGEwJDTjEWMBQGA1UECwwNQXBhY2hlIEFQSVNJWDEc +MBoGA1UEAwwTb2NzcCB0ZXN0IENBIHNpZ25lcjCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAML7FQ6P5418DkSmB6tG7Rlhc9C2rfuu+xDOZIUW2tm++6xS +m4nh+9ZeDaOX5c9oBSN93VNaeKtd8ZAAaRjToJKFJbTQ+/ELGbUm8mISMIKrpebk +BWp/S+VavLYZiGjj3FiFtIZ0+RL+z2XarQcnTqcR2qlYWVDTETzHmxJiDBJ82+if +/L5VffS2RagCDUGuAfm/XcwI8pIkHNS6Bs4TL5ik7OTuX0e5YvkwCDiIhfzLm0fM +a54Z4ImvLFfGtTQRILkTltr1RGioPRQAzvpy8zpSgux5Qd96HAT6hF/xlJKHCCkr +xBiBUUtMX5fdw2AI6sUHig5v0u+bbU3nsMF9aiECAwEAAaMXMBUwEwYDVR0lBAww +CgYIKwYBBQUHAwkwDQYJKoZIhvcNAQELBQADggGBALd3mJXvzu9TDTqfGZMkfGqD +hv/GdV0uaPpNlPZyOkS31t+iLd5R1EZz8wKLegvOdm3uKA8NFx9uB7O6mWSneJ4R +VbrcJnFzl5C/SbgWIAt/N0uujO8xuC5YWlHeig5IJc8pmnacJ4y7FhTVfsw1u93d +evGyCIJ3TyYcZTDVuZruaW+xIRa+QJt3Q+CvkMn+aaxs1ji08ZjodGVu9+jsbb7f +DLgl3FuLf8DlKqhRh+hoOPUTI496m3ZTozb7cs0TfHFPSL0pZ2Rdbz4WGHfX0LRF +E8lweAsyd12OySr2PwiRY8m18t9HrPNbk7bAaVJLqehUjvB1Am5+Lgjicdy4HNhh +gDlt5hjLV2criJpq9QmKz7Veu2V42FNJR7DumnJsbUjBVlh+rgN42j8QnPAqylZ4 +4gcjhu1cGJd0miEN+TjzzqdZqjVvINepBR1j6pOF3fdfea8IFtJ1dzTpkU4Bq2P5 +CYqmKqhkAkEIYbwQ4fqehrcMrAAno3/ikW5PqxetnQ== +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/signer.key b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/signer.key new 
file mode 100644 index 0000000..c95f012 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/ocsp/signer.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwvsVDo/njXwORKYHq0btGWFz0Lat+677EM5khRba2b77rFKb +ieH71l4No5flz2gFI33dU1p4q13xkABpGNOgkoUltND78QsZtSbyYhIwgqul5uQF +an9L5Vq8thmIaOPcWIW0hnT5Ev7PZdqtBydOpxHaqVhZUNMRPMebEmIMEnzb6J/8 +vlV99LZFqAINQa4B+b9dzAjykiQc1LoGzhMvmKTs5O5fR7li+TAIOIiF/MubR8xr +nhngia8sV8a1NBEguROW2vVEaKg9FADO+nLzOlKC7HlB33ocBPqEX/GUkocIKSvE +GIFRS0xfl93DYAjqxQeKDm/S75ttTeewwX1qIQIDAQABAoIBAF5EUBDjSBriYG+W +Kd0IBHeh4wGEYKdvGNkuP/EMdLCTok/U/Hf0NvKUNFnkhWn6K4nWP1weQHrxh2mM +mUM0hcxw7SL3audF657mfoclrihu3l273lZ3xvTTIquTupyjlZOCyR28jfM+GH1w +9Piha2hgvGvlWAE4mnvdMT75AkcpGEDvKrJec3Kkq7DfnW/AotVc8yJG0Q2JwkRJ +y0ITJBWkA+WtLscqRyp4tvSAtAByBdynuMpEskIrOOC7WtxLJD+JJtzO7H31irC1 +ON3x4czwiVKbVjxworoD5R/2Gy7JKK+pou0TNTcFrFm/+mU0Ig3VkcZTlDSTWJio +4D3CinECgYEA62fnLuxVOP9X/lgkKjN/xtTMqcDHOXz/1tyMewj6jRzFWu42b8k1 +ECDtzx1Aug8cqsA7pUvBxeC7DzyZmo3zybHRsUf9mQzUA7WK7RN4SK0O4FOZ6PbZ +1116aZqwIiYpMhXL5syC+5yVWEJkbtClMciCbDlp5Y/+PISHl24assUCgYEA1AnU +AsDKWysYoVqePfBoLDaep+5Q9VQr8T9AyXlvrqtpmLiBZR8Oh4iOGDJmFSsQOQWP +peYIuf9eTXh6DH0BhIr/wSbhleiS/ibuOPEUosnwUzC64rkcgXzofbKOyHig5o8y +45XGUzVSJQPBQM3fVEyuGV1vZKZ/2CVhnFBl360CgYBP1bMPtNLKO77J4XaSYVjK +Q80NHPXzxzK02aNC7q6aQNGlnvgTPTejuqcsAI29C/b66arQyjpzM139Mt4dDltJ +Yebtqq6Uw0b74wu0j0/Rxe8voOqnmWATq/4h5nYpfqul8sJuCZm6X0Y+4nVRJ61+ +jrO8pFQHqKfeOkwJzSt8yQKBgGCNAR8nznzpCNQgQUIPAEBxtpjdKbwsUb4OgV+8 +jiBJKVJDYZg8Jg+NHLbj7BvjegWdBKYUMxEOuVApddnN6i0CZib7n2j1eEmGTJ9d +F3pw3Z/j5pVqmRJVYEAsWFvsoceamR+MibxF4Vu9c/ggRntKV1RxeVGphzlS/DmD +WoAZAoGBAL9bHijnEzrijuWa1M9LV1eTCQyr0bihL86Z4pEI+B2NX5lWpcOQi8IU +W4wbN02e8g2u9DGgm6yg2eNbkVg4XWuaXUV7a3fGwnmtuWnxGhU/37Lrcou34bsM +3TvP055+kATwZ98X2MzvAUDIKdO9k+/s41H33frdJQgwCH5ArWGp +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/openssl-test2.conf b/CloudronPackages/APISIX/apisix-source/t/certs/openssl-test2.conf new file mode 
100644 index 0000000..1e5beec --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/openssl-test2.conf @@ -0,0 +1,40 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +[req] +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no + +[req_distinguished_name] +C = CN +ST = GuangDong +L = ZhuHai +O = iresty +CN = test2.com + +[v3_req] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +basicConstraints = CA:TRUE +subjectAltName = @alt_names + +[alt_names] +DNS.1 = test2.com +DNS.2 = *.test2.com + +## openssl genrsa -out test2.key 3072 +## openssl req -new -x509 -key test2.key -sha256 -config openssl-test2.conf -out test2.crt -days 36500 diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/openssl.conf b/CloudronPackages/APISIX/apisix-source/t/certs/openssl.conf new file mode 100644 index 0000000..c99afc4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/openssl.conf @@ -0,0 +1,40 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +[req] +distinguished_name = req_distinguished_name +x509_extensions = v3_req +prompt = no + +[req_distinguished_name] +C = CN +ST = GuangDong +L = ZhuHai +O = iresty +CN = test.com + +[v3_req] +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer +basicConstraints = CA:TRUE +subjectAltName = @alt_names + +[alt_names] +DNS.1 = test.com +DNS.2 = *.test.com + +## openssl genrsa -out apisix.key 3072 -nodes +## openssl req -new -x509 -key apisix.key -sha256 -config openssl.conf -out apisix.crt -days 36500 diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/private.pem b/CloudronPackages/APISIX/apisix-source/t/certs/private.pem new file mode 100644 index 0000000..76f0875 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/private.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA79XYBopfnVMKxI533oU2VFQbEdSPtWRD+xSl73lHLVboGP1l +SIZtnEj5AcTN2uDW6AYPiWL2iA3lEEsDTs7JBUXyl6pysBPfrqC8n/MOXKaD4e8U +5GAHFiwHWg2WzHlfFSlFkLjzp0vPkDK+fQ4Clrd7shAyitB7use6DHcVCKuI4bFO +oFbdI5sBGeyoD833g+ql9bRkH/vf8O+rPwHAM+47r1iv3lY3ex0P45PRd7U7rq8P +8UIw6qOI1tiYuKlFJmjFdcwtYG0dctxWwgL1+7njrVQoWvuOTSsc9TDMhZkmmSsU +3wXjaPxJpydck1C/w9ZLqsctKK5swYWhIcbcBQIDAQABAoIBADHXy1FwqHZVr8Mx +qI/CN4xG/mkyN7uG3unrXKDsH3K4wPuQjeAIr/bu43EOqYl3eLI3sDrpKjsUSCqe +rE1QhE5oPwZuEe+t8aqlFQ5YwP9YS8hEm57qpg5hkBWTBWfxQWVwclilV13JT5W0 +NgpfQwJ3l2lmHFrlARHMOEom5WQrewKvLh2YXeJBFQc0shHcjC2Pt7cjR9oAUVi6 
+M5h6I+eB5xd9jj2a2fXaFL1SKZXEBVT6agSQqdB0tSuVTUsTBzNnuTL5ngS1wdLa +lEdrw8klOYWrUihKJgYH7rnQrVEVNxGyO6fVs1S9CxMwu/nW2MPcbRBY0WKYCcAO +QFJ4j4ECgYEA+yaEEPp/SH1E+DJi3U35pGdlHqg8yP0R7sik2cvvPUk4VbPrYVDD +NQ8gt2H+06keycfRqJTPptS79db9LpKjG59yYP3aWj2YbGsH1H3XxA3sZiWHkNl0 +7i0ZE0GSCmEMbPe3C0Z3726tD9ZyVdaE5RdvRWdz1IloA+rYr3ypnH0CgYEA9Hdl +KY8qSthtgWsTuthpExcvfppS3Dijgd23+oZJY2JLKf8/yctuBv6rBgqDCwpnUmGR +tnkxPD/igaBnFtaMjDKNMwWwGHyarWkI7Zc+6HUdNcA/BkI3MCxwYQg2fr7HXY0h +FalewOHeJz2Tldaue9DrVIO49jfLtBh2DYZFvCkCgYBV7OmGPY3KqUEtgV+dw43D +l7Ra9shFI4A9J9xuv30MhL6HY9UGKHGA97oDw71BgT0NYBX1DWS1+VaNV46rnnO7 +gaPKV0+bTDOX9E5rftqRMwpMME7fWebNjhRkKCzk7CsqJN41N1jVTBJdtsrLX2d8 +UbY6EpjogFJb9L9J2ubUqQKBgQCk6oKJJbZfJV/CJaz6qBFCOqrkmlD5lQ/ghOUf +EUYi0GVqYHH0vNJtz5EqEx9R7GPFNGLrGRi4z1QLJF1HD9dioJuWZujjq/NgtnG6 +bgSXJqJc52Lc4wB99AyfuL2ihSrTFmjSRx7Puc9241hTha7Rgh+vNOkq2HsH9FR3 +TTRv+QKBgG5ph+SFenSE7MgYXm2NRfG1k8bp86hrt9C8vHJ7DSO2Rr833RtqEiDJ +nD4FbR0IObaBpS2VJdOn/jBYXCG0hFuj+Shxiyg/mZN0fwPVaRWDls7jzqqPsA+b +x3XKRAn57LY8UbsNpOIqZ8kjVLPZhgfYwfOI3yAeSMv4ZnRY/MWe +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/public.pem b/CloudronPackages/APISIX/apisix-source/t/certs/public.pem new file mode 100644 index 0000000..f122f85 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/public.pem @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA79XYBopfnVMKxI533oU2 +VFQbEdSPtWRD+xSl73lHLVboGP1lSIZtnEj5AcTN2uDW6AYPiWL2iA3lEEsDTs7J +BUXyl6pysBPfrqC8n/MOXKaD4e8U5GAHFiwHWg2WzHlfFSlFkLjzp0vPkDK+fQ4C +lrd7shAyitB7use6DHcVCKuI4bFOoFbdI5sBGeyoD833g+ql9bRkH/vf8O+rPwHA +M+47r1iv3lY3ex0P45PRd7U7rq8P8UIw6qOI1tiYuKlFJmjFdcwtYG0dctxWwgL1 ++7njrVQoWvuOTSsc9TDMhZkmmSsU3wXjaPxJpydck1C/w9ZLqsctKK5swYWhIcbc +BQIDAQAB +-----END PUBLIC KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/server_1024.crt b/CloudronPackages/APISIX/apisix-source/t/certs/server_1024.crt new file mode 100644 index 0000000..85c6442 --- /dev/null 
+++ b/CloudronPackages/APISIX/apisix-source/t/certs/server_1024.crt @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBrTCCARYCFGohAv7D46F+kSOf08X/MLwtazC7MA0GCSqGSIb3DQEBCwUAMBcx +FTATBgNVBAMMDGNhLmxvY2FsaG9zdDAeFw0yMzA1MjIwMjQ3MzZaFw0zMzA1MTkw +MjQ3MzZaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB +jQAwgYkCgYEA2YEV7+FWPl2R9EOSvi2iPyymiUnSaYhIaTuSoqRISOjCSmgrmKpJ +yDN1Cg2hqBHPkWW8BuphpV405ja+94xvOEc0qcP/Or6zDhDfWcaoqxGRAcdwHDgQ +XYMfOxlhwWCp5+vWKep3FPXpHamE09PqUbKWqIa/16aK/1sFR7Q+JJkCAwEAATAN +BgkqhkiG9w0BAQsFAAOBgQCA5aSfk4lZjAEFQ9mWN7Z97b9bnwLw2bv4PgAhDtas +0IxIvhuwR4ZTFv+Pe4PNNNvfnMgTRUWFVpjywUX7Km+k9WFavrikPgAGEfzkR2HR +WE4ETNuS7sGxczosqD+00An4lZ+58uYGEitUOJ6xO80NIhNnOGgo5N/d4fFUTyYH +bw== +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/server_1024.key b/CloudronPackages/APISIX/apisix-source/t/certs/server_1024.key new file mode 100644 index 0000000..2823346 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/server_1024.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANmBFe/hVj5dkfRD +kr4toj8spolJ0mmISGk7kqKkSEjowkpoK5iqScgzdQoNoagRz5FlvAbqYaVeNOY2 +vveMbzhHNKnD/zq+sw4Q31nGqKsRkQHHcBw4EF2DHzsZYcFgqefr1inqdxT16R2p +hNPT6lGylqiGv9emiv9bBUe0PiSZAgMBAAECgYBvf5j7S4ymk9kKWsmS7FnMANuu +bUWMC+zy5TMaZRUZKxjOg/A1ZrZEBvzslmhUfCzn4DsvYF+GInEDwvTKehdY07I+ ++hpv1M9Wa7v12ofRWNvZjsbMHfiWM/pFBZFYryV4sQ4qHFRj3TKgXu3pZWPx41wn +ayMUtxYRR3Lez4UiMQJBAP31OIC65xbWGr1W/YJ6IwOPFBgyB6O6qFTcR/lAdJ6H +6MVVs8XEWC4o/ZTk8RGog2nWzVsRCN2pqQUGUHBGRE8CQQDbQNL6eGbsuSxM1uUS +PjrAs5t9rfrwpx41ubjRoGIukEYeX1YXDf4WICe/51vE3jvVfVvFOJuvGoO9QqzB +LgaXAkEAtyRG0R74VBGnSvAW9idaZNCj7yb1N2/+wOPyy59d+o2MofLCKFcGOJO6 ++8t2xgM+ce9EPO419JTLnSIGlFE4JQJAGueHfCjOKHpIj11HWse8Ge1wRSnWQzWe +pWUW4tJVefVGRW/ZdpbG+RwVBJ11S2Eh4n6xhi/+GqycQdsuq73kHQJBAPCknTOP +KpiH5qtKw84nsF+RkWZQ6BhzvP5OuW4avQBx1jQUjpjUhOWfctFH7MLI92Ti+iUS +MYi+HgfT9Lx4kbc= +-----END PRIVATE KEY----- diff --git 
a/CloudronPackages/APISIX/apisix-source/t/certs/server_enc.crt b/CloudronPackages/APISIX/apisix-source/t/certs/server_enc.crt new file mode 100644 index 0000000..81328c5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/server_enc.crt @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIB2DCCAX6gAwIBAgIBAzAKBggqgRzPVQGDdTBFMQswCQYDVQQGEwJBQTELMAkG +A1UECAwCQkIxCzAJBgNVBAoMAkNDMQswCQYDVQQLDAJERDEPMA0GA1UEAwwGc3Vi +IGNhMB4XDTIyMTEwMjAzMTkzNloXDTMyMTAzMDAzMTkzNlowSTELMAkGA1UEBhMC +QUExCzAJBgNVBAgMAkJCMQswCQYDVQQKDAJDQzELMAkGA1UECwwCREQxEzARBgNV +BAMMCnNlcnZlciBlbmMwWjAUBggqgRzPVQGCLQYIKoEcz1UBgi0DQgAED+MQrLrZ +9PbMmz/44Kb73Qc7FlMs7u034XImjJREBAn1KzZ7jqcYfCiV/buhmu1sLhMXnB69 +mERtf1tAaXcgIaNaMFgwCQYDVR0TBAIwADALBgNVHQ8EBAMCAzgwHQYDVR0OBBYE +FBxHDo0gHhMoYkDeHWySTIJy5BZpMB8GA1UdIwQYMBaAFCTrpmbUig3JfveqAIGJ +6n+vAk2AMAoGCCqBHM9VAYN1A0gAMEUCIHtXgpOxcb3mZv2scRZHZz5YGFr45dfk +VfLkF9BkrB/xAiEA8EeUg7nCFfgHzrfgB7v0wgN1Hrgj8snTUO6IDfkBKYM= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/server_enc.key b/CloudronPackages/APISIX/apisix-source/t/certs/server_enc.key new file mode 100644 index 0000000..0c264c1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/server_enc.key @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqBHM9VAYItBG0wawIBAQQgPJQ040ba9lVszMF0 +7S7pzqdxyIMc2fXKr6EsU0vRk2ahRANCAAQP4xCsutn09sybP/jgpvvdBzsWUyzu +7TfhciaMlEQECfUrNnuOpxh8KJX9u6Ga7WwuExecHr2YRG1/W0BpdyAh +-----END PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/server_sign.crt b/CloudronPackages/APISIX/apisix-source/t/certs/server_sign.crt new file mode 100644 index 0000000..3cd5e87 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/server_sign.crt @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIB2TCCAX+gAwIBAgIBAjAKBggqgRzPVQGDdTBFMQswCQYDVQQGEwJBQTELMAkG +A1UECAwCQkIxCzAJBgNVBAoMAkNDMQswCQYDVQQLDAJERDEPMA0GA1UEAwwGc3Vi 
+IGNhMB4XDTIyMTEwMjAzMTkzNloXDTMyMTAzMDAzMTkzNlowSjELMAkGA1UEBhMC +QUExCzAJBgNVBAgMAkJCMQswCQYDVQQKDAJDQzELMAkGA1UECwwCREQxFDASBgNV +BAMMC3NlcnZlciBzaWduMFowFAYIKoEcz1UBgi0GCCqBHM9VAYItA0IABKGSiyHA +5oIeT13uNL3yxK+t9rKhAMHTzDTQA01ZylHYLn6XdksWugZsP6tTx/2+17NmRkaH +H3wztf6ciD3WZnCjWjBYMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgbAMB0GA1UdDgQW +BBT28BTySSlomaLtnuex4LvaY8tM1TAfBgNVHSMEGDAWgBQk66Zm1IoNyX73qgCB +iep/rwJNgDAKBggqgRzPVQGDdQNIADBFAiB4qGB+bD47KxSfSgqcedXVTd+JlL4f +174uhGLSzNkOZwIhAI33LdJlaw+60YlZqQxzffI+gbqXpSN82+3W4vAsONN0 +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/server_sign.key b/CloudronPackages/APISIX/apisix-source/t/certs/server_sign.key new file mode 100644 index 0000000..b2ae174 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/server_sign.key @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqBHM9VAYItBG0wawIBAQQgMjFhLon1CAQdzFWt +0Mre0juQiCDbXOY8ljWqSzQN9EehRANCAAShkoshwOaCHk9d7jS98sSvrfayoQDB +08w00ANNWcpR2C5+l3ZLFroGbD+rU8f9vtezZkZGhx98M7X+nIg91mZw +-----END PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/test-dot.crt b/CloudronPackages/APISIX/apisix-source/t/certs/test-dot.crt new file mode 100644 index 0000000..a9dfd33 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/test-dot.crt @@ -0,0 +1,18 @@ +-----BEGIN CERTIFICATE----- +MIIC+zCCAeOgAwIBAgIUWUtIDbrU8QF90OXlMKyClPRNRcgwDQYJKoZIhvcNAQEL +BQAwETEPMA0GA1UEAwwGUk9PVENBMCAXDTI0MDcxNzE2MDcyM1oYDzIxMjQwNjIz +MTYwNzIzWjAYMRYwFAYDVQQDDA13d3cudGVzdC5jb20uMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEApkX5NgwwEC/brmrUAfxSMGMaYOzjx+3BlCC23sLR +0uQ1+KMXt/Pd2QJVqREjEAiwXCMuHbB0qWD5985SfsjeRJJ8rc8CzJfcb7QESKfK +GdLaD8LsyAAg+Rxm0QyVFGrLJ82sjbEimLGCkLMpYsePxEDEifKPp3Z9bRUFT0zm +xcUEXojw5pzjrjIvfqVenWNP716s7bSdOFoc4RBlAdEI3pFUasLF9Lovz7BJLvtY +aoqgCNfb78C6zreDLswET5/338AVf9yPYc5HOthmygxkYTniK47/fOW64RQKXQ2X +EtBiIzN6dSXfTCXSpvow5XIR02rLoxsVEEwM9ODgUAJg6QIDAQABo0IwQDAdBgNV 
+HQ4EFgQUAHYNW6/hFM+Bqd2KNBXbLgJLaxcwHwYDVR0jBBgwFoAUjwSzlti+ag+f +BzoRa0wZbMaGh10wDQYJKoZIhvcNAQELBQADggEBAA1HfiDtHZV8sxJjasnNSM9f +6XTRCjT+DcABXm7k/Dmb8q5rpyqYwkUfadgAbmPx6T/dC4z7LblkcTkwD7azpkNE +fXY3Hx4qxSVSbSOHWnaSOX/8BRiPbSQNWGyTGh9AK/Vp/VJU2cDPqFbjQKHFq3ZI +w3GnRDerdA8vm5qzJ5/9wMF2ZsmnMiV3zX0Xisbzx/dponz6ktfygE3bk8Pb4wKt +D0EjbnLIXwyHv1czJrcRq0Y8irWaTY97vdff/J2aO9582zFNx6AnsU3+6fGsDyrO +ss+ggKDLK+aOBKroTNb3TgdPyPOgobUwLByFdKT/zTtWbkqyYMZzBme2SD4TWok= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/test-dot.key b/CloudronPackages/APISIX/apisix-source/t/certs/test-dot.key new file mode 100644 index 0000000..5ed8aba --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/test-dot.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCmRfk2DDAQL9uu +atQB/FIwYxpg7OPH7cGUILbewtHS5DX4oxe3893ZAlWpESMQCLBcIy4dsHSpYPn3 +zlJ+yN5EknytzwLMl9xvtARIp8oZ0toPwuzIACD5HGbRDJUUassnzayNsSKYsYKQ +sylix4/EQMSJ8o+ndn1tFQVPTObFxQReiPDmnOOuMi9+pV6dY0/vXqzttJ04Whzh +EGUB0QjekVRqwsX0ui/PsEku+1hqiqAI19vvwLrOt4MuzARPn/ffwBV/3I9hzkc6 +2GbKDGRhOeIrjv985brhFApdDZcS0GIjM3p1Jd9MJdKm+jDlchHTasujGxUQTAz0 +4OBQAmDpAgMBAAECggEAD84ctm8h5fYApDOWJ8Kp9tzCwgYekE94vEmATIw5CPqF +qVbqbyNUmhdTWGzvN+vVhMqYzHxsmHmmBTDU7WWPYDYK+TQRbGx+iRUz54qghsQg +04j4PDor6DYTjWlMZfqRSV0u+vCErP5JnpLTOyckUrfD3ueCUX0tRsBN5wf0s0WD +7AiUIdVBesQwIuIin3MyhGFtQC0PNta3NdSBVbnUA69OL3QNxPoai5LACrAf1hkf +wPD/y6y2CswdER+j+obPChjTcnJFjRCkqqO+66QZWmMmVxq4ymCQg9IOgLRWtfhI +6Ts5RxVn12kEuPULk9oHHOjC+MVh3BmWFLb58G/gwQKBgQDUCSd/2uswTVlTYpw6 +XO3iVyoZVeo/BIiOm/kjmqmr5U/D7ZO27ElKBTe9CDQ4WB5PuisCy0/SnsJJsPpf +pWif2v0mVs3T9K7J1M1yQU2iMs+Z2stzLGe5AASImYpw9091v57A/1jI4VUoodOr +7sMo+9ROqx6dTG/tJgUa+VZaKQKBgQDIv8CZHv4LqvQEQrGoTcKOxQP47nsbfEPW +B0GQscykvRTWxlTfFdfFM4VG2ApERZDwjPFU84n4dH8J7P14iy2ty70krzHWNfjY +y52CXUb295HsdcQ0bP8wztuvM/Jfh1mKKynmezvAZlTSb+GMAAMrReuG2Ga1/gp1 +5daCd4IowQKBgG//md6eCybLZIh4CN+HIJwywGj7iazZvyvc1T9qPX8vs+9g+Wpg 
+6uFvWh6+S58LZI9mXbuvGq288BEuq0GERHxTlu3+YeA4WW8AubhFKDWpsyCogliG +tw7wJHTm7Up4R3+BxOBawFHzPCEnQYCKsIlgY6deGeCqdGCGeaHi3CrpAoGAdWam +xSW53qr4j/FNIqdvK72OaCtX9agDqAyQTIWer40gvcY5ZknI6TwLKnY38ttYO0XB +8TOIMbQ3g1+EkNWcPjKTh/upQqRHxsm1cMMKOG5qeYYZ26sOxsWC9oCDs1hdhg9e +LrtNI2T1IChsGEr9j3YRmse9sZtDFNX4UE6B4UECgYEAsuRRQK0tgvcsQxkX/bZb +VTKqI4ezGRLXuavBe42xWOBLFzEujGvbZMbxzD4F4H1dfVVor3ItAEoybC37jtHI +uEWLAQtZtNyDCOiq4UuwbmtIqtoJz556QUrwO0KdPPjg/jyZTxs5jdKRMk9bsfmR +80vnuQpr0CZe8EgHiMoysrs= +-----END PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/test2.crt b/CloudronPackages/APISIX/apisix-source/t/certs/test2.crt new file mode 100644 index 0000000..922a8f8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/test2.crt @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIEsTCCAxmgAwIBAgIUMbgUUCYHkuKDaPy0bzZowlK0JG4wDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxEjAQBgNVBAMMCXRlc3QyLmNvbTAgFw0y +MDA0MDQyMjE3NTJaGA8yMTIwMDMxMTIyMTc1MlowVzELMAkGA1UEBhMCQ04xEjAQ +BgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVz +dHkxEjAQBgNVBAMMCXRlc3QyLmNvbTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCC +AYoCggGBAMQGBk35V3zaNVDWzEzVGd+EkZnUOrRpXQg5mmcnoKnrQ5rQQMsQCbMO +gFvLt/9OEZQmbE2HuEKsPzL79Yjdu8rGjSoQdbJZ9ccO32uvln1gn68iK79o7Tvm +TCi+BayyNA+lo9IxrBm1wGBkOU1ZPasGYzgBAbMLTSDps1EYxNR8t4l9PrTTRsh6 +NZyTYoDeVIsKZ9SckpjWVnxHOkF+AzZzIJJSe2pj572TDLYA/Xw9I4X3L+SHzwTl +iGWNXb2tU367LHERHvensQzdle7mQN2kE5GpB7QPWB+t9V4mn30jc/LyDvOaei6L ++pbl5CriGBTjaR80oXhK765K720BQeKUezri15bQlMaUGQRnzr53ZsqA4PEh6WCX +hUT2ibO32+uZFXzVQw8y/JUkPf76pZagi8DoLV+sfSbUtnpbQ8wyV2qqTM2eCuPi +RgUwXQi2WssKKzrqcgKil3vksHZozLtOmyZiNE4qfNxv+UGoIybJtZmB+9spY0Rw +5zBRuULycQIDAQABo3MwcTAdBgNVHQ4EFgQUCmZefzpizPrb3VbiIDhrA48ypB8w +HwYDVR0jBBgwFoAUCmZefzpizPrb3VbiIDhrA48ypB8wDAYDVR0TBAUwAwEB/zAh +BgNVHREEGjAYggl0ZXN0Mi5jb22CCyoudGVzdDIuY29tMA0GCSqGSIb3DQEBCwUA +A4IBgQA0nRTv1zm1ACugJFfYZfxZ0mLJfRUCFMmFfhy+vGiIu6QtnOFVw/tEOyMa 
+m78lBiqac15n3YWYiHiC5NFffTZ7XVlOjN2i4x2z2IJsHNa8tU80AX0Q/pizGK/d ++dzlcsGBb9MGT18h/B3/EYQFKLjUsr0zvDb1T0YDlRUsN3Bq6CvZmvfe9F7Yh4Z/ +XO5R+rX8w9c9A2jzM5isBw2qp/Ggn5RQodMwApEYkJdu80MuxaY6s3dssS4Ay8wP +VNFEeLcdauJ00ES1OnbnuNiYSiSMOgWBsnR+c8AaSRB/OZLYQQKGGYbq0tspwRjM +MGJRrI/jdKnvJQ8p02abdvA9ZuFChoD3Wg03qQ6bna68ZKPd9peBPpMrDDGDLkGI +NzZ6bLJKILnQkV6b1OHVnPDsKXfXjUTTNK/QLJejTXu9RpMBakYZMzs/SOSDtFlS +A+q25t6+46nvA8msUSBKyOGBX42mJcKvR4OgG44PfDjYfmjn2l+Dz/jNXDclpb+Q +XAzBnfM= +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/test2.key b/CloudronPackages/APISIX/apisix-source/t/certs/test2.key new file mode 100644 index 0000000..c25d4e5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/test2.key @@ -0,0 +1,39 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIG5QIBAAKCAYEAxAYGTflXfNo1UNbMTNUZ34SRmdQ6tGldCDmaZyegqetDmtBA +yxAJsw6AW8u3/04RlCZsTYe4Qqw/Mvv1iN27ysaNKhB1sln1xw7fa6+WfWCfryIr +v2jtO+ZMKL4FrLI0D6Wj0jGsGbXAYGQ5TVk9qwZjOAEBswtNIOmzURjE1Hy3iX0+ +tNNGyHo1nJNigN5Uiwpn1JySmNZWfEc6QX4DNnMgklJ7amPnvZMMtgD9fD0jhfcv +5IfPBOWIZY1dva1TfrsscREe96exDN2V7uZA3aQTkakHtA9YH631XiaffSNz8vIO +85p6Lov6luXkKuIYFONpHzSheErvrkrvbQFB4pR7OuLXltCUxpQZBGfOvndmyoDg +8SHpYJeFRPaJs7fb65kVfNVDDzL8lSQ9/vqllqCLwOgtX6x9JtS2eltDzDJXaqpM +zZ4K4+JGBTBdCLZayworOupyAqKXe+SwdmjMu06bJmI0Tip83G/5QagjJsm1mYH7 +2yljRHDnMFG5QvJxAgMBAAECggGBAIELlkruwvGmlULKpWRPReEn3NJwLNVoJ56q +jUMri1FRWAgq4PzNahU+jrHfwxmHw3rMcK/5kQwTaOefh1y63E35uCThARqQroSE +/gBeb6vKWFVrIXG5GbQ9QBXyQroV9r/2Q4q0uJ+UTzklwbNx9G8KnXbY8s1zuyrX +rvzMWYepMwqIMSfJjuebzH9vZ4F+3BlMmF4XVUrYj8bw/SDwXB0UXXT2Z9j6PC1J +CS0oKbgIZ8JhoF3KKjcHBGwWTIf5+byRxeG+z99PBEBafm1Puw1vLfOjD3DN/fso +8xCEtD9pBPBJ+W97x/U+10oKetmP1VVEr2Ph8+s2VH1zsRF5jo5d0GtvJqOwIQJ7 +z3OHJ7lLODw0KAjB1NRXW4dTTUDm6EUuUMWFkGAV6YTyhNLAT0DyrUFJck9RiY48 +3QN8vSf3n/+3wwg1gzcJ9w3W4DUbvGqu86CaUQ4UegfYJlusY/3YGp5bGNQdxmws +lgIoSRrHp6UJKsP8Yl08MIvT/oNLgQKBwQD75SuDeyE0ukhEp0t6v+22d18hfSef +q3lLWMI1SQR9Kiem9Z1KdRkIVY8ZAHANm6D8wgjOODT4QZtiqJd2BJn3Xf+aLfCd 
+CW0hPvmGTcp/E4sDZ2u0HbIrUStz7ZcgXpjD2JJAJGEKY2Z7J65gnTqbqoBDrw1q +1+FqtikkHRte1UqxjwnWBpSdoRQFgNPHxPWffhML1xsD9Pk1B1b7JoakYcKsNoQM +oXUKPLxSZEtd0hIydqmhGYTa9QWBPNDlA5UCgcEAxzfGbOrPBAOOYZd3jORXQI6p +H7SddTHMQyG04i+OWUd0HZFkK7/k6r26GFmImNIsQMB26H+5XoKRFKn+sUl14xHY +FwB140j0XSav2XzT38UpJ9CptbgK1eKGQVp41xwRYjHVScE5hJuA3a1TKM0l26rp +hny/KaP+tXuqt9QbxcUN6efubNYyFP+m6nq2/XdX74bJuGpXLq8W0oFdiocO6tmF +4/Hsc4dCVrcwULqXQa0lJ57zZpfIPARqWM2847xtAoHBANVUNbDpg6rTJMc34722 +dAy3NhL3mqooH9aG+hsEls+l9uT4WFipqSScyU8ERuHPbt0BO1Hi2kFx1rYMUBG8 +PeT4b7NUutVUGV8xpUNv+FH87Bta6CUnjTAQUzuf+QCJ/NjIPrwh0yloG2+roIvk +PLF/CZfI1hUpdZfZZChYmkiLXPHZURw4gH6q33j1rOYf0WFc9aZua0vDmZame6zB +6P+oZ6VPmi/UQXoFC/y/QfDYK18fjfOI2DJTlnDoX4XErQKBwGc3M5xMz/MRcJyJ +oIwj5jzxbRibOJV2tpD1jsU9xG/nQHbtVEwCgTVKFXf2M3qSMhFeZn0xZ7ZayZY+ +OVJbcDO0lBPezjVzIAB/Qc7aCOBAQ4F4b+VRtHN6iPqlSESTK0KH9Szgas+UzeCM +o7BZEctNMu7WBSkq6ZXXu+zAfZ8q6HmPDA3hsFMG3dFQwSxzv+C/IhZlKkRqvNVV +50QVk5oEF4WxW0PECY/qG6NH+YQylDSB+zPlYf4Of5cBCWOoxQKBwQCeo37JpEAR +kYtqSjXkC5GpPTz8KR9lCY4SDuC1XoSVCP0Tk23GX6GGyEf4JWE+fb/gPEFx4Riu +7pvxRwq+F3LaAa/FFTNUpY1+8UuiMO7J0B1RkVXkyJjFUF/aQxAnOoZPmzrdZhWy +bpe2Ka+JS/aXSd1WRN1nmo/DarpWFvdLWZFwUt6zMziH40o1gyPHEuXOqVtf2QCe +Q6WC9xnEz4lbb/fR2TF9QRA4FtoRpDe/f3ZGIpWE0RdwyZZ6uA7T1+Q= +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_ca.crt b/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_ca.crt new file mode 100644 index 0000000..42e3659 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_ca.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIUa34rzhtYT21pC8NwNIYf3phFciQwDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA0MjQxMzE2NDNaFw0yMzA1 +MjQxMzE2NDNaMEUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQC84FXe/8ofZB2rj5TPHasXdiBEbTCv04ti/lV92WOC 
+MrHLKibI2+kI3YRQXaR5/1F2bXfROUhdRgkB8NOSM4WbaD1mtr/7mW2Tatxplxsp +1s0zmHHl5v0VYwBahcUs6nlSe19dgfrj4s0Wn7p4E7iSq/UDAs+We/dQowusQTVs +Q2ZhjDlFY22CV/oyCYsNq3ORRgwZRm9cmVmUUF7GX70yjT1KvLkFjc7y1vwi8XJY +ADhw/hjtEzAOkxdUai84+jyhpQYQWMOgrlP1DXnZw1bNKqo6NTkMzfNCS+ul5PMs +Noyxcw1iyGW6Bm81LANsnMM7BLhPQATShmW7O83WUJ4vAgMBAAGjUzBRMB0GA1Ud +DgQWBBRdFCb//WETC8mDxg/75e+RoVNoDjAfBgNVHSMEGDAWgBRdFCb//WETC8mD +xg/75e+RoVNoDjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC8 +cKOagO+SVJBMzJppm4uGdSM6TJ2wbkn6K5/eOtZmYdKtW6fkAC9tf0DR7dVP1DUk +24lS+atR1Oe7SukxJyd+NafCZ61uf+zrMC3wgBGnufrbPWaDDVxi6c3I0I+WNaCk +DHHY+9UtjvSboWKG1yuEExPN6aDeytbpscG1DNi7l96Ac3Yzs007SFljA7NBrf65 +So9SZYSdJVC/JrOnfK2HZPeAqvoyUO5JsCh02q5AskxTqfBGy6VUVQO5mN8bxYHV +GG5XD46rpwQYNT2bWWRF5d0bRv7ecNkCoupm6hCQROg4FZHGPnqHGqDTcgCLZ59e +8rHh2gsDMMNYvSMTi+0N +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_ca.key b/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_ca.key new file mode 100644 index 0000000..e36b693 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvOBV3v/KH2Qdq4+Uzx2rF3YgRG0wr9OLYv5VfdljgjKxyyom +yNvpCN2EUF2kef9Rdm130TlIXUYJAfDTkjOFm2g9Zra/+5ltk2rcaZcbKdbNM5hx +5eb9FWMAWoXFLOp5UntfXYH64+LNFp+6eBO4kqv1AwLPlnv3UKMLrEE1bENmYYw5 +RWNtglf6MgmLDatzkUYMGUZvXJlZlFBexl+9Mo09Sry5BY3O8tb8IvFyWAA4cP4Y +7RMwDpMXVGovOPo8oaUGEFjDoK5T9Q152cNWzSqqOjU5DM3zQkvrpeTzLDaMsXMN +YshlugZvNSwDbJzDOwS4T0AE0oZluzvN1lCeLwIDAQABAoIBADM7ou9fcQM80/OC +efoIcS1nBG+rMqau+kM6/BOsERrzB1k1sNmRFVArTkXCcOgKwp0eKn8dS6zJX44g +NjOVOCukhetDrSXhQ2DWfr1BmMOrmXPiaRrUolfXx/PGD2sUmx4tivvBUz3Xeowl +fZ4us0VN0aMkcwy9yaMc5wCtm4Em+uMrUIvWSAl3ji09oG4NNBQHUsEWJoRMZ/AG +GQowc7Ga850ybZlza1uWh29a3bbQqEwHExJwiCISv25PJ/xQLqH65biB4MU+ym17 +Ou/MDn9cYndxBal/XI4R7HbeIjMgw2XxwXiiDOuKAn5TlCzHmySRXFj1BoT8xoXa +vTXVlAkCgYEA+nc2GiainyW0MAASX53Ue5zsFh4T2CaA4TTHeXEK22rL1Sz3LsbX 
+ymBqCcNwbcSTYUzBsf6YzSsPLUwIzBGEN0p5Ywts5KtWavAxllBj2MOTP4yQfLvh +AxOq94hqrDLMs/g7LkFrfspYMCXmegGjjXGuqirKbigXkFVQkvOUcwUCgYEAwQy8 +kl2+deq1OD9rJId596nDx6JVLBt7/VP4dOOaS2/YQeFnUdM1xMM+zovEZb3UZMpp +8yhRE7hB7Fm0688yd+F7GpFC49LyVTitZcaIV7nMnXjJQQ27WyiAZoRKHt1gP4io +OCZAaOEJRbGJcWR3sSPHfX93R+xEtFNAexb/eqMCgYEA8NDV7+bdzO7PhKdNAyoZ +NpD2XX2lztmWaPH6KMWLjtPsD5cgQpVkvWxeB+0lmCS9H3xRb/Y+rGWOPhsxCiR9 +Xzv34kcF+AbVHBS9WK0Kk0vXs+5Ord9mxTKP21gKWG6vawpsvFiiJlIe4IxQQVZ6 +DnETYwGpiKh7n4an5eLVBJECgYEAnviuEJnBzbiJotgWku49MgVKg4raOIgpgmMz +po4G8TgZDadgPbGABZgCkHPoNyArVxSYSvRYT7TcFJWKtuTY2n+DsE0OmC2OAT+7 +CqSCgjsulD5y/G8iad7gXYtyvhfuumL+o75cLAGkcQ/R7t6c8fJUxLPCtieKLDSi +VLqLh6ECgYAlk8O5Rz2bSje4F+b0PZLAGjQRytpTjrgVpxwJ4bBXYeqhfd+3Fi8s +OraFx+kj/YOOFkk5uu75/fbccEY1DG0nmWUR0pjHM+QndB4rpkSxtp+pfVo2nRn0 +pAY8ep+TFRLwmy7ZXpOFPYlGPwx+rjSm9vk9EJYjxZE8YYldiBBKHw== +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_server.crt b/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_server.crt new file mode 100644 index 0000000..95bb51b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_server.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfkCFEynFsv9L6bzyJJVmjoaKuCoYZwkMA0GCSqGSIb3DQEBCwUAMEUx +CzAJBgNVBAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl +cm5ldCBXaWRnaXRzIFB0eSBMdGQwHhcNMjMwNDI0MTMxNzAwWhcNMjQwNDIzMTMx +NzAwWjBFMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UE +CgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAtGvdwIHuk6k3vphBnGIdtlZ/6ImHSVBsNHz5y6E9X31a88EH +wtnxT5Ang8K6Y4pQt+LsjhI0NdUY2skiKDnGpo2IkaFAn9nERQ1GJstIHr7ltal6 +ureV4n/Na/T6n6GPnwD4+P86XvpIwFtJZujYr2tUl4qm/t1P7zHjB/UsF9G6H/aN +oCsDkG3a7+b8uWAZLkyHS4RLF3pG6pDWns8/vC/P9nTT7o3Ha2DV7TPaY0hlsXf6 +0/SCSm7EonnVVwhnKyy5Z0FsCXClg7weN4ZKPb+ypF0o0/LLqw481lbSfAu5kpjE +r/rHpsQonRbQrcrD9xovXmw2vdk/2jJn6wpFQwIDAQABMA0GCSqGSIb3DQEBCwUA 
+A4IBAQBv9e8gsD75iySf+pam11JUujjL0gpqdzY72CKo4abYX5NZhMiBs6OCKicz +EedR/EgRY+26RMThKC0zSy3hOO6SKPw03FLsV2B8ooDzaOa4l3F/E6NQ5yNDoK+K +lT1G85fW3bQWtNoB8aa/r1/eExZy3kZF8GSl+/BvwLtOwtGXMO0Y1URo81Dl0da+ +F2yv6ZGziEYIWYTUK3kxOpe0Sl4wHz33olWoli2qpYlSndUUIWoVYJr4gtH/xTEV +GHxdOhxcfyMNi6ceYG4HGWyKRFR9TJAU+PRBxHI8UUpg+BG3/DQmfA5+7xgAws37 +dEVsm725hta8vPUSMSAdRrArBlh+ +-----END CERTIFICATE----- diff --git a/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_server.key b/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_server.key new file mode 100644 index 0000000..54035d4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/certs/vector_logs_server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAtGvdwIHuk6k3vphBnGIdtlZ/6ImHSVBsNHz5y6E9X31a88EH +wtnxT5Ang8K6Y4pQt+LsjhI0NdUY2skiKDnGpo2IkaFAn9nERQ1GJstIHr7ltal6 +ureV4n/Na/T6n6GPnwD4+P86XvpIwFtJZujYr2tUl4qm/t1P7zHjB/UsF9G6H/aN +oCsDkG3a7+b8uWAZLkyHS4RLF3pG6pDWns8/vC/P9nTT7o3Ha2DV7TPaY0hlsXf6 +0/SCSm7EonnVVwhnKyy5Z0FsCXClg7weN4ZKPb+ypF0o0/LLqw481lbSfAu5kpjE +r/rHpsQonRbQrcrD9xovXmw2vdk/2jJn6wpFQwIDAQABAoIBAQCB24lV/6759Le8 +pNXEexIrpQKXGjWXXR0kgjdAiyMjUZRfETZG1prKy1TFjyiccHc8g0YD07JkdKZZ +Ap9lGICUbBY5yzg6VYDguncdgP69smSfZgaB0ZU92wK9iyvALYazyP1qKjmXFsm6 +OXoRadJcIAJYuGEN27imzt87YQmFciXj63lW4usR7rPpacW004VeWqGfXTnckJd6 +TYFq0xmdhnGxDxOlf6fs5zOEw17NrGlYxQVtdst8sGmpAPMEM7DzvDsjfEPxDuXl +hQJE8Zk8jK3Xwrnc03NWisZ4QVhgxeR7PVcraFo623qiI/CzH9YqUqMCtIMAqz/T +COXXl9JxAoGBAOosUC72SM7ZRshneHHszEaZDvfLINdKGUKCDvYlLEmVFqE5iRFy +SomVci2jtrlGH1gJAWfwkT09JVgtGosRIA0MS82HseLN/QIa01dAmmiZqM/CLbcn +mpb0CQDkm0Bbz6fokQkFB/sBA5Kj3kOKRydCLp2S0Ugs50cKXDHP5fuVAoGBAMU8 +9rIvmNdweGTiYjHYLJBkzu7eL+5RB2zVSMuZtVivaDTfleilbRlcrBBIaM0urv2W +UtROB9ack2Ijn/BF+tHkBRVWpaZFdHJ8qMfz2bBDgf6za/LBcvuT35i7ibPT+zfg +UFXtArmGwPq3AZdWBwIKyN8rM7253WDnUlkN7Ed3AoGBAMPAR0b6meJPvtvHoueZ +Cyn4yIpbQxi02GjAT8FzUZIxDrm3Xt02rRhV1RxRvm0iMRFmdcZtUvveIVmUWpvl +tOUzYiptREZT6yvXQNOvLWRDDtqdd5mjgZauaNhWQXGLTgsOXi8sBX/NWS87zJCp 
+BtHKgS03jbrHzo2UG32ITLgBAoGAJRoardoWPjCB9ThAkG/BskfERVq2WXYUl3xn +fSUk39HfIFMOt/ymUScFluqIDFDDyiAE5Lro7o31i3h4FZKUY/conaL29hgKl56r +gTF1uZp5UZgerkOFhZ2Dag+dD57ImvIvKnqzEIMwufjC69za5J9yucg+q2nTIu9g +pi/gSnECgYEAhfJ5uq1qa+g23np02ED5ttqmyrMRGGInx3mr2QgJDTum6FujpYCM +PwJhMwKJZXcf3eUlECSJPa+9UGI53d+JDlQdwq9Pi726KFtrBiS4t9aSyZSpkoWk +SVdYGaOMtokDKRJibazXjpGFJQy9tAMgtqptS3kL03IuJc643y+lMFc= +-----END RSA PRIVATE KEY----- diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/delayetcd/delayetcd.go b/CloudronPackages/APISIX/apisix-source/t/chaos/delayetcd/delayetcd.go new file mode 100644 index 0000000..4cb1c9e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/delayetcd/delayetcd.go @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package delayetcd + +import ( + "context" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/chaos-mesh/chaos-mesh/api/v1alpha1" + "github.com/gavv/httpexpect" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/apache/apisix/t/chaos/utils" +) + +func getEtcdDelayChaos(delay int) *v1alpha1.NetworkChaos { + return &v1alpha1.NetworkChaos{ + ObjectMeta: metav1.ObjectMeta{ + Name: "etcd-delay", + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.NetworkChaosSpec{ + Selector: v1alpha1.SelectorSpec{ + LabelSelectors: map[string]string{"app.kubernetes.io/instance": "etcd"}, + }, + Action: v1alpha1.DelayAction, + Mode: v1alpha1.AllPodMode, + TcParameter: v1alpha1.TcParameter{ + Delay: &v1alpha1.DelaySpec{ + Latency: strconv.Itoa(delay) + "ms", + }, + }, + }, + } +} + +func setRouteMultipleTimes(e *httpexpect.Expect, times int, status httpexpect.StatusRange) time.Duration { + now := time.Now() + timeLast := now + var timeList []string + for i := 0; i < times; i++ { + utils.SetRoute(e, status) + timeList = append(timeList, time.Since(timeLast).String()) + timeLast = time.Now() + } + fmt.Fprintf(ginkgo.GinkgoWriter, "takes %v separately\n", timeList) + return time.Since(now) / time.Duration(times) +} + +func setRouteMultipleTimesIgnoreError(e *httpexpect.Expect, times int) (time.Duration, int) { + now := time.Now() + var resp *httpexpect.Response + for i := 0; i < times; i++ { + resp = utils.SetRouteIgnoreError(e) + } + // use status code of the last time is enough to show the accessibility of apisix + return time.Since(now) / time.Duration(times), resp.Raw().StatusCode +} + +func deleteChaosAndCheck(eSilent *httpexpect.Expect, cliSet *utils.ClientSet, chaos *v1alpha1.NetworkChaos) { + err := cliSet.CtrlCli.Delete(context.Background(), chaos) + gomega.Expect(err).To(gomega.BeNil()) + time.Sleep(1 * time.Second) + + 
var setDuration time.Duration + var statusCode int + for range [10]int{} { + setDuration, statusCode = setRouteMultipleTimesIgnoreError(eSilent, 5) + if setDuration < 15*time.Millisecond && statusCode == http.StatusOK { + break + } + time.Sleep(5 * time.Second) + } + gomega.Ω(setDuration).Should(gomega.BeNumerically("<", 15*time.Millisecond)) + gomega.Ω(statusCode).Should(gomega.BeNumerically("==", http.StatusOK)) +} + +var _ = ginkgo.Describe("Test APISIX Delay When Add ETCD Delay", func() { + ctx := context.Background() + e := httpexpect.New(ginkgo.GinkgoT(), utils.Host) + eDataPanel := httpexpect.New(ginkgo.GinkgoT(), utils.DataPanelHost) + ePrometheus := httpexpect.New(ginkgo.GinkgoT(), utils.PrometheusHost) + eSilent := utils.GetSilentHttpexpectClient() + + var cliSet *utils.ClientSet + var apisixPod *v1.Pod + var err error + ginkgo.It("init client set", func() { + cliSet, err = utils.InitClientSet() + gomega.Expect(err).To(gomega.BeNil()) + listOption := client.MatchingLabels{"app": "apisix-gw"} + apisixPods, err := utils.GetPods(cliSet.CtrlCli, metav1.NamespaceDefault, listOption) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Ω(len(apisixPods)).Should(gomega.BeNumerically(">", 0)) + apisixPod = &apisixPods[0] + }) + + ginkgo.It("setup prometheus metrics public API", func() { + utils.SetPrometheusMetricsPublicAPI(e) + }) + + ginkgo.It("check if everything works", func() { + utils.SetRoute(e, httpexpect.Status2xx) + utils.GetRouteList(e, http.StatusOK) + + utils.WaitUntilMethodSucceed(eDataPanel, http.MethodGet, 1) + utils.TestPrometheusEtcdMetric(ePrometheus, 1) + }) + + // get default + ginkgo.It("get default apisix delay", func() { + timeStart := time.Now() + setDuration := setRouteMultipleTimes(eSilent, 5, httpexpect.Status2xx) + gomega.Ω(setDuration).Should(gomega.BeNumerically("<", 15*time.Millisecond)) + + errorLog, err := utils.Log(apisixPod, cliSet.KubeCli, timeStart) + gomega.Expect(err).To(gomega.BeNil()) + 
gomega.Ω(errorLog).ShouldNot(gomega.ContainSubstring("error")) + }) + + // 30ms delay + ginkgo.It("generate a 30ms delay between etcd and apisix", func() { + timeStart := time.Now() + chaos := getEtcdDelayChaos(30) + err := cliSet.CtrlCli.Create(ctx, chaos) + gomega.Expect(err).To(gomega.BeNil()) + time.Sleep(1 * time.Second) + + defer deleteChaosAndCheck(eSilent, cliSet, chaos) + + setDuration := setRouteMultipleTimes(eSilent, 5, httpexpect.Status2xx) + gomega.Ω(setDuration).Should(gomega.BeNumerically("<", 400*time.Millisecond)) + + errorLog, err := utils.Log(apisixPod, cliSet.KubeCli, timeStart) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Ω(errorLog).ShouldNot(gomega.ContainSubstring("error")) + }) + + // 300ms delay + ginkgo.It("generate a 300ms delay between etcd and apisix", func() { + timeStart := time.Now() + chaos := getEtcdDelayChaos(300) + err := cliSet.CtrlCli.Create(ctx, chaos) + gomega.Expect(err).To(gomega.BeNil()) + time.Sleep(1 * time.Second) + + defer deleteChaosAndCheck(eSilent, cliSet, chaos) + + setDuration := setRouteMultipleTimes(eSilent, 5, httpexpect.Status2xx) + gomega.Ω(setDuration).Should(gomega.BeNumerically("<", 4*time.Second)) + + errorLog, err := utils.Log(apisixPod, cliSet.KubeCli, timeStart) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Ω(errorLog).ShouldNot(gomega.ContainSubstring("error")) + }) + + // 3s delay and cause error + ginkgo.It("generate a 3s delay between etcd and apisix", func() { + timeStart := time.Now() + chaos := getEtcdDelayChaos(3000) + err := cliSet.CtrlCli.Create(ctx, chaos) + gomega.Expect(err).To(gomega.BeNil()) + time.Sleep(1 * time.Second) + + defer deleteChaosAndCheck(eSilent, cliSet, chaos) + + _ = setRouteMultipleTimes(e, 2, httpexpect.Status5xx) + + errorLog, err := utils.Log(apisixPod, cliSet.KubeCli, timeStart) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Ω(errorLog).Should(gomega.ContainSubstring("error")) + }) + + ginkgo.It("restore test environment", func() { + 
utils.WaitUntilMethodSucceed(e, http.MethodPut, 5) + utils.DeleteRoute(e) + }) +}) diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/e2e.go b/CloudronPackages/APISIX/apisix-source/t/chaos/e2e.go new file mode 100644 index 0000000..51094df --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/e2e.go @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e + +import ( + _ "github.com/apache/apisix/t/chaos/delayetcd" + _ "github.com/apache/apisix/t/chaos/killetcd" +) + +func runChaos() {} diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/e2e_test.go b/CloudronPackages/APISIX/apisix-source/t/chaos/e2e_test.go new file mode 100644 index 0000000..2a1a420 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/e2e_test.go @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package e2e + +import ( + "testing" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" +) + +func TestRunChaos(t *testing.T) { + runChaos() + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "chaos test suites") +} diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/go.mod b/CloudronPackages/APISIX/apisix-source/t/chaos/go.mod new file mode 100644 index 0000000..efd5cc7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/go.mod @@ -0,0 +1,52 @@ +module github.com/apache/apisix/t/chaos + +require ( + github.com/ajg/form v1.5.1 // indirect + github.com/chaos-mesh/chaos-mesh v1.1.1 + github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072 // indirect + github.com/fatih/structs v1.1.0 // indirect + github.com/gavv/httpexpect v2.0.0+incompatible + github.com/imkira/go-interpol v1.1.0 // indirect + github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect + github.com/moul/http2curl v1.0.0 // indirect + github.com/onsi/ginkgo v1.12.0 + github.com/onsi/gomega v1.9.0 + github.com/pkg/errors v0.9.1 + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 // indirect + github.com/yudai/gojsondiff v1.0.0 // indirect + github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect + github.com/yudai/pp v2.0.1+incompatible // indirect + k8s.io/api v0.17.0 + k8s.io/apimachinery v0.17.0 + k8s.io/client-go v0.17.0 + k8s.io/kubectl v0.0.0 + k8s.io/kubernetes v1.17.2 + sigs.k8s.io/controller-runtime v0.4.0 +) + +replace ( + 
k8s.io/api => k8s.io/api v0.17.0 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.17.0 + k8s.io/apimachinery => k8s.io/apimachinery v0.17.1-beta.0 + k8s.io/apiserver => k8s.io/apiserver v0.17.0 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.17.0 + k8s.io/client-go => k8s.io/client-go v0.17.0 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.17.0 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.17.0 + k8s.io/code-generator => k8s.io/code-generator v0.17.1-beta.0 + k8s.io/component-base => k8s.io/component-base v0.17.0 + k8s.io/cri-api => k8s.io/cri-api v0.17.1-beta.0 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.17.0 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.17.0 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.17.0 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.17.0 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.17.0 + k8s.io/kubectl => k8s.io/kubectl v0.17.0 + k8s.io/kubelet => k8s.io/kubelet v0.17.0 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.17.0 + k8s.io/metrics => k8s.io/metrics v0.17.0 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.17.0 +) + +go 1.14 diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/go.sum b/CloudronPackages/APISIX/apisix-source/t/chaos/go.sum new file mode 100644 index 0000000..22e5383 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/go.sum @@ -0,0 +1,1120 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod 
h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= 
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= +github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc 
v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= 
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= +github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/bazelbuild/bazel-gazelle v0.18.2/go.mod h1:D0ehMSbS+vesFsLGiD6JXu3mVEzOlfUl8wNnq+x/9p0= +github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798/go.mod h1:rPwzNHUqEzngx1iVBfO/2X2npKaT3tqPqqHW6rVsn/A= +github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= +github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= +github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= +github.com/bxcodec/faker 
v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM= +github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/chaos-mesh/chaos-mesh v1.1.1 h1:8nQ7Z9uvRa8rBvY/uJKt8kb7D9gBvcdWcnzqu7VckHc= +github.com/chaos-mesh/chaos-mesh v1.1.1/go.mod h1:BjKgSVpmbyrj5Te3/xiYaIMCfiZUhreS3nep/Fupqmk= +github.com/chaos-mesh/k8s_dns_chaos v0.0.0-20200922120555-7ced93637075/go.mod h1:CB8grXv5pqxLgiI0HSZxyyykmDRekpd5M7fz+NlOdMs= +github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= +github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/containerd/cgroups 
v0.0.0-20200404012852-53ba5634dc0f/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.2.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +github.com/containerd/cri v1.11.1/go.mod h1:DavH5Qa8+6jOmeOMO3dhWoqksucZDe06LfuhBz/xPZs= +github.com/containerd/fifo v0.0.0-20191213151349-ff969a566b00/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20200115183213-fe1d0d650e42/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/coredns/corefile-migration v1.0.4/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd 
v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= +github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= +github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072 h1:DddqAaWDpywytcG8w/qoQ5sAN8X12d3Z3koB0C3Rxsc= +github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod 
h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gavv/httpexpect v2.0.0+incompatible h1:1X9kcRshkSKEjNJJxX9Y9mQ5BRfbxU5kORdjhlA1yX8= +github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= +github.com/gin-contrib/pprof v1.3.0/go.mod h1:waMjT1H9b179t3CxuG1cV3DHpga6ybizwfBaM5OXaB0= +github.com/gin-contrib/sse v0.0.0-20170109093832-22d885f9ecc7/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.3.0/go.mod h1:7cKuhb5qV2ggCFctp2fJQ+ErvciLZrIeoOSOm6mUr7Y= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= +github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod 
h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer 
v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= 
+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= 
+github.com/go-playground/overalls v0.0.0-20180201144345-22ec1a223b7c/go.mod h1:UqxAgEOt89sCiXlrc/ycnx00LVvUO/eS8tMUkWX4R7w= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.3.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= 
+github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.0.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod 
h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg= +github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +github.com/golangplus/testing 
v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20181106134648-c34317bd91bf/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE= +github.com/google/uuid v1.0.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= 
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.1/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= +github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk= +github.com/imkira/go-interpol 
v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= +github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/joomcode/errorx v1.0.1/go.mod h1:kgco15ekB6cs+4Xjzo7SPeXzx38PbJzBwbnu9qfVNHQ= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= +github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= +github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= +github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= +github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= +github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= 
+github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= +github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.0.2-0.20200225072153-6219ca02fffb/go.mod h1:E9j8UNyHeYo/uUXIIUOAehxf5B69UwZ5u3qj7wEn8J0= +github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= +github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod 
h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 
+github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs= +github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.4.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= +github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= +github.com/pingcap/check v0.0.0-20191216031241-8a5a85928f12/go.mod h1:PYMCGwN0JHjoqGr3HrZoD+b8Tgx8bKnArhSq8YVzUMc= +github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.5-0.20190809092503-95897b64e011/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/failpoint v0.0.0-20200210140405-f8f9fb234798/go.mod h1:DNS3Qg7bEDhU6EXNHF+XSv/PGznQaMJ5FWvctpm6pQI= +github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pingcap/log v0.0.0-20200117041106-d28c14d3b1cd/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/robfig/cron v1.1.0 h1:jk4/Hud3TTdcrJgUOBgsqrZBarcxl6ADIjSC2iniwLY= +github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron/v3 v3.0.0 h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E= +github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= +github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= 
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44 h1:tB9NOR21++IjLyVx3/PCPhWMwqGNCMQEH96A6dMZ/gc= +github.com/sergi/go-diff v1.0.1-0.20180205163309-da645544ed44/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= 
+github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod 
h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify 
v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14/go.mod h1:gxQT6pBGRuIGunNf/+tSOB5OHvguWi8Tbt82WOkf35E= +github.com/swaggo/gin-swagger v1.2.0/go.mod h1:qlH2+W7zXGZkczuL+r2nEBR2JTT+/lX05Nn6vPhc7OI= +github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= +github.com/swaggo/swag v1.6.7/go.mod h1:xDhTyuFIujYiN3DKWC/H/83xcfHp+UE/IzWWampG7Zc= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.5-pre/go.mod h1:FwP/aQVg39TXzItUBMwnWp9T9gPQnXw4Poh4/oBQZ/0= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod 
h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.5-pre/go.mod h1:tULtS6Gy1AE1yCENaw4Vb//HLH5njI2tfCQDUqRd8fI= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0 h1:dzZJf2IuMiclVjdw0kkT+f9u4YdrapbNyGAN47E/qnk= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= 
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yookoala/realpath v1.0.0/go.mod h1:gJJMA9wuX7AcqLy1+ffPatSCySA1FQ2S8Ya9AIoYBpE= +github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod 
h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/dig v1.9.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= +go.uber.org/fx v1.12.0/go.mod h1:egT3Kyg1JFYQkvKLZ3EsykxkNrZxgXS+gKoKo7abERY= +go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap 
v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd h1:GGJVjV8waZKRHrgwvtH66z9ZGVurTD1MT0n1Bb+q4aM= +golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200320220750-118fecf932d8 h1:1+zQlQqEEhUeStBTi653GZAnAuivZq/2hz+Iz+OP7rg= +golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa h1:mQTN3ECqfsViCNBgq+A40vdwhkGykrrQlYe3mPj6BoU= +golang.org/x/sys v0.0.0-20200409092240-59c9f1ba88fa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606050223-4d9ae51c2468/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190611222205-d73e1c7e250b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191030062658-86caa796c7ab/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191107010934-f79515f33823/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191114200427-caa0b0f7d508/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200221224223-e1da425f72fd/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200309202150-20ab64c0d93f h1:NbrfHxef+IfdI86qCgO/1Siq1BuMH2xG0NqgvCguRhQ= +golang.org/x/tools v0.0.0-20200309202150-20ab64c0d93f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod 
h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= 
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/gometalinter.v2 v2.0.12/go.mod h1:NDRytsqEZyolNuAgTzJkZMkSQM7FIKyzVzGhjB/qfYo= 
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alecthomas/kingpin.v3-unstable v3.0.0-20180810215634-df19058c872c/go.mod h1:3HH7i1SgMqlzxCcBmUHW657sD4Kvv9sC3HpL3YukzwA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM= +k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= +k8s.io/apiextensions-apiserver v0.17.0 h1:+XgcGxqaMztkbbvsORgCmHIb4uImHKvTjNyu7b8gRnA= +k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= +k8s.io/apimachinery v0.17.1-beta.0 h1:0Wl/KpAiFOMe9to5h8x2Y6JnjV+BEWJiTcUk1Vx7zdE= +k8s.io/apimachinery v0.17.1-beta.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apiserver v0.17.0 h1:XhUix+FKFDcBygWkQNp7wKKvZL030QUlH1o8vFeSgZA= +k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= +k8s.io/cli-runtime v0.17.0/go.mod h1:1E5iQpMODZq2lMWLUJELwRu2MLWIzwvMgDBpn3Y81Qo= +k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg= +k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= +k8s.io/cloud-provider v0.17.0/go.mod h1:Ze4c3w2C0bRsjkBUoHpFi+qWe3ob1wI2/7cUn+YQIDE= +k8s.io/cluster-bootstrap v0.17.0/go.mod h1:KnxktBWGyKlBDaHLC8zzu0EPt/HJ9Lcs7bNM2WvUHSs= +k8s.io/code-generator v0.17.1-beta.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= +k8s.io/component-base v0.17.0 h1:BnDFcmBDq+RPpxXjmuYnZXb59XNN9CaFrX8ba9+3xrA= +k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= +k8s.io/cri-api v0.17.1-beta.0/go.mod h1:BzAkbBHHp81d+aXzbiIcUbilLkbXa40B8mUHOk6EX3s= +k8s.io/csi-translation-lib v0.17.0/go.mod h1:HEF7MEz7pOLJCnxabi45IPkhSsE/KmxPQksuCrHKWls= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/heapster v1.2.0-beta.1/go.mod 
h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-aggregator v0.17.0/go.mod h1:Vw104PtCEuT12WTVuhRFWCHXGiVqXsTzFtrvoaHxpk4= +k8s.io/kube-controller-manager v0.17.0/go.mod h1:uewKsjSm/Kggbn+BmimupXDDEikKQv6rX8ShiLiuXTw= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +k8s.io/kube-proxy v0.17.0/go.mod h1:pecyGyajk667mTTCT0vMP7Oh3bQMUHvEW+Z5pZUjYxU= +k8s.io/kube-scheduler v0.17.0/go.mod h1:mZVsEg++qnq6xWm9DTh2bw9v2i9XPdkEQGDafcjG6PE= +k8s.io/kubectl v0.17.0 h1:xD4EWlL+epc/JTO1gvSjmV9yiYF0Z2wiHK2DIek6URY= +k8s.io/kubectl v0.17.0/go.mod h1:jIPrUAW656Vzn9wZCCe0PC+oTcu56u2HgFD21Xbfk1s= +k8s.io/kubelet v0.17.0/go.mod h1:e/JBCxucKuEV6JO6zYW+e72ib9eMsGO2Fah3iT5tiiI= +k8s.io/kubernetes v1.17.2 h1:g1UFZqFQsYx88xMUks4PKC6tsNcekxe0v06fcVGRwVE= +k8s.io/kubernetes v1.17.2/go.mod h1:NbNV+69yL3eKiKDJ+ZEjqOplN3BFXKBeunzkoOy8WLo= +k8s.io/legacy-cloud-providers v0.17.0/go.mod h1:DdzaepJ3RtRy+e5YhNtrCYwlgyK87j/5+Yfp0L9Syp8= +k8s.io/metrics v0.17.0/go.mod h1:EH1D3YAwN6d7bMelrElnLhLg72l/ERStyv2SIQVt6Do= +k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= +k8s.io/sample-apiserver v0.17.0/go.mod h1:SAkguNIe/gJik7VlkFu62oGlWltW3c0mAP9WQYUMEJo= +k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo= +k8s.io/utils 
v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg= +sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= +sigs.k8s.io/controller-tools v0.2.5/go.mod h1:+t0Hz6tOhJQCdd7IYO0mNzimmiM9sqMU0021u6UCF2o= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= +sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= +sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sqs/pbtypes 
v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/killetcd/killetcd.go b/CloudronPackages/APISIX/apisix-source/t/chaos/killetcd/killetcd.go new file mode 100644 index 0000000..069b665 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/killetcd/killetcd.go @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package killetcd + +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/chaos-mesh/chaos-mesh/api/v1alpha1" + "github.com/gavv/httpexpect" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/apache/apisix/t/chaos/utils" +) + +var ( + bandwidthBefore float64 + durationBefore float64 + bpsBefore float64 + bandwidthAfter float64 + durationAfter float64 + bpsAfter float64 +) + +func getEtcdKillChaos() *v1alpha1.PodChaos { + return &v1alpha1.PodChaos{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kill-etcd", + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.PodChaosSpec{ + Selector: v1alpha1.SelectorSpec{ + LabelSelectors: map[string]string{"app.kubernetes.io/instance": "etcd"}, + }, + Action: v1alpha1.PodKillAction, + Mode: v1alpha1.AllPodMode, + Scheduler: &v1alpha1.SchedulerSpec{ + Cron: "@every 10m", + }, + }, + } +} + +var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { + e := httpexpect.New(ginkgo.GinkgoT(), utils.Host) + eDataPanel := httpexpect.New(ginkgo.GinkgoT(), utils.DataPanelHost) + ePrometheus := httpexpect.New(ginkgo.GinkgoT(), utils.PrometheusHost) + + var cliSet *utils.ClientSet + var apisixPod *v1.Pod + var err error + ginkgo.It("init client set", func() { + cliSet, err = utils.InitClientSet() + gomega.Expect(err).To(gomega.BeNil()) + listOption := client.MatchingLabels{"app": "apisix-gw"} + apisixPods, err := utils.GetPods(cliSet.CtrlCli, metav1.NamespaceDefault, listOption) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Ω(len(apisixPods)).Should(gomega.BeNumerically(">", 0)) + apisixPod = &apisixPods[0] + }) + + stopChan := make(chan bool) + + ginkgo.It("setup prometheus metrics public API", func() { + utils.SetPrometheusMetricsPublicAPI(e) + }) + + ginkgo.It("check if everything works", func() { + utils.SetRoute(e, httpexpect.Status2xx) + utils.GetRouteList(e, 
http.StatusOK) + + utils.WaitUntilMethodSucceed(eDataPanel, http.MethodGet, 1) + utils.TestPrometheusEtcdMetric(ePrometheus, 1) + }) + + ginkgo.It("run request in background", func() { + go func() { + defer ginkgo.GinkgoRecover() + for { + go func() { + defer ginkgo.GinkgoRecover() + utils.GetRoute(eDataPanel, http.StatusOK) + }() + time.Sleep(100 * time.Millisecond) + stopLoop := false + select { + case <-stopChan: + stopLoop = true + default: + } + if stopLoop { + break + } + } + }() + }) + // wait 1 seconds to let first route access returns + time.Sleep(1 * time.Second) + + ginkgo.It("get stats before kill etcd", func() { + timeStart := time.Now() + bandwidthBefore, durationBefore = utils.GetEgressBandwidthPerSecond(ePrometheus) + bpsBefore = bandwidthBefore / durationBefore + gomega.Expect(bpsBefore).NotTo(gomega.BeZero()) + + errorLog, err := utils.Log(apisixPod, cliSet.KubeCli, timeStart) + gomega.Expect(err).To(gomega.BeNil()) + gomega.Ω(errorLog).ShouldNot(gomega.ContainSubstring("no healthy etcd endpoint available")) + }) + + // apply chaos to kill all etcd pods + ginkgo.It("kill all etcd pods", func() { + chaos := getEtcdKillChaos() + err := cliSet.CtrlCli.Create(context.Background(), chaos.DeepCopy()) + gomega.Expect(err).To(gomega.BeNil()) + time.Sleep(3 * time.Second) + }) + + // fail to set route since etcd is all killed + // while get route could still succeed + ginkgo.It("get stats after kill etcd", func() { + utils.SetRoute(e, httpexpect.Status5xx) + utils.GetRoute(eDataPanel, http.StatusOK) + utils.TestPrometheusEtcdMetric(ePrometheus, 0) + + bandwidthAfter, durationAfter = utils.GetEgressBandwidthPerSecond(ePrometheus) + bpsAfter = bandwidthAfter / durationAfter + }) + + ginkgo.It("ingress bandwidth per second not change much", func() { + fmt.Fprintf(ginkgo.GinkgoWriter, "bandwidth before: %f, after: %f\n", bandwidthBefore, bandwidthAfter) + fmt.Fprintf(ginkgo.GinkgoWriter, "duration before: %f, after: %f\n", durationBefore, durationAfter) + 
fmt.Fprintf(ginkgo.GinkgoWriter, "bps before: %f, after: %f\n", bpsBefore, bpsAfter) + gomega.Expect(utils.RoughCompare(bpsBefore, bpsAfter)).To(gomega.BeTrue()) + }) + + ginkgo.It("restore test environment", func() { + stopChan <- true + cliSet.CtrlCli.Delete(context.Background(), getEtcdKillChaos()) + utils.WaitUntilMethodSucceed(e, http.MethodPut, 5) + utils.DeleteRoute(e) + }) +}) diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/kubernetes/deployment.yaml b/CloudronPackages/APISIX/apisix-source/t/chaos/kubernetes/deployment.yaml new file mode 100644 index 0000000..3076f9c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/kubernetes/deployment.yaml @@ -0,0 +1,115 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +apiVersion: apps/v1 # for versions before 1.8.0 use apps/v1beta1, before 1.9.0 use apps/v1beta2 +kind: Deployment +metadata: + labels: + app: apisix-gw + name: apisix-gw-deployment + # namespace: default +spec: + replicas: 1 + selector: + matchLabels: + app: apisix-gw + template: + metadata: + labels: + app: apisix-gw + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - apisix-gw + topologyKey: kubernetes.io/hostname + weight: 100 + initContainers: + - command: + - /bin/sh + - -c + - | + sysctl -w net.core.somaxconn=65535 + sysctl -w net.ipv4.ip_local_port_range="1024 65535" + sysctl -w net.ipv4.tcp_max_syn_backlog=8192 + sysctl -w fs.file-max=1048576 + sysctl -w fs.inotify.max_user_instances=16384 + sysctl -w fs.inotify.max_user_watches=524288 + sysctl -w fs.inotify.max_queued_events=16384 + image: busybox:latest + imagePullPolicy: IfNotPresent + name: init-sysctl + resources: {} + securityContext: + privileged: true + procMount: Default + restartPolicy: Always + + containers: + - env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + image: "apache/apisix:alpine-local" + imagePullPolicy: IfNotPresent + name: apisix-gw-deployment + ports: + - containerPort: 9080 + name: http + protocol: TCP + - containerPort: 9443 + name: https + protocol: TCP + - containerPort: 9180 + name: admin-port + protocol: TCP + readinessProbe: + failureThreshold: 6 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 9080 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /usr/local/apisix/conf/config.yaml + name: apisix-config-yaml-configmap + subPath: config.yaml + - mountPath: /etc/localtime + name: localtime + readOnly: true + volumes: + - configMap: + name: 
apisix-gw-config.yaml + name: apisix-config-yaml-configmap + - hostPath: + path: /etc/localtime + type: File + name: localtime diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/kubernetes/service.yaml b/CloudronPackages/APISIX/apisix-source/t/chaos/kubernetes/service.yaml new file mode 100644 index 0000000..f0ffdae --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/kubernetes/service.yaml @@ -0,0 +1,43 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +apiVersion: v1 +kind: Service +metadata: + name: apisix-gw-lb + # namespace: default + labels: + app: apisix-gw # useful for service discovery, for example, prometheus-operator. 
+spec: + ports: + - name: http + port: 9080 + protocol: TCP + targetPort: 9080 + - name: https + port: 9443 + protocol: TCP + targetPort: 9443 + - name: admin-port + port: 9180 + protocol: TCP + targetPort: 9180 + selector: + app: apisix-gw + type: NodePort + externalTrafficPolicy: Local + # sessionAffinity: None diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/utils/Dockerfile b/CloudronPackages/APISIX/apisix-source/t/chaos/utils/Dockerfile new file mode 100644 index 0000000..5d5ba6d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/utils/Dockerfile @@ -0,0 +1,76 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +ARG ENABLE_PROXY=false + +FROM openresty/openresty:1.21.4.2-alpine-fat AS production-stage + +ARG ENABLE_PROXY +ARG APISIX_PATH +COPY $APISIX_PATH ./apisix +RUN set -x \ + && (test "${ENABLE_PROXY}" != "true" || /bin/sed -i 's,http://dl-cdn.alpinelinux.org,https://mirrors.aliyun.com,g' /etc/apk/repositories) \ + && apk add --no-cache --virtual .builddeps \ + automake \ + autoconf \ + libtool \ + pkgconfig \ + cmake \ + git \ + openldap-dev \ + pcre-dev \ + sudo \ + && cd apisix \ + && git config --global url.https://github.com/.insteadOf git://github.com/ \ + && make deps \ + && cp -v bin/apisix /usr/bin/ \ + && mv ../apisix /usr/local/apisix \ + && apk del .builddeps build-base make unzip + +FROM alpine:3.13 AS last-stage + +ARG ENABLE_PROXY +# add runtime for Apache APISIX +RUN set -x \ + && (test "${ENABLE_PROXY}" != "true" || /bin/sed -i 's,http://dl-cdn.alpinelinux.org,https://mirrors.aliyun.com,g' /etc/apk/repositories) \ + && apk add --no-cache \ + bash \ + curl \ + libstdc++ \ + openldap \ + pcre \ + tzdata + +WORKDIR /usr/local/apisix + +COPY --from=production-stage /usr/local/openresty/ /usr/local/openresty/ +COPY --from=production-stage /usr/local/apisix/ /usr/local/apisix/ +COPY --from=production-stage /usr/bin/apisix /usr/bin/apisix + +# forward request and error logs to docker log collector +RUN mkdir -p logs && touch logs/access.log && touch logs/error.log \ + && ln -sf /dev/stdout /usr/local/apisix/logs/access.log \ + && ln -sf /dev/stderr /usr/local/apisix/logs/error.log + +ENV PATH=$PATH:/usr/local/openresty/luajit/bin:/usr/local/openresty/nginx/sbin:/usr/local/openresty/bin + +EXPOSE 9080 9180 9443 + +CMD ["sh", "-c", "/usr/bin/apisix init && /usr/bin/apisix init_etcd && /usr/local/openresty/bin/openresty -p /usr/local/apisix -g 'daemon off;'"] + +STOPSIGNAL SIGQUIT + diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/utils/kube_utils.go b/CloudronPackages/APISIX/apisix-source/t/chaos/utils/kube_utils.go new file mode 100644 index 
0000000..f21036e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/utils/kube_utils.go @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "bytes" + "context" + "io" + "time" + + "github.com/chaos-mesh/chaos-mesh/api/v1alpha1" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + clientScheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/remotecommand" + kubectlScheme "k8s.io/kubectl/pkg/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" +) + +type ClientSet struct { + CtrlCli client.Client + KubeCli *kubernetes.Clientset +} + +func InitClientSet() (*ClientSet, error) { + scheme := runtime.NewScheme() + v1alpha1.AddToScheme(scheme) + clientScheme.AddToScheme(scheme) + + restConfig := config.GetConfigOrDie() + ctrlCli, err := client.New(restConfig, client.Options{Scheme: scheme}) + if err != nil { + return nil, err + } + kubeCli, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, err + } + + return &ClientSet{ctrlCli, kubeCli}, nil +} + +func 
GetPods(cli client.Client, ns string, listOption client.MatchingLabels) ([]corev1.Pod, error) { + podList := &corev1.PodList{} + err := cli.List(context.Background(), podList, client.InNamespace(ns), listOption) + if err != nil { + return nil, err + } + return podList.Items, nil +} + +func ExecInPod(cli *kubernetes.Clientset, pod *corev1.Pod, cmd string) (string, error) { + name := pod.GetName() + namespace := pod.GetNamespace() + // only get the first container, no harm for now + containerName := pod.Spec.Containers[0].Name + + req := cli.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(name). + Namespace(namespace). + SubResource("exec") + + req.VersionedParams(&corev1.PodExecOptions{ + Container: containerName, + Command: []string{"/bin/sh", "-c", cmd}, + Stdin: false, + Stdout: true, + Stderr: true, + TTY: false, + }, kubectlScheme.ParameterCodec) + + var stdout, stderr bytes.Buffer + exec, err := remotecommand.NewSPDYExecutor(config.GetConfigOrDie(), "POST", req.URL()) + if err != nil { + return "", errors.Wrapf(err, "error in creating NewSPDYExecutor for pod %s in ns: %s", name, namespace) + } + err = exec.Stream(remotecommand.StreamOptions{ + Stdin: nil, + Stdout: &stdout, + Stderr: &stderr, + }) + if stderr.String() != "" { + stderror := errors.New(stderr.String()) + return "", errors.Wrapf(stderror, "pod: %s\ncommand: %s", name, cmd) + } + if err != nil { + return "", errors.Wrapf(err, "error in streaming remote command: pod: %s in ns: %s\n command: %s", name, namespace, cmd) + } + return stdout.String(), nil +} + +// Log print log of pod +func Log(pod *corev1.Pod, c *kubernetes.Clientset, sinceTime time.Time) (string, error) { + podLogOpts := corev1.PodLogOptions{} + if !sinceTime.IsZero() { + podLogOpts.SinceTime = &metav1.Time{Time: sinceTime} + } + + req := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &podLogOpts) + podLogs, err := req.Stream() + if err != nil { + return "", errors.Wrapf(err, "failed to open log stream for pod %s/%s", 
pod.GetNamespace(), pod.GetName()) + } + defer podLogs.Close() + + buf := new(bytes.Buffer) + _, err = io.Copy(buf, podLogs) + if err != nil { + return "", errors.Wrapf(err, "failed to copy information from podLogs to buf") + } + return buf.String(), nil +} diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/utils/setup_chaos_utils.sh b/CloudronPackages/APISIX/apisix-source/t/chaos/utils/setup_chaos_utils.sh new file mode 100755 index 0000000..4b41bb6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/utils/setup_chaos_utils.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -ex + +start_minikube() { + # pin the version until chaos mesh solves https://github.com/chaos-mesh/chaos-mesh/issues/2172 + curl -LO "https://storage.googleapis.com/kubernetes-release/release/v1.21.4/bin/linux/amd64/kubectl" + chmod +x ./kubectl + sudo mv ./kubectl /usr/local/bin/kubectl + + curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube_latest_amd64.deb + sudo dpkg -i --force-architecture minikube_latest_amd64.deb + minikube start --kubernetes-version "v1.21.4" +} + +modify_config() { + DNS_IP=$(kubectl get svc -n kube-system -l k8s-app=kube-dns -o 'jsonpath={..spec.clusterIP}') + echo "dns_resolver: + - ${DNS_IP} +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - \"http://etcd.default.svc.cluster.local:2379\" +plugin_attr: + prometheus: + enable_export_server: false + " > ./conf/config.yaml +} + +port_forward() { + apisix_pod_name=$(kubectl get pod -l app=apisix-gw -o 'jsonpath={.items[0].metadata.name}') + nohup kubectl port-forward svc/apisix-gw-lb 9080:9080 >/dev/null 2>&1 & + nohup kubectl port-forward svc/apisix-gw-lb 9180:9180 >/dev/null 2>&1 & + nohup kubectl port-forward $apisix_pod_name 9091:9091 >/dev/null 2>&1 & + ps aux | grep '[p]ort-forward' +} + +"$@" diff --git a/CloudronPackages/APISIX/apisix-source/t/chaos/utils/utils.go b/CloudronPackages/APISIX/apisix-source/t/chaos/utils/utils.go new file mode 100644 index 0000000..207a7b9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/chaos/utils/utils.go @@ -0,0 +1,292 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" + + "github.com/gavv/httpexpect" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" +) + +var ( + token = "edd1c9f034335f136f87ad84b625c8f1" + // TODO: refactor the code. We should move the endpoint from the expect to the http call. + // So we don't need to remember to pass the correct expect. + Host = "http://127.0.0.1:9180" + DataPanelHost = "http://127.0.0.1:9080" + PrometheusHost = "http://127.0.0.1:9080" + setRouteBody = `{ + "uri": "/get", + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "httpbin.default.svc.cluster.local:8000": 1 + }, + "type": "roundrobin" + } + }` + ignoreErrorFuncMap = map[string]func(e *httpexpect.Expect) *httpexpect.Response{ + http.MethodGet: GetRouteIgnoreError, + http.MethodPut: SetRouteIgnoreError, + } +) + +type httpTestCase struct { + E *httpexpect.Expect + Method string + Path string + Body string + Headers map[string]string + IgnoreError bool + ExpectStatus int + ExpectBody string + ExpectStatusRange httpexpect.StatusRange +} + +func caseCheck(tc httpTestCase) *httpexpect.Response { + e := tc.E + var req *httpexpect.Request + switch tc.Method { + case http.MethodGet: + req = e.GET(tc.Path) + case http.MethodPut: + req = e.PUT(tc.Path) + case http.MethodDelete: + req = e.DELETE(tc.Path) + default: + panic("invalid HTTP method") + } + + if req == nil { + panic("fail to init request") + } + for key, val := range tc.Headers { + req.WithHeader(key, val) + } + if tc.Body != "" { + req.WithText(tc.Body) + } + + 
resp := req.Expect() + if tc.IgnoreError { + return resp + } + + if tc.ExpectStatus != 0 { + resp.Status(tc.ExpectStatus) + } + + if tc.ExpectStatusRange != 0 { + resp.StatusRange(tc.ExpectStatusRange) + } + + if tc.ExpectBody != "" { + resp.Body().Contains(tc.ExpectBody) + } + + return resp +} + +func SetRoute(e *httpexpect.Expect, expectStatusRange httpexpect.StatusRange) *httpexpect.Response { + return caseCheck(httpTestCase{ + E: e, + Method: http.MethodPut, + Path: "/apisix/admin/routes/1", + Headers: map[string]string{"X-API-KEY": token}, + Body: setRouteBody, + ExpectStatusRange: expectStatusRange, + }) +} + +func SetRouteIgnoreError(e *httpexpect.Expect) *httpexpect.Response { + return caseCheck(httpTestCase{ + E: e, + Method: http.MethodPut, + Path: "/apisix/admin/routes/1", + Headers: map[string]string{"X-API-KEY": token}, + Body: setRouteBody, + IgnoreError: true, + }) +} + +func GetRoute(e *httpexpect.Expect, expectStatus int) *httpexpect.Response { + return caseCheck(httpTestCase{ + E: e, + Method: http.MethodGet, + Path: "/get", + ExpectStatus: expectStatus, + }) +} + +func GetRouteIgnoreError(e *httpexpect.Expect) *httpexpect.Response { + return caseCheck(httpTestCase{ + E: e, + Method: http.MethodGet, + Path: "/get", + IgnoreError: true, + }) +} + +func GetRouteList(e *httpexpect.Expect, expectStatus int) *httpexpect.Response { + return caseCheck(httpTestCase{ + E: e, + Method: http.MethodGet, + Path: "/apisix/admin/routes", + Headers: map[string]string{"X-API-KEY": token}, + ExpectStatus: expectStatus, + ExpectBody: "httpbin.default.svc.cluster.local", + }) +} + +func DeleteRoute(e *httpexpect.Expect) *httpexpect.Response { + return caseCheck(httpTestCase{ + E: e, + Method: http.MethodDelete, + Path: "/apisix/admin/routes/1", + Headers: map[string]string{"X-API-KEY": token}, + }) +} + +func SetPrometheusMetricsPublicAPI(e *httpexpect.Expect) *httpexpect.Response { + return caseCheck(httpTestCase{ + E: e, + Method: http.MethodPut, + Path: 
"/apisix/admin/routes/metrics", + Headers: map[string]string{"X-API-KEY": token}, + Body: `{ + "uri": "/apisix/prometheus/metrics", + "plugins": { + "public-api": {} + }, + "upstream": { + "nodes": { + "httpbin.default.svc.cluster.local:8000": 1 + }, + "type": "roundrobin" + } + }`, + }) +} + +func TestPrometheusEtcdMetric(e *httpexpect.Expect, expectEtcd int) *httpexpect.Response { + return caseCheck(httpTestCase{ + E: e, + Method: http.MethodGet, + Path: "/apisix/prometheus/metrics", + ExpectBody: fmt.Sprintf("apisix_etcd_reachable %d", expectEtcd), + }) +} + +// get the first line which contains the key +func getPrometheusMetric(e *httpexpect.Expect, key string) string { + resp := caseCheck(httpTestCase{ + E: e, + Method: http.MethodGet, + Path: "/apisix/prometheus/metrics", + }) + resps := strings.Split(resp.Body().Raw(), "\n") + var targetLine string + for _, line := range resps { + if strings.Contains(line, key) { + targetLine = line + break + } + } + targetSlice := strings.Fields(targetLine) + gomega.Ω(len(targetSlice)).Should(gomega.BeNumerically("==", 2)) + return targetSlice[1] +} + +func GetEgressBandwidthPerSecond(e *httpexpect.Expect) (float64, float64) { + key := "apisix_bandwidth{type=\"egress\"," + bandWidthString := getPrometheusMetric(e, key) + bandWidthStart, err := strconv.ParseFloat(bandWidthString, 64) + gomega.Expect(err).To(gomega.BeNil()) + // after etcd got killed, it would take longer time to get the metrics + // so need to calculate the duration + timeStart := time.Now() + + time.Sleep(10 * time.Second) + bandWidthString = getPrometheusMetric(e, key) + bandWidthEnd, err := strconv.ParseFloat(bandWidthString, 64) + gomega.Expect(err).To(gomega.BeNil()) + duration := time.Since(timeStart) + + return bandWidthEnd - bandWidthStart, duration.Seconds() +} + +func GetSilentHttpexpectClient() *httpexpect.Expect { + return httpexpect.WithConfig(httpexpect.Config{ + BaseURL: Host, + Reporter: httpexpect.NewAssertReporter(ginkgo.GinkgoT()), + 
Printers: []httpexpect.Printer{ + newSilentPrinter(ginkgo.GinkgoT()), + }, + }) +} + +func WaitUntilMethodSucceed(e *httpexpect.Expect, method string, interval int) { + f, ok := ignoreErrorFuncMap[method] + gomega.Expect(ok).To(gomega.BeTrue()) + resp := f(e) + if resp.Raw().StatusCode != http.StatusOK { + for i := range [60]int{} { + timeWait := fmt.Sprintf("wait for %ds\n", i*interval) + fmt.Fprint(ginkgo.GinkgoWriter, timeWait) + resp = f(e) + if resp.Raw().StatusCode != http.StatusOK { + time.Sleep(5 * time.Second) + } else { + break + } + } + } + gomega.Ω(resp.Raw().StatusCode).Should(gomega.BeNumerically("==", http.StatusOK)) +} + +func RoughCompare(a float64, b float64) bool { + ratio := a / b + if ratio < 1.3 && ratio > 0.7 { + return true + } + return false +} + +type silentPrinter struct { + logger httpexpect.Logger +} + +func newSilentPrinter(logger httpexpect.Logger) silentPrinter { + return silentPrinter{logger} +} + +// Request implements Printer.Request. +func (p silentPrinter) Request(req *http.Request) { +} + +// Response implements Printer.Response. +func (silentPrinter) Response(*http.Response, time.Duration) { +} diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/cli.t b/CloudronPackages/APISIX/apisix-source/t/cli/cli.t new file mode 100644 index 0000000..da86341 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/cli.t @@ -0,0 +1,60 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# unit test for cli module +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +$ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: local_dns_resolver +--- config + location /t { + content_by_lua_block { + local local_dns_resolver = require("apisix.cli.ops").local_dns_resolver + local json_encode = require("toolkit.json").encode + ngx.say(json_encode(local_dns_resolver("$TEST_NGINX_HTML_DIR/resolv.conf"))) + } + } +--- user_files +>>> resolv.conf +# This file was automatically generated. +nameserver 172.27.0.1 + +nameserver fe80::215:5dff:fec5:8e1d +--- response_body +["172.27.0.1","fe80::215:5dff:fec5:8e1d"] diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/cli_envsubst_confusion.t b/CloudronPackages/APISIX/apisix-source/t/cli/cli_envsubst_confusion.t new file mode 100644 index 0000000..16d65e0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/cli_envsubst_confusion.t @@ -0,0 +1,111 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); + +$ENV{SOME_STRING_VALUE_BUT_DIFFERENT} = 'astringvaluebutdifferent'; +$ENV{SOME_STRING_VALUE} = 'astringvalue'; + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + +our $apisix_yaml = <<_EOC_; +upstreams: + - id: 1 + nodes: + - host: 127.0.0.1 + port: 1980 + weight: 1 +routes: + - uri: /hello + upstream_id: 1 + plugins: + response-rewrite: + headers: + set: + X-Some-String-Value-But-Different: Different \${{SOME_STRING_VALUE_BUT_DIFFERENT}} + X-Some-String-Value: \${{SOME_STRING_VALUE}} +#END +_EOC_ + +our $response_headers_correct = <<_EOC_; +X-Some-String-Value-But-Different: Different astringvaluebutdifferent +X-Some-String-Value: astringvalue +_EOC_ + +our $response_headers_INCORRECT = <<_EOC_; +X-Some-String-Value-But-Different: Different astringvalue +X-Some-String-Value: astringvalue +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: assignment style, the PREFIX 1st - incorrect +--- main_config +env SOME_STRING_VALUE=astringvalue; +env SOME_STRING_VALUE_BUT_DIFFERENT=astringvaluebutdifferent; +--- yaml_config eval: $::yaml_config +--- apisix_yaml eval: $::apisix_yaml +--- response_headers eval: 
$::response_headers_INCORRECT + + + +=== TEST 2: assignment style, the DIFF 1st - correct +--- main_config +env SOME_STRING_VALUE_BUT_DIFFERENT=astringvaluebutdifferent; +env SOME_STRING_VALUE=astringvalue; +--- yaml_config eval: $::yaml_config +--- apisix_yaml eval: $::apisix_yaml +--- response_headers eval: $::response_headers_correct + + + +=== TEST 3: declaration style, the PREFIX 1st - correct +--- main_config +env SOME_STRING_VALUE; +env SOME_STRING_VALUE_BUT_DIFFERENT; +--- yaml_config eval: $::yaml_config +--- apisix_yaml eval: $::apisix_yaml +--- response_headers eval: $::response_headers_correct + + + +=== TEST 4: declaration style, the DIFF 1st - also correct +--- main_config +env SOME_STRING_VALUE_BUT_DIFFERENT; +env SOME_STRING_VALUE; +--- yaml_config eval: $::yaml_config +--- apisix_yaml eval: $::apisix_yaml +--- response_headers eval: $::response_headers_correct diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/common.sh b/CloudronPackages/APISIX/apisix-source/t/cli/common.sh new file mode 100644 index 0000000..a4e2dea --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/common.sh @@ -0,0 +1,43 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# 'make init' operates scripts and related configuration files in the current directory +# The 'apisix' command is a command in the /usr/local/apisix, +# and the configuration file for the operation is in the /usr/local/apisix/conf + +set -ex + +check_failure() { + cat logs/error.log +} + +clean_up() { + if [ $? -gt 0 ]; then + check_failure + fi + make stop || true + git checkout conf/config.yaml +} + +trap clean_up EXIT + +exit_if_not_customed_nginx() { + openresty -V 2>&1 | grep apisix-nginx-module || exit 0 +} + +rm logs/error.log || true # clear previous error log +unset APISIX_PROFILE diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/docker-compose-etcd-cluster.yaml b/CloudronPackages/APISIX/apisix-source/t/cli/docker-compose-etcd-cluster.yaml new file mode 100644 index 0000000..2931b7c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/docker-compose-etcd-cluster.yaml @@ -0,0 +1,72 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +version: "3.7" + +services: + etcd0: + image: "gcr.io/etcd-development/etcd:v3.4.15" + container_name: etcd0 + ports: + - "23800:2380" + - "23790:2379" + environment: + - ALLOW_NONE_AUTHENTICATION=yes + - ETCD_NAME=etcd0 + - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380 + - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379 + - ETCD_ADVERTISE_CLIENT_URLS=http://127.0.0.1:23790 + - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://etcd0:2380 + - ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster + - ETCD_INITIAL_CLUSTER=etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 + - ETCD_INITIAL_CLUSTER_STATE=new + - ETCD_ENABLE_GRPC_GATEWAY=${ETCD_ENABLE_GRPC_GATEWAY:-true} + + etcd1: + image: "gcr.io/etcd-development/etcd:v3.4.15" + container_name: etcd1 + ports: + - "23801:2380" + - "23791:2379" + environment: + - ALLOW_NONE_AUTHENTICATION=yes + - ETCD_NAME=etcd1 + - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380 + - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379 + - ETCD_ADVERTISE_CLIENT_URLS=http://127.0.0.1:23791 + - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://etcd1:2380 + - ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster + - ETCD_INITIAL_CLUSTER=etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 + - ETCD_INITIAL_CLUSTER_STATE=new + - ETCD_ENABLE_GRPC_GATEWAY=${ETCD_ENABLE_GRPC_GATEWAY:-true} + + etcd2: + image: "gcr.io/etcd-development/etcd:v3.4.15" + container_name: etcd2 + ports: + - "23802:2380" + - "23792:2379" + environment: + - ALLOW_NONE_AUTHENTICATION=yes + - ETCD_NAME=etcd2 + - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380 + - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379 + - ETCD_ADVERTISE_CLIENT_URLS=http://127.0.0.1:23792 + - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://etcd2:2380 + - ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster + - ETCD_INITIAL_CLUSTER=etcd0=http://etcd0:2380,etcd1=http://etcd1:2380,etcd2=http://etcd2:2380 + - ETCD_INITIAL_CLUSTER_STATE=new + - ETCD_ENABLE_GRPC_GATEWAY=${ETCD_ENABLE_GRPC_GATEWAY:-true} diff --git 
a/CloudronPackages/APISIX/apisix-source/t/cli/test_access_log.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_access_log.sh new file mode 100755 index 0000000..736b987 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_access_log.sh @@ -0,0 +1,262 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# log format + +git checkout conf/config.yaml + +echo ' +nginx_config: + http: + access_log_format: "$remote_addr - $remote_user [$time_local] $http_host test_access_log_format" +' > conf/config.yaml + +make init + +grep "test_access_log_format" conf/nginx.conf > /dev/null +if [ ! $? 
-eq 0 ]; then + echo "failed: access_log_format in nginx.conf doesn't change" + exit 1 +fi + +echo "passed: access_log_format in nginx.conf is ok" + +# check enable access log + +echo ' +nginx_config: + http: + enable_access_log: true + access_log_format: "$remote_addr - $remote_user [$time_local] $http_host test_enable_access_log_true" +' > conf/config.yaml + +make init + +count_test_access_log=`grep -c "test_enable_access_log_true" conf/nginx.conf || true` +if [ $count_test_access_log -eq 0 ]; then + echo "failed: nginx.conf file doesn't find access_log_format when enable access log" + exit 1 +fi + +count_access_log_off=`grep -c "access_log off;" conf/nginx.conf || true` +if [ $count_access_log_off -eq 5 ]; then + echo "failed: nginx.conf file find access_log off; when enable access log" + exit 1 +fi + +make run +sleep 0.1 +curl http://127.0.0.1:9080/hi +sleep 4 +tail -n 1 logs/access.log > output.log + +count_grep=`grep -c "test_enable_access_log_true" output.log || true` +if [ $count_grep -eq 0 ]; then + echo "failed: not found test_enable_access_log in access.log " + exit 1 +fi + +make stop + +echo ' +nginx_config: + http: + enable_access_log: false + access_log_format: "$remote_addr - $remote_user [$time_local] $http_host test_enable_access_log_false" +' > conf/config.yaml + +make init + +count_test_access_log=`grep -c "test_enable_access_log_false" conf/nginx.conf || true` +if [ $count_test_access_log -eq 1 ]; then + echo "failed: nginx.conf file find access_log_format when disable access log" + exit 1 +fi + +make run +sleep 0.1 +curl http://127.0.0.1:9080/hi +sleep 4 +tail -n 1 logs/access.log > output.log + +count_grep=`grep -c "test_enable_access_log_false" output.log || true` +if [ $count_grep -eq 1 ]; then + echo "failed: found test_enable_access_log in access.log " + exit 1 +fi + +make stop + +echo "passed: enable_access_log is ok" + +# access log with JSON format + +echo ' +nginx_config: + http: + access_log_format: |- + {"@timestamp": 
"$time_iso8601", "client_ip": "$remote_addr", "status": "$status"} + access_log_format_escape: json +' > conf/config.yaml + +make init +make run +sleep 0.1 +curl http://127.0.0.1:9080/hello2 +sleep 4 +tail -n 1 logs/access.log > output.log + +if [ `grep -c '"client_ip": "127.0.0.1"' output.log` -eq '0' ]; then + echo "failed: invalid JSON log in access log" + exit 1 +fi + +if [ `grep -c 'main escape=json' conf/nginx.conf` -eq '0' ]; then + echo "failed: not found \"escape=json\" in conf/nginx.conf" + exit 1 +fi + +make stop + +echo "passed: access log with JSON format" + +# check uninitialized variable in access log when access admin +git checkout conf/config.yaml + +rm logs/error.log +make init +make run + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +make stop + +if [ ! $code -eq 200 ]; then + echo "failed: failed to access admin" + exit 1 +fi + +if grep -E 'using uninitialized ".+" variable while logging request' logs/error.log; then + echo "failed: uninitialized variable found during writing access log" + exit 1 +fi + +echo "pass: uninitialized variable not found during writing access log" + +# don't log uninitialized access log variable when the HTTP request is malformed + +git checkout conf/config.yaml + +rm logs/error.log +./bin/apisix start +sleep 1 # wait for apisix starts + +curl -v -k -i -m 20 -o /dev/null -s https://127.0.0.1:9080 || true +if grep -E 'using uninitialized ".+" variable while logging request' logs/error.log; then + echo "failed: log uninitialized access log variable when the HTTP request is malformed" + exit 1 +fi + +make stop + +echo "don't log uninitialized access log variable when the HTTP request is malformed" + +# TLS upstream + +echo " +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: 
'../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' +nginx_config: + http: + access_log_format: '\"\$upstream_scheme://\$upstream_host\" \$ssl_server_name' +" > conf/config.yaml + +make run +sleep 2 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl -k -i https://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d \ + '{"uri":"/apisix/admin/routes/1", "upstream":{"nodes":{"localhost:9180":1},"scheme":"https","type":"roundrobin","pass_host":"node"}}' + +curl -i http://127.0.0.1:9080/apisix/admin/routes/1 +sleep 4 +tail -n 2 logs/access.log > output.log + +# APISIX +if ! grep '"https://localhost:9180" -' output.log; then + echo "failed: should find upstream scheme" + cat output.log + exit 1 +fi + +# admin +if ! grep '"http://localhost:9180" localhost' output.log; then + echo "failed: should find upstream scheme" + cat output.log + exit 1 +fi + +make stop +echo "passed: should find upstream scheme" + +# check stream logs +echo ' +apisix: + proxy_mode: stream + stream_proxy: # UDP proxy + udp: + - "127.0.0.1:9200" + +nginx_config: + stream: + enable_access_log: true + access_log_format: "$remote_addr $protocol test_stream_access_log_format" +' > conf/config.yaml + +make init + +grep "test_stream_access_log_format" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: stream access_log_format in nginx.conf doesn't change" + exit 1 +fi +echo "passed: stream access_log_format in nginx.conf is ok" + +# check if logs are being written +make run +sleep 0.1 +# sending single udp packet +echo -n "hello" | nc -4u -w1 localhost 9200 +sleep 4 +tail -n 1 logs/access_stream.log > output.log + +if ! 
grep '127.0.0.1 UDP test_stream_access_log_format' output.log; then + echo "failed: should have found udp log entry" + cat output.log + exit 1 +fi +echo "passed: logs are being dumped for stream proxy" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_admin.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_admin.sh new file mode 100755 index 0000000..1298cc1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_admin.sh @@ -0,0 +1,492 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# check admin https enabled + +git checkout conf/config.yaml + +echo " +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' +" > conf/config.yaml + +make init + +grep "listen 0.0.0.0:9180 ssl" conf/nginx.conf > /dev/null +if [ ! $? 
-eq 0 ]; then + echo "failed: failed to enable https for admin" + exit 1 +fi + +make run + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +echo "admin key is " $admin_key +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +if [ ! $code -eq 200 ]; then + echo "failed: failed to enable https for admin" + exit 1 +fi + +make stop + +echo "passed: admin https enabled" + +echo ' +apisix: + enable_admin: true +deployment: + admin: + admin_listen: + ip: 127.0.0.2 + port: 9181 +' > conf/config.yaml + +make init + +if ! grep "listen 127.0.0.2:9181;" conf/nginx.conf > /dev/null; then + echo "failed: customize address for admin server" + exit 1 +fi + +make run + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.2:9181/apisix/admin/routes -H "X-API-KEY: $admin_key") + +if [ ! $code -eq 200 ]; then + echo "failed: failed to access admin" + exit 1 +fi + +make stop + +# rollback to the default + +git checkout conf/config.yaml + +make init + +set +ex + +grep "listen 0.0.0.0:9180 ssl" conf/nginx.conf > /dev/null +if [ ! $?
-eq 1 ]; then + echo "failed: failed to rollback to the default admin config" + exit 1 +fi + +set -ex + +echo "passed: rollback to the default admin config" + +# set allow_admin in conf/config.yaml + +echo " +deployment: + admin: + allow_admin: + - 127.0.0.9 +" > conf/config.yaml + +make init + +count=`grep -c "allow 127.0.0.9" conf/nginx.conf` +if [ $count -eq 0 ]; then + echo "failed: not found 'allow 127.0.0.9;' in conf/nginx.conf" + exit 1 +fi + +echo " +deployment: + admin: + allow_admin: ~ +" > conf/config.yaml + +make init + +count=`grep -c "allow all;" conf/nginx.conf` +if [ $count -eq 0 ]; then + echo "failed: not found 'allow all;' in conf/nginx.conf" + exit 1 +fi + +echo "passed: empty allow_admin in conf/config.yaml" + +# missing admin key, allow any IP to access admin api + +git checkout conf/config.yaml + +echo ' +deployment: + admin: + admin_key: ~ + allow_admin: ~ +' > conf/config.yaml + +make init > output.log 2>&1 | true + +grep -E "ERROR: missing valid Admin API token." output.log > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: should show 'ERROR: missing valid Admin API token.'" + exit 1 +fi + +echo "pass: missing admin key and show ERROR message" + +# missing admin key, only allow 127.0.0.0/24 to access admin api + +echo ' +deployment: + admin: + admin_key: ~ + allow_admin: + - 127.0.0.0/24 +' > conf/config.yaml + +make init > output.log 2>&1 | true + +if grep -E "ERROR: missing valid Admin API token." output.log > /dev/null; then + echo "failed: should not show 'ERROR: missing valid Admin API token.'" + exit 1 +fi + +echo ' +deployment: + admin: + admin_key: ~ + allow_admin: + - 0.0.0.0/0 + - 127.0.0.0/24 +' > conf/config.yaml + +make init > output.log 2>&1 | true + +if ! grep -E "ERROR: missing valid Admin API token." 
output.log > /dev/null; then + echo "failed: should show 'ERROR: missing valid Admin API token.'" + exit 1 +fi + +echo "pass: missing admin key and only allow 127.0.0.0/24 to access admin api" + +# allow any IP to access admin api with empty admin_key, when admin_key_required=true + +git checkout conf/config.yaml + +echo ' +deployment: + admin: + admin_key_required: true + admin_key: ~ + allow_admin: + - 0.0.0.0/0 +' > conf/config.yaml + +make init > output.log 2>&1 | true + +if ! grep -E "ERROR: missing valid Admin API token." output.log > /dev/null; then + echo "failed: should show 'ERROR: missing valid Admin API token.'" + exit 1 +fi + +echo ' +deployment: + admin: + admin_key_required: false + admin_key: ~ + allow_admin: + - 0.0.0.0/0 +' > conf/config.yaml + +make init > output.log 2>&1 | true + +if grep -E "ERROR: missing valid Admin API token." output.log > /dev/null; then + echo "failed: should not show 'ERROR: missing valid Admin API token.'" + exit 1 +fi + +if ! grep -E "Warning! Admin key is bypassed" output.log > /dev/null; then + echo "failed: should show 'Warning! Admin key is bypassed'" + exit 1 +fi + +echo ' +deployment: + admin: + admin_key_required: invalid-value +' > conf/config.yaml + +make init > output.log 2>&1 | true + +if ! grep -E "path\[deployment->admin->admin_key_required\] expect: boolean, but got: string" output.log > /dev/null; then + echo "check admin_key_required value failed: should show 'expect: boolean, but got: string'" + exit 1 +fi + +echo "pass: allow empty admin_key, when admin_key_required=false" + +# admin api, allow any IP but use default key + +echo ' +deployment: + admin: + allow_admin: ~ + admin_key: + - name: "admin" + key: '' + role: admin +' > conf/config.yaml + +make init > output.log 2>&1 | true + +grep -E "WARNING: using empty Admin API." output.log > /dev/null +if [ ! $?
-eq 0 ]; then + echo "failed: need to show 'WARNING: using empty Admin API.'" + exit 1 +fi + +echo "pass: show WARNING message if the user uses empty key" + +# admin_listen set +echo ' +deployment: + admin: + admin_listen: + port: 9180 +' > conf/config.yaml + +rm logs/error.log +make init +make run + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +make stop + +if [ ! $code -eq 200 ]; then + echo "failed: failed to access admin" + exit 1 +fi + +if grep -E 'using uninitialized ".+" variable while logging request' logs/error.log; then + echo "failed: uninitialized variable found during writing access log" + exit 1 +fi + +echo "pass: uninitialized variable not found during writing access log (admin_listen set)" + +# Admin API can only be used with etcd config_provider +## if role is data_plane, and config_provider is yaml, then enable_admin is set to false +echo ' +apisix: + enable_admin: true +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if echo "$out" | grep "Admin API can only be used with etcd config_provider"; then + echo "failed: Admin API can only be used with etcd config_provider" + exit 1 +fi + +echo "passed: Admin API can only be used with etcd config_provider" + +# disable Admin API and init plugins syncer +echo ' +apisix: + enable_admin: false +' > conf/config.yaml + +rm logs/error.log +make init +make run +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') + + +make init + +if grep -E "failed to fetch data from etcd" logs/error.log; then + echo "failed: should sync /apisix/plugins from etcd when disabling admin normal" + exit 1 +fi + +make stop + +echo "pass: sync /apisix/plugins from etcd when disabling admin successfully" + + + +# ignore changes to
/apisix/plugins/ due to init_etcd +echo ' +apisix: + enable_admin: true +plugins: + - public-api + - node-status +nginx_config: + error_log_level: info +' > conf/config.yaml + +rm logs/error.log +make init +make run + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +# initialize node-status public API routes #1 +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/node-status \ + -H "X-API-KEY: $admin_key" \ + -d "{ + \"uri\": \"/apisix/status\", + \"plugins\": { + \"public-api\": {} + } + }") +if [ ! $code -lt 300 ]; then + echo "failed: initialize node status public API failed #1" + exit 1 +fi + +sleep 0.5 + +# first time check node status api +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/status) +if [ ! $code -eq 200 ]; then + echo "failed: first time check node status api failed #1" + exit 1 +fi + +# mock another instance init etcd dir +make init +sleep 1 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +# initialize node-status public API routes #2 +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/node-status \ + -H "X-API-KEY: $admin_key" \ + -d "{ + \"uri\": \"/apisix/status\", + \"plugins\": { + \"public-api\": {} + } + }") +if [ ! $code -eq 200 ]; then + echo "failed: initialize node status public API failed #2" + exit 1 +fi + +sleep 0.5 + +# second time check node status api +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/status) +if [ ! 
$code -eq 200 ]; then + echo "failed: second time check node status api failed #1" + exit 1 +fi + +make stop + +echo "pass: ignore changes to /apisix/plugins/ due to init_etcd successfully" + + +# accept changes to /apisix/plugins when enable_admin is false +echo ' +apisix: + enable_admin: false +plugins: + - public-api + - node-status +stream_plugins: +' > conf/config.yaml + +rm logs/error.log +make init +make run +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') + + +# first time check node status api +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/status) +if [ ! $code -eq 200 ]; then + echo "failed: first time check node status api failed #2" + exit 1 +fi + +sleep 0.5 + +# check http plugins load list +if ! grep logs/error.log -E -e 'new plugins: {"public-api":true,"node-status":true}' \ + -e 'new plugins: {"node-status":true,"public-api":true}'; then + echo "failed: first time load http plugins list failed" + exit 1 +fi + +# check stream plugins(no plugins under stream, it will be added below) +if grep -E 'failed to read stream plugin list from local file' logs/error.log; then + echo "failed: first time load stream plugins list failed" + exit 1 +fi + +# mock another instance add /apisix/plugins +res=$(etcdctl put "/apisix/plugins" '[{"name":"node-status"},{"name":"example-plugin"},{"name":"public-api"},{"stream":true,"name":"mqtt-proxy"}]') +if [[ $res != "OK" ]]; then + echo "failed: failed to set /apisix/plugins to add more plugins" + exit 1 +fi + +sleep 0.5 + +# second time check node status api +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/status) +if [ ! $code -eq 200 ]; then + echo "failed: second time check node status api failed #2" + exit 1 +fi + +# check http plugins load list +if ! 
grep logs/error.log -E -e 'new plugins: {"public-api":true,"node-status":true}' \ + -e 'new plugins: {"node-status":true,"public-api":true}'; then + echo "failed: second time load http plugins list failed" + exit 1 +fi + +# check stream plugins load list +if ! grep -E 'new plugins: {.*example-plugin' logs/error.log; then + echo "failed: second time load stream plugins list failed" + exit 1 +fi + + +if grep -E 'new plugins: {}' logs/error.log; then + echo "failed: second time load plugins list failed" + exit 1 +fi + +make stop + +echo "pass: accept changes to /apisix/plugins successfully" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_admin_mtls.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_admin_mtls.sh new file mode 100755 index 0000000..7f79b4e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_admin_mtls.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns +echo ' +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: "../t/certs/mtls_server.crt" + admin_ssl_cert_key: "../t/certs/mtls_server.key" + admin_ssl_ca_cert: "../t/certs/mtls_ca.crt" + +' > conf/config.yaml + +make run + +sleep 1 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +# correct certs +code=$(curl -i -o /dev/null -s -w %{http_code} --cacert ./t/certs/mtls_ca.crt --key ./t/certs/mtls_client.key --cert ./t/certs/mtls_client.crt -H "X-API-KEY: $admin_key" https://admin.apisix.dev:9180/apisix/admin/routes) +if [ ! "$code" -eq 200 ]; then + echo "failed: failed to enabled mTLS for admin" + exit 1 +fi + +# skip +code=$(curl -i -o /dev/null -s -w %{http_code} -k -H "X-API-KEY: $admin_key" https://admin.apisix.dev:9180/apisix/admin/routes) +if [ ! "$code" -eq 400 ]; then + echo "failed: failed to enabled mTLS for admin" + exit 1 +fi + +echo "passed: enabled mTLS for admin" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_admin_ui.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_admin_ui.sh new file mode 100755 index 0000000..91b3ce4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_admin_ui.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# check admin ui enabled + +git checkout conf/config.yaml + +make init + +grep "location ^~ /ui/" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: failed to enable embedded admin ui" + exit 1 +fi + +make run + +## check /ui redirects to /ui/ + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/ui) +if [ ! $code -eq 301 ]; then + echo "failed: failed to redirect /ui to /ui/" + exit 1 +fi + +## check /ui/ accessible + +mkdir -p ui/assets +echo "test_html" > ui/index.html +echo "test_js" > ui/assets/test.js +echo "test_css" > ui/assets/test.css + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/ui/) +if [ ! $code -eq 200 ]; then + echo "failed: /ui/ not accessible" + exit 1 +fi + +## check /ui/index.html accessible + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/ui/index.html) +if [ ! $code -eq 200 ]; then + echo "failed: /ui/index.html not accessible" + exit 1 +fi + +## check /ui/assets/test.js accessible + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/ui/assets/test.js) +if [ ! $code -eq 200 ]; then + echo "failed: /ui/assets/test.js not accessible" + exit 1 +fi + +## check /ui/assets/test.css accessible + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/ui/assets/test.css) +if [ ! 
$code -eq 200 ]; then + echo "failed: /ui/assets/test.css not accessible" + exit 1 +fi + +## check /ui/ single-page-application fallback + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/ui/not_exist) +if [ ! $code -eq 200 ]; then + echo "failed: /ui/not_exist not accessible" + exit 1 +fi + +make stop + +# test ip restriction + +git checkout conf/config.yaml + +echo " +deployment: + admin: + enable_admin_ui: true + allow_admin: + - 1.1.1.1/32 +" > conf/config.yaml + +make run + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/ui/) +if [ ! $code -eq 403 ]; then + echo "failed: ip restriction not working, expected 403, got $code" + exit 1 +fi + +make stop + +# test admin ui disabled + +git checkout conf/config.yaml + +echo " +deployment: + admin: + enable_admin_ui: false +" > conf/config.yaml + +make init + +#### When grep cannot find the value, it uses 1 as the exit code. +#### Due to the use of set -e, any non-zero exit will terminate the +#### script, so grep is written inside the if statement here. +if grep "location ^~ /ui/" conf/nginx.conf > /dev/null; then + echo "failed: failed to disable embedded admin ui" + exit 1 +fi + +# test admin UI explicitly enabled + +git checkout conf/config.yaml + +echo " +deployment: + admin: + enable_admin_ui: true +" > conf/config.yaml + +make init + +if ! grep "location ^~ /ui/" conf/nginx.conf > /dev/null; then + echo "failed: failed to explicitly enable embedded admin ui" + exit 1 +fi diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_apisix_mirror.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_apisix_mirror.sh new file mode 100755 index 0000000..65949d6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_apisix_mirror.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +exit_if_not_customed_nginx + +echo ' +nginx_config: + http: + enable_access_log: false +' > conf/config.yaml + +rm logs/error.log || true +make init +make run +sleep 0.1 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + }, + "uri": "/get" +}' + +sleep 0.1 + +curl -k -i http://127.0.0.1:9080/get + +sleep 0.1 + +if ! 
grep "apisix_mirror_on_demand on;" conf/nginx.conf > /dev/null; then + echo "failed: apisix_mirror_on_demand should on when running on apisix-runtime" + exit 1 +fi + +if grep -E "invalid URL prefix" logs/error.log > /dev/null; then + echo "failed: apisix_mirror_on_demand should on when running on apisix-runtime" + exit 1 +fi + +echo "passed: apisix_mirror_on_demand is on when running on apisix-runtime" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_ci_only.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_ci_only.sh new file mode 100755 index 0000000..d7d9f5b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_ci_only.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file is like other test_*.sh, but requires extra dependencies which +# you don't need in daily development. + +. ./t/cli/common.sh + +# check error handling when connecting to old etcd +git checkout conf/config.yaml + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:3379" + prefix: "/apisix" +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! 
echo "$out" | grep 'etcd cluster version 3.3.0 is less than the required version 3.4.0'; then + echo "failed: properly handle the error when connecting to old etcd" + exit 1 +fi + +echo "passed: properly handle the error when connecting to old etcd" + +# It is forbidden to run apisix under the "/root" directory. +git checkout conf/config.yaml + +mkdir /root/apisix + +cp -r ./* /root/apisix +cd /root/apisix +make init + +out=$(make run 2>&1 || true) +if ! echo "$out" | grep "Error: It is forbidden to run APISIX in the /root directory"; then + echo "failed: should echo It is forbidden to run APISIX in the /root directory" + exit 1 +fi + +cd - + +echo "passed: successfully prohibit APISIX from running in the /root directory" + +rm -rf /root/apisix diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_cmd.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_cmd.sh new file mode 100755 index 0000000..a5375dc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_cmd.sh @@ -0,0 +1,224 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +git checkout conf/config.yaml + +# check restart with old nginx.pid exist +echo "-1" > logs/nginx.pid +out=$(./bin/apisix start 2>&1 || true) +if echo "$out" | grep "the old APISIX is still running"; then + rm logs/nginx.pid + echo "failed: should reject bad nginx.pid" + exit 1 +fi + +./bin/apisix stop +sleep 0.5 +rm logs/nginx.pid || true + +# check no corresponding process +make run +oldpid=$(< logs/nginx.pid) +make stop +sleep 0.5 +echo $oldpid > logs/nginx.pid +out=$(make run || true) +if ! echo "$out" | grep "nginx.pid exists but there's no corresponding process with pid"; then + echo "failed: should find no corresponding process" + exit 1 +fi +make stop +echo "pass: no corresponding process" + +# check running when run repeatedly +out=$(make run; make run || true) +if ! echo "$out" | grep "the old APISIX is still running"; then + echo "failed: should find APISIX running" + exit 1 +fi + +make stop +echo "pass: check APISIX running" + +# check customized config + +git checkout conf/config.yaml + +# start with not existed customized config +make init + +if ./bin/apisix start -c conf/not_existed_config.yaml; then + echo "failed: apisix still start with invalid customized config.yaml" + exit 1 +fi + +# start with customized config +echo " +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' + admin_key_required: true # Enable Admin API authentication by default for security. + admin_key: + - + name: admin # admin: write access to configurations. + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin +" > conf/customized_config.yaml + +./bin/apisix start -c conf/customized_config.yaml + +# check if .customized_config_path has been created +if [ ! 
-e conf/.customized_config_path ]; then + rm conf/customized_config.yaml + echo ".customized_config_path should exits" + exit 1 +fi + +# check if the custom config is used +code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1") +if [ ! $code -eq 200 ]; then + rm conf/customized_config.yaml + echo "failed: customized config.yaml not be used" + exit 1 +fi + +make stop + +# check if .customized_config_path has been removed +if [ -e conf/.customized_config_path ]; then + rm conf/customized_config_path.yaml + echo ".customized_config_path should be removed" + exit 1 +fi + +# start with invalied config +echo "abc" > conf/customized_config.yaml + +if ./bin/apisix start -c conf/customized_config.yaml ; then + rm conf/customized_config.yaml + echo "start should be failed" + exit 1 +fi + +# check if apisix can be started use correctly default config. (https://github.com/apache/apisix/issues/9700) +./bin/apisix start +sleep 1 +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +echo "look here" $admin_key +code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +if [ ! $code -eq 200 ]; then + rm conf/customized_config.yaml + echo "failed: should use default config" + exit 1 +fi + +make stop + +# check if apisix can be started after multiple start failures. 
(https://github.com/apache/apisix/issues/9171) +echo " +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' + etcd: + host: + - http://127.0.0.1:22379 +" > conf/customized_config.yaml + +./bin/apisix start -c conf/customized_config.yaml || true +./bin/apisix start -c conf/customized_config.yaml || true +./bin/apisix start -c conf/customized_config.yaml || true + +echo " +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' + admin_key_required: true # Enable Admin API authentication by default for security. + admin_key: + - + name: admin # admin: write access to configurations. + key: edd1c9f034335f136f87ad84b625c8f1 + role: admin +" > conf/customized_config.yaml + +./bin/apisix start -c conf/customized_config.yaml + +code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1") +if [ ! $code -eq 200 ]; then + rm conf/customized_config.yaml + echo "failed: should use default config" + exit 1 +fi + +rm conf/customized_config.yaml +echo "passed: test customized config successful" + +# test quit command +bin/apisix start + +if ! ps -ef | grep "apisix" | grep "master process" | grep -v "grep"; then + echo "apisix not started" + exit 1 +fi + +bin/apisix quit + +sleep 2 + +if ps -ef | grep "worker process is shutting down" | grep -v "grep"; then + echo "all workers should exited" + exit 1 +fi + +echo "passed: test quit command successful" + +# test reload command +bin/apisix start + +if ! 
ps -ef | grep "apisix" | grep "master process" | grep -v "grep"; then + echo "apisix not started" + exit 1 +fi + +bin/apisix reload + +sleep 3 + +if ps -ef | grep "worker process is shutting down" | grep -v "grep"; then + echo "old workers should exited" + exit 1 +fi + +echo "passed: test reload command successful" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_control.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_control.sh new file mode 100755 index 0000000..4871185 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_control.sh @@ -0,0 +1,181 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# control server +echo ' +apisix: + enable_control: true +' > conf/config.yaml + +make init + +if ! grep "listen 127.0.0.1:9090;" conf/nginx.conf > /dev/null; then + echo "failed: find default address for control server" + exit 1 +fi + +make run + +sleep 0.1 + +set +e +times=1 +code=000 +while [ $code -eq 000 ] && [ $times -lt 10 ] +do + code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9090/v1/schema) + sleep 0.2 + times=$(($times+1)) +done +set -e + +if [ ! 
$code -eq 200 ]; then + echo "failed: access control server" + exit 1 +fi + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9090/v0/schema) + +if [ ! $code -eq 404 ]; then + echo "failed: handle route not found" + exit 1 +fi + +make stop + +echo ' +apisix: + enable_control: true + control: + ip: 127.0.0.2 +' > conf/config.yaml + +make init + +if ! grep "listen 127.0.0.2:9090;" conf/nginx.conf > /dev/null; then + echo "failed: customize address for control server" + exit 1 +fi + +make run + +sleep 0.1 + +set +e +times=1 +code=000 +while [ $code -eq 000 ] && [ $times -lt 10 ] +do + code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.2:9090/v1/schema) + sleep 0.2 + times=$(($times+1)) +done +set -e + +if [ ! $code -eq 200 ]; then + echo "failed: access control server" + exit 1 +fi + +make stop + +echo ' +apisix: + enable_control: true + control: + port: 9092 +' > conf/config.yaml + +make init + +if ! grep "listen 127.0.0.1:9092;" conf/nginx.conf > /dev/null; then + echo "failed: customize address for control server" + exit 1 +fi + +make run + +sleep 0.1 + +set +e +times=1 +code=000 +while [ $code -eq 000 ] && [ $times -lt 10 ] +do + code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9092/v1/schema) + sleep 0.2 + times=$(($times+1)) +done +set -e + +if [ ! $code -eq 200 ]; then + echo "failed: access control server" + exit 1 +fi + +make stop + +echo ' +apisix: + enable_control: false +' > conf/config.yaml + +make init + +if grep "listen 127.0.0.1:9090;" conf/nginx.conf > /dev/null; then + echo "failed: disable control server" + exit 1 +fi + +echo ' +apisix: + node_listen: 9090 + enable_control: true + control: + port: 9090 +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! 
echo "$out" | grep "http listen port 9090 conflicts with control"; then + echo "failed: can't detect port conflicts" + exit 1 +fi + +echo ' +apisix: + node_listen: 9080 + enable_control: true + control: + port: 9091 +plugin_attr: + prometheus: + export_addr: + ip: "127.0.0.1" + port: 9091 +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep "prometheus port 9091 conflicts with control"; then + echo "failed: can't detect port conflicts" + exit 1 +fi + +echo "pass: access control server" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_core_config.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_core_config.sh new file mode 100755 index 0000000..f799241 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_core_config.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +echo " +nginx_config: + max_pending_timers: 10240 + max_running_timers: 2561 +" > conf/config.yaml + +make init + +count=$(grep -c "lua_max_pending_timers 10240;" conf/nginx.conf) +if [ "$count" -ne 1 ]; then + echo "failed: failed to set lua_max_pending_timers" + exit 1 +fi + +echo "passed: set lua_max_pending_timers successfully" + +count=$(grep -c "lua_max_running_timers 2561;" conf/nginx.conf) +if [ "$count" -ne 1 ]; then + echo "failed: failed to set lua_max_running_timers" + exit 1 +fi + +echo "passed: set lua_max_running_timers successfully" + +echo " +apisix: + proxy_mode: http&stream + stream_proxy: + tcp: + - addr: 9100 +nginx_config: + max_pending_timers: 10240 + max_running_timers: 2561 +" > conf/config.yaml + +make init + +count=$(grep -c "lua_max_pending_timers 10240;" conf/nginx.conf) +if [ "$count" -ne 2 ]; then + echo "failed: failed to set lua_max_pending_timers in stream proxy" + exit 1 +fi + +echo "passed: set lua_max_pending_timers successfully in stream proxy" + +count=$(grep -c "lua_max_running_timers 2561;" conf/nginx.conf) +if [ "$count" -ne 2 ]; then + echo "failed: failed to set lua_max_running_timers in stream proxy" + exit 1 +fi + +echo "passed: set lua_max_running_timers successfully in stream proxy" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_control_plane.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_control_plane.sh new file mode 100755 index 0000000..19e5152 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_control_plane.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns +echo ' +apisix: + enable_admin: false +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 +' > conf/config.yaml + +make run +sleep 1 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") + +if [ ! $code -eq 200 ]; then + echo "failed: control_plane should enable Admin API" + exit 1 +fi + +echo "passed: control_plane should enable Admin API" + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" +}' + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/c -H "X-API-KEY: $admin_key") +make stop +if [ ! 
$code -eq 404 ]; then + echo "failed: should disable request proxy" + exit 1 +fi + +echo "passed: should disable request proxy" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_data_plane.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_data_plane.sh new file mode 100755 index 0000000..14b4cc2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_data_plane.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# clean etcd data +etcdctl del / --prefix + +# data_plane does not write data to etcd +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - https://127.0.0.1:12379 + prefix: "/apisix" + timeout: 30 + tls: + verify: false +' > conf/config.yaml + +make run + +sleep 1 + +res=$(etcdctl get / --prefix | wc -l) + +if [ ! 
$res -eq 0 ]; then + echo "failed: data_plane should not write data to etcd" + exit 1 +fi + +echo "passed: data_plane does not write data to etcd" + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes -H "X-API-KEY: $admin_key") +make stop + +if [ ! $code -eq 404 ]; then + echo "failed: data_plane should not enable Admin API" + exit 1 +fi + +echo "passed: data_plane should not enable Admin API" + +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - https://127.0.0.1:12379 + prefix: "/apisix" + timeout: 30 +' > conf/config.yaml + +out=$(make run 2>&1 || true) +make stop +if ! echo "$out" | grep 'failed to load the configuration: https://127.0.0.1:12379: certificate verify failed'; then + echo "failed: should verify certificate by default" + exit 1 +fi + +echo "passed: should verify certificate by default" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_data_plane_with_readonly_etcd.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_data_plane_with_readonly_etcd.sh new file mode 100755 index 0000000..c6f164b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_data_plane_with_readonly_etcd.sh @@ -0,0 +1,178 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# clean etcd data +etcdctl del / --prefix + +# non data_plane can prepare dirs when init etcd +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + prefix: /apisix + timeout: 30 +' >conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'trying to initialize the data of etcd'; then + echo "failed: non data_plane should init the data of etcd" + exit 1 +fi +echo "passed: non data_plane can init the data of etcd" + +# start apisix to test non data_plane can work with etcd +make run +sleep 3 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl -o /dev/null -s -w %{http_code} -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [" + return function(conf, ctx) + local core = require(\"apisix.core\") + return core.response.exit(200) + end + "] + } + } +}' + +# check can access the route +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/hello) +if [ ! 
"$code" -eq 200 ]; then + echo "failed: non data_plane should be able to access the route" + exit 1 +fi +echo "passed: non data_plane can work with etcd" + +# prepare for data_plane with etcd +# stop apisix +make stop +sleep 3 + +# data_plane can skip initializing the data of etcd +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + prefix: /apisix + timeout: 30 +' >conf/config.yaml + +out=$(make init 2>&1 || true) +if echo "$out" | grep 'trying to initialize the data of etcd'; then + echo "failed: data_plane should not init the data of etcd" + exit 1 +fi +if ! echo "$out" | grep 'access from the data plane to etcd should be read-only, skip initializing the data of etcd'; then + echo "failed: data_plane should skip initializing the data of etcd" + exit 1 +fi +echo "passed: data_plane can skip initializing the data of etcd" + +# start apisix to test data_plane can work with etcd +make run +sleep 3 + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/hello) +if [ ! 
"$code" -eq 200 ]; then + echo "failed: data_plane should be able to access the route when using etcd" + exit 1 +fi +echo "passed: data_plane can work with etcd" + +# prepare for data_plane with read-only etcd +# stop apisix +make stop +sleep 3 +# add root user to help disable auth +etcdctl user add "root:test" +etcdctl role add root +etcdctl user grant-role root root +# add readonly user +etcdctl user add "apisix-data-plane:test" +etcdctl role add data-plane-role +etcdctl role grant-permission --prefix=true data-plane-role read /apisix +etcdctl user grant-role apisix-data-plane data-plane-role +# enable auth +etcdctl auth enable + +# data_plane can skip initializing the data when using read-only etcd +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + user: apisix-data-plane + password: test + prefix: /apisix + timeout: 30 +' >conf/config.yaml + +out=$(make init 2>&1 || true) +if echo "$out" | grep 'trying to initialize the data of etcd'; then + echo "failed: data_plane should not init the data of etcd (read-only)" + exit 1 +fi +if ! echo "$out" | grep 'access from the data plane to etcd should be read-only, skip initializing the data of etcd'; then + echo "failed: data_plane should skip initializing the data of etcd (read-only)" + exit 1 +fi +echo "passed: data_plane can skip initializing the data of etcd (read-only)" + +# start apisix to test data_plane can work with read-only etcd +make run +sleep 3 + +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/hello) +if [ ! 
"$code" -eq 200 ]; then + echo "failed: data_plane should be able to access the route when using read-only etcd" + exit 1 +fi +echo "passed: data_plane can work with read-only etcd" + +# clean up +etcdctl --user=root:test auth disable +etcdctl user delete apisix-data-plane +etcdctl role delete data-plane-role +etcdctl user delete root +etcdctl role delete root diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_traditional.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_traditional.sh new file mode 100755 index 0000000..d4c209b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_deployment_traditional.sh @@ -0,0 +1,124 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# HTTP +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 +' > conf/config.yaml + +make run +sleep 1 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +make stop + +if [ ! 
$code -eq 200 ]; then + echo "failed: could not connect to etcd with http enabled" + exit 1 +fi + +# Both HTTP and Stream +echo ' +apisix: + proxy_mode: http&stream + enable_admin: true + stream_proxy: + tcp: + - addr: 9100 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 +' > conf/config.yaml + +make run +sleep 1 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +make stop + +if [ ! $code -eq 200 ]; then + echo "failed: could not connect to etcd with http & stream enabled" + exit 1 +fi + +# Stream +echo ' +apisix: + enable_admin: false + proxy_mode: stream + stream_proxy: + tcp: + - addr: 9100 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - http://127.0.0.1:2379 +' > conf/config.yaml + +make run +sleep 1 +make stop + +if grep '\[error\]' logs/error.log; then + echo "failed: could not connect to etcd with stream enabled" + exit 1 +fi + +echo "passed: could connect to etcd" + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + verify: false + ' > conf/config.yaml + +out=$(make init 2>&1 || echo "ouch") +if ! 
echo "$out" | grep "bad certificate"; then + echo "failed: apisix should echo \"bad certificate\"" + exit 1 +fi + +echo "passed: certificate verify fail expectedly" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_dns.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_dns.sh new file mode 100755 index 0000000..f0e19a8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_dns.sh @@ -0,0 +1,175 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# 'make init' operates scripts and related configuration files in the current directory +# The 'apisix' command is a command in the /usr/local/apisix, +# and the configuration file for the operation is in the /usr/local/apisix/conf + +. ./t/cli/common.sh + +# dns_resolver_valid +echo ' +apisix: + dns_resolver: + - 127.0.0.1 + - "[::1]:5353" + dns_resolver_valid: 30 +' > conf/config.yaml + +make init + +if ! 
grep "resolver 127.0.0.1 \[::1\]:5353 valid=30 ipv6=on;" conf/nginx.conf > /dev/null; then + echo "failed: dns_resolver_valid doesn't take effect" + exit 1 +fi + +echo ' +apisix: + proxy_mode: http&stream + stream_proxy: + tcp: + - 9100 + dns_resolver: + - 127.0.0.1 + - "[::1]:5353" + dns_resolver_valid: 30 +' > conf/config.yaml + +make init + +count=$(grep -c "resolver 127.0.0.1 \[::1\]:5353 valid=30 ipv6=on;" conf/nginx.conf) +if [ "$count" -ne 2 ]; then + echo "failed: dns_resolver_valid doesn't take effect" + exit 1 +fi + +echo "pass: dns_resolver_valid takes effect" + +echo ' +apisix: + proxy_mode: http&stream + stream_proxy: + tcp: + - 9100 + dns_resolver: + - 127.0.0.1 + - "::1" + - "[::2]" +' > conf/config.yaml + +make init + +count=$(grep -c "resolver 127.0.0.1 \[::1\] \[::2\] ipv6=on;" conf/nginx.conf) +if [ "$count" -ne 2 ]; then + echo "failed: can't handle IPv6 resolver w/o bracket" + exit 1 +fi + +echo "pass: handle IPv6 resolver w/o bracket" + +# ipv6 config test +echo ' +apisix: + enable_ipv6: false + dns_resolver: + - 127.0.0.1 + dns_resolver_valid: 30 +' > conf/config.yaml + +make init + +if ! grep "resolver 127.0.0.1 valid=30 ipv6=off;" conf/nginx.conf > /dev/null; then + echo "failed: ipv6 config doesn't take effect" + exit 1 +fi + +# check dns resolver address +echo ' +apisix: + dns_resolver: + - 127.0.0.1 + - "fe80::21c:42ff:fe00:18%eth0" +' > conf/config.yaml + +out=$(make init 2>&1 || true) + +if ! echo "$out" | grep "unsupported DNS resolver"; then + echo "failed: should check dns resolver is unsupported" + exit 1 +fi + +if ! 
grep "resolver 127.0.0.1 ipv6=on;" conf/nginx.conf > /dev/null; then + echo "failed: should skip unsupported DNS resolver" + exit 1 +fi + +if grep "fe80::21c:42ff:fe00:18%eth0" conf/nginx.conf > /dev/null; then + echo "failed: should skip unsupported DNS resolver" + exit 1 +fi + +echo "passed: check dns resolver" + +# dns resolver in stream subsystem +rm logs/error.log || true + +echo " +apisix: + enable_admin: true + proxy_mode: http&stream + stream_proxy: + tcp: + - addr: 9100 + dns_resolver: + - 127.0.0.1:1053 +nginx_config: + error_log_level: info +" > conf/config.yaml + +make run +sleep 0.5 +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl -v -k -i -m 20 -o /dev/null -s -X PUT http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ + -H "X-API-KEY: $admin_key" \ + -d '{ + "upstream": { + "type": "roundrobin", + "nodes": [{ + "host": "sd.test.local", + "port": 1995, + "weight": 1 + }] + } + }' + +curl http://127.0.0.1:9100 || true +make stop +sleep 0.1 # wait for logs output + +if grep -E 'dns client error: 101 empty record received while prereading client data' logs/error.log; then + echo "failed: resolve upstream host in stream subsystem should works fine" + exit 1 +fi + +if ! grep -E 'dns resolver domain: sd.test.local to 127.0.0.(1|2) while prereading client data' logs/error.log; then + echo "failed: resolve upstream host in preread phase should works fine" + exit 1 +fi + +echo "success: resolve upstream host in stream subsystem works fine" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_dubbo.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_dubbo.sh new file mode 100755 index 0000000..6625aba --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_dubbo.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# skip the whole test unless running on the customed nginx/OpenResty build +exit_if_not_customed_nginx + +# enable dubbo +echo ' +plugins: + - dubbo-proxy +' > conf/config.yaml + +make init + +# enabling the dubbo-proxy plugin should render a dedicated @dubbo_pass location +if ! grep "location @dubbo_pass " conf/nginx.conf > /dev/null; then + echo "failed: dubbo location not found in nginx.conf" + exit 1 +fi + +echo "passed: found dubbo location in nginx.conf" + +# dubbo multiplex configuration +echo ' +plugins: + - dubbo-proxy +plugin_attr: + dubbo-proxy: + upstream_multiplex_count: 16 +' > conf/config.yaml + +make init + +# upstream_multiplex_count: 16 should surface as "multi 16;" in nginx.conf +if ! grep "multi 16;" conf/nginx.conf > /dev/null; then + echo "failed: dubbo multiplex configuration not found in nginx.conf" + exit 1 +fi + +echo "passed: found dubbo multiplex configuration in nginx.conf" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd.sh new file mode 100755 index 0000000..be9b45f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd.sh @@ -0,0 +1,201 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements.
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# check etcd while enable auth +git checkout conf/config.yaml + +export ETCDCTL_API=3 +etcdctl version +etcdctl --endpoints=127.0.0.1:2379 user add "root:apache-api6" +etcdctl --endpoints=127.0.0.1:2379 role add root +etcdctl --endpoints=127.0.0.1:2379 user grant-role root root +etcdctl --endpoints=127.0.0.1:2379 user get root +etcdctl --endpoints=127.0.0.1:2379 auth enable +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 del /apisix --prefix + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + prefix: /apisix + timeout: 30 + user: root + password: apache-api6 +' > conf/config.yaml + +make init +cmd_res=`etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 get /apisix --prefix` +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 auth disable +etcdctl --endpoints=127.0.0.1:2379 role delete root +etcdctl --endpoints=127.0.0.1:2379 user delete root + +init_kv=( +"/apisix/consumers/ init_dir" +"/apisix/global_rules/ init_dir" +"/apisix/plugin_metadata/ init_dir" +"/apisix/plugins/ init_dir" +"/apisix/protos/ init_dir" +"/apisix/routes/ init_dir" +"/apisix/services/ init_dir" +"/apisix/ssls/ init_dir" +"/apisix/stream_routes/ init_dir" +"/apisix/upstreams/ init_dir" +) + +IFS=$'\n' +for kv in ${init_kv[@]} +do +count=`echo $cmd_res | grep -c ${kv} || true` +if [ $count -ne 1 
]; then + echo "failed: failed to match ${kv}" + exit 1 +fi +done + +echo "passed: etcd auth enabled and init kv has been set up correctly" + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'authentication is not enabled'; then + echo "failed: properly handle the error when connecting to etcd without auth" + exit 1 +fi + +echo "passed: properly handle the error when connecting to etcd without auth" + +# Check etcd retry if connect failed +git checkout conf/config.yaml + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2389 + prefix: /apisix +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep "retry time"; then + echo "failed: apisix should echo \"retry time\"" + exit 1 +fi + +echo "passed: Show retry time info successfully" + +# Check etcd connect refused +git checkout conf/config.yaml + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2389 + prefix: /apisix +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! 
echo "$out" | grep "connection refused"; then + echo "failed: apisix should echo \"connection refused\"" + exit 1 +fi + +echo "passed: Show connection refused info successfully" + +# Check etcd auth error +git checkout conf/config.yaml + +export ETCDCTL_API=3 +etcdctl version +etcdctl --endpoints=127.0.0.1:2379 user add "root:apache-api6" +etcdctl --endpoints=127.0.0.1:2379 role add root +etcdctl --endpoints=127.0.0.1:2379 user grant-role root root +etcdctl --endpoints=127.0.0.1:2379 user get root +etcdctl --endpoints=127.0.0.1:2379 auth enable +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 del /apisix --prefix + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + prefix: /apisix + timeout: 30 + user: root + password: apache-api7 +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep "invalid user ID or password"; then + echo "failed: should echo \"invalid user ID or password\"" + exit 1 +fi + +echo "passed: show password error successfully" + +# clean etcd auth +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 auth disable +etcdctl --endpoints=127.0.0.1:2379 role delete root +etcdctl --endpoints=127.0.0.1:2379 user delete root + +# check connect to etcd with ipv6 address +git checkout conf/config.yaml + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://[::1]:2379 + prefix: /apisix + timeout: 30 +' > conf/config.yaml + +rm logs/error.log || true +make run +sleep 0.1 + +if grep "update endpoint: http://\[::1\]:2379 to unhealthy" logs/error.log; then + echo "failed: connect to etcd via ipv6 address failed" + exit 1 +fi + +if grep "host or service not provided, or not known" logs/error.log; then + echo "failed: luasocket resolve ipv6 addresses failed" + exit 1 +fi + +make stop + +echo "passed: connect to etcd via ipv6 address successfully" diff --git 
a/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_healthcheck.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_healthcheck.sh new file mode 100755 index 0000000..41efb16 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_healthcheck.sh @@ -0,0 +1,145 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +# create 3 node etcd cluster in docker +ETCD_NAME_0=etcd0 +ETCD_NAME_1=etcd1 +ETCD_NAME_2=etcd2 +HEALTH_CHECK_RETRY_TIMEOUT=10 + +if [ -z "logs/error.log" ]; then + git checkout logs/error.log +fi + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:23790" + - "http://127.0.0.1:23791" + - "http://127.0.0.1:23792" + health_check_timeout: '"$HEALTH_CHECK_RETRY_TIMEOUT"' + timeout: 2 +' > conf/config.yaml + +docker compose -f ./t/cli/docker-compose-etcd-cluster.yaml up -d + +# case 1: Check apisix not got effected when one etcd node disconnected +make init && make run + +docker stop ${ETCD_NAME_0} + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +if [ ! $code -eq 200 ]; then + echo "failed: apisix got effect when one etcd node out of a cluster disconnected" + exit 1 +fi +docker start ${ETCD_NAME_0} + +docker stop ${ETCD_NAME_1} + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +if [ ! 
$code -eq 200 ]; then + echo "failed: apisix got effect when one etcd node out of a cluster disconnected" + exit 1 +fi +docker start ${ETCD_NAME_1} + +make stop + +echo "passed: apisix not got effected when one etcd node disconnected" + +# case 2: Check when all etcd nodes disconnected, apisix trying to reconnect with backoff, and could successfully recover when reconnected +make init && make run + +docker stop ${ETCD_NAME_0} && docker stop ${ETCD_NAME_1} && docker stop ${ETCD_NAME_2} + +sleep_till=$(date +%s -d "$DATE + $HEALTH_CHECK_RETRY_TIMEOUT second") + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +if [ $code -eq 200 ]; then + echo "failed: apisix not got effect when all etcd nodes disconnected" + exit 1 +fi + +docker start ${ETCD_NAME_0} && docker start ${ETCD_NAME_1} && docker start ${ETCD_NAME_2} + +# case 3: sleep till etcd health check try to check again +current_time=$(date +%s) +sleep_seconds=$(( $sleep_till - $current_time + 3)) +if [ "$sleep_seconds" -gt 0 ]; then + sleep $sleep_seconds +fi + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H "X-API-KEY: $admin_key") +if [ ! 
$code -eq 200 ]; then + echo "failed: apisix could not recover when etcd node recover" + docker ps + cat logs/error.log + exit 1 +fi + +make stop + +echo "passed: when all etcd nodes disconnected, apisix trying to reconnect with backoff, and could successfully recover when reconnected" + +# case 4: stop one etcd node (result: start successful) +docker stop ${ETCD_NAME_0} + +out=$(make init 2>&1) +if echo "$out" | grep "23790" | grep "connection refused"; then + echo "passed: APISIX successfully to start, stop only one etcd node" +else + echo "failed: stop only one etcd node APISIX should start normally" + exit 1 +fi + +# case 5: stop two etcd nodes (result: start failure) +docker stop ${ETCD_NAME_1} + +out=$(make init 2>&1 || true) +if echo "$out" | grep "23791" | grep "connection refused"; then + echo "passed: APISIX failed to start, etcd cluster must have two or more healthy nodes" +else + echo "failed: two etcd nodes have been stopped, APISIX should fail to start" + exit 1 +fi + +# case 6: stop all etcd nodes (result: start failure) +docker stop ${ETCD_NAME_2} + +out=$(make init 2>&1 || true) +if echo "$out" | grep "23792" | grep "connection refused"; then + echo "passed: APISIX failed to start, all etcd nodes have stopped" +else + echo "failed: all etcd nodes have stopped, APISIX should not be able to start" + exit 1 +fi + +# stop etcd docker container +docker compose -f ./t/cli/docker-compose-etcd-cluster.yaml down diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_mtls.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_mtls.sh new file mode 100755 index 0000000..a05dcdb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_mtls.sh @@ -0,0 +1,210 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +exit_if_not_customed_nginx + +# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns + +# etcd mTLS verify +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false + ' > conf/config.yaml + +out=$(make init 2>&1 || echo "ouch") +if echo "$out" | grep "bad certificate"; then + echo "failed: apisix should not echo \"bad certificate\"" + exit 1 +fi + +echo "passed: certificate verify success expectedly" + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://admin.apisix.dev:22379" + prefix: "/apisix" + tls: + verify: false + ' > conf/config.yaml + +out=$(make init 2>&1 || echo "ouch") +if ! 
echo "$out" | grep "bad certificate"; then
    echo "failed: apisix should echo \"bad certificate\""
    exit 1
fi

echo "passed: certificate verify fail expectedly"

# etcd mTLS verify with CA
echo '
apisix:
  ssl:
    ssl_trusted_certificate: t/certs/mtls_ca.crt
deployment:
  role: traditional
  role_traditional:
    config_provider: etcd
  etcd:
    host:
      - "https://admin.apisix.dev:22379"
    prefix: "/apisix"
    tls:
      cert: t/certs/mtls_client.crt
      key: t/certs/mtls_client.key
  ' > conf/config.yaml

out=$(make init 2>&1 || echo "ouch")
if echo "$out" | grep "certificate verify failed"; then
    echo "failed: apisix should not echo \"certificate verify failed\""
    exit 1
fi

if echo "$out" | grep "ouch"; then
    echo "failed: apisix should not fail"
    exit 1
fi

echo "passed: certificate verify with CA success expectedly"

# etcd mTLS in stream subsystem
echo '
apisix:
  proxy_mode: http&stream
  stream_proxy:
    tcp:
      - addr: 9100
  ssl:
    ssl_trusted_certificate: t/certs/mtls_ca.crt
deployment:
  role: traditional
  role_traditional:
    config_provider: etcd
  etcd:
    host:
      - "https://admin.apisix.dev:22379"
    prefix: "/apisix"
    tls:
      cert: t/certs/mtls_client.crt
      key: t/certs/mtls_client.key
  ' > conf/config.yaml

out=$(make init 2>&1 || echo "ouch")
if echo "$out" | grep "certificate verify failed"; then
    echo "failed: apisix should not echo \"certificate verify failed\""
    exit 1
fi

if echo "$out" | grep "ouch"; then
    echo "failed: apisix should not fail"
    exit 1
fi

rm logs/error.log || true
make run
sleep 1
make stop

# BUG FIX: message typo ("veirfy" -> "verify"), and this was the only failure
# branch in the file without `exit 1`, so the stream-subsystem check could
# never actually fail the test run. Made consistent with the sibling checks.
if grep "\[error\]" logs/error.log; then
    echo "failed: verify etcd certificate during sync should not fail"
    exit 1
fi

echo "passed: certificate verify in stream subsystem successfully"

# use host in etcd.host as sni by default
git checkout conf/config.yaml
echo '
apisix:
  ssl:
    ssl_trusted_certificate: t/certs/mtls_ca.crt
deployment:
  role: traditional
  role_traditional:
    config_provider: etcd
etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + ' > conf/config.yaml + +rm logs/error.log || true +make init +make run +sleep 1 +make stop + +if ! grep -F 'certificate host mismatch' logs/error.log; then + echo "failed: should got certificate host mismatch when use host in etcd.host as sni" + exit 1 +fi + + +echo "passed: use host in etcd.host as sni by default" + +# specify custom sni instead of using etcd.host +git checkout conf/config.yaml +echo ' +apisix: + ssl: + ssl_trusted_certificate: t/certs/mtls_ca.crt +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + sni: "admin.apisix.dev" + ' > conf/config.yaml + +rm logs/error.log || true +make init +make run +sleep 1 +make stop + +if grep -E 'certificate host mismatch' logs/error.log; then + echo "failed: should use specify custom sni" + exit 1 +fi + +echo "passed: specify custom sni instead of using etcd.host" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_sync_event_handle.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_sync_event_handle.sh new file mode 100755 index 0000000..f448b2b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_sync_event_handle.sh @@ -0,0 +1,133 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +# check etcd while enable auth +git checkout conf/config.yaml + +# Make new routes +etcdctl --endpoints=127.0.0.1:2379 del --prefix /apisix/routes/ +etcdctl --endpoints=127.0.0.1:2379 put /apisix/routes/ init_dir +etcdctl --endpoints=127.0.0.1:2379 put /apisix/routes/1 '{"uri":"/1","plugins":{}}' +etcdctl --endpoints=127.0.0.1:2379 put /apisix/routes/2 '{"uri":"/2","plugins":{}}' +etcdctl --endpoints=127.0.0.1:2379 put /apisix/routes/3 '{"uri":"/3","plugins":{}}' +etcdctl --endpoints=127.0.0.1:2379 put /apisix/routes/4 '{"uri":"/4","plugins":{}}' +etcdctl --endpoints=127.0.0.1:2379 put /apisix/routes/5 '{"uri":"/5","plugins":{}}' + +# Connect by unauthenticated +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - http://127.0.0.1:2379 + prefix: /apisix +nginx_config: + error_log_level: info + worker_processes: 1 +' > conf/config.yaml + +# Initialize and start APISIX without password +make init +make run + +# Test request +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/1 | grep 503 || (echo "failed: Round 1 Request 1 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/2 | grep 503 || (echo "failed: Round 1 Request 2 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/3 | grep 503 || (echo "failed: Round 1 Request 3 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/4 | grep 503 || (echo "failed: Round 1 Request 4 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" 
http://127.0.0.1:9080/5 | grep 503 || (echo "failed: Round 1 Request 5 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/6 | grep 404 || (echo "failed: Round 1 Request 6 unexpected"; exit 1) + +# Enable auth to block APISIX connect +export ETCDCTL_API=3 +etcdctl version +etcdctl --endpoints=127.0.0.1:2379 user add "root:apache-api6-sync" +etcdctl --endpoints=127.0.0.1:2379 role add root +etcdctl --endpoints=127.0.0.1:2379 user grant-role root root +etcdctl --endpoints=127.0.0.1:2379 user get root +etcdctl --endpoints=127.0.0.1:2379 auth enable +sleep 3 + +# Restart etcd services to make sure that APISIX cannot be synchronized +project_compose_ci=ci/pod/docker-compose.common.yml make ci-env-stop +project_compose_ci=ci/pod/docker-compose.common.yml make ci-env-up + +# Make some changes when APISIX cannot be synchronized +# Authentication ensures that only etcdctl can access etcd at this time +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6-sync put /apisix/routes/1 '{"uri":"/1","plugins":{"fault-injection":{"abort":{"http_status":204}}}}' +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6-sync put /apisix/routes/2 '{"uri":"/2"}' ## set incorrect configuration +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6-sync put /apisix/routes/3 '{"uri":"/3","plugins":{"fault-injection":{"abort":{"http_status":204}}}}' +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6-sync put /apisix/routes/4 '{"uri":"/4","plugins":{"fault-injection":{"abort":{"http_status":204}}}}' +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6-sync put /apisix/routes/5 '{"uri":"/5","plugins":{"fault-injection":{"abort":{"http_status":204}}}}' + +# Resume APISIX synchronization by disable auth +# Since APISIX will not be able to access etcd until authentication is disable, +# watch will be temporarily disabled, so when authentication is disable, +# the backlog events will be sent at once at an offset from when APISIX 
disconnects. +# When APISIX resumes the connection, it still has not met its mandatory full +# synchronization condition, so it will be "watch" that resumes, not "readdir". +etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6-sync auth disable +etcdctl --endpoints=127.0.0.1:2379 user delete root +etcdctl --endpoints=127.0.0.1:2379 role delete root +sleep 5 # wait resync by watch + +# Test request +# All but the intentionally incoming misconfigurations should be applied, +# and non-existent routes will remain non-existent. +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/1 | grep 204 || (echo "failed: Round 2 Request 1 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/2 | grep 503 || (echo "failed: Round 2 Request 2 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/3 | grep 204 || (echo "failed: Round 2 Request 3 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/4 | grep 204 || (echo "failed: Round 2 Request 4 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/5 | grep 204 || (echo "failed: Round 2 Request 5 unexpected"; exit 1) +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:9080/6 | grep 404 || (echo "failed: Round 2 Request 6 unexpected"; exit 1) + +# Check logs +## Case1: Ensure etcd is disconnected +cat logs/error.log | grep "watchdir err: has no healthy etcd endpoint available" || (echo "Log case 1 unexpected"; exit 1) + +## Case2: Ensure events are sent in bulk after connection is restored +## It is extracted from the structure of following type +## result = { +## events = { { +## { +## kv = { +## key = "/apisix/routes/1", +## ... +## } +#### }, { +## kv = { +## key = "/apisix/routes/2", +## ... +## } +## }, +## ... +## } }, +## header = { +## ... +## } +## } +## After check, it only appears when watch recovers and returns events in bulk. 
+cat logs/error.log | grep "}, {" || (echo "failed: Log case 2 unexpected"; exit 1) + +## Case3: Ensure that the check schema error is actually triggered. +cat logs/error.log | grep "failed to check item data" || (echo "failed: Log case 3 unexpected"; exit 1) diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_tls.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_tls.sh new file mode 100755 index 0000000..39db833 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_etcd_tls.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# 'make init' operates scripts and related configuration files in the current directory +# The 'apisix' command is a command in the /usr/local/apisix, +# and the configuration file for the operation is in the /usr/local/apisix/conf + +. ./t/cli/common.sh + +# Check etcd tls verify failure +git checkout conf/config.yaml + +echo ' +apisix: + ssl: + ssl_trusted_certificate: t/certs/mtls_ca.crt +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + prefix: "/apisix" + ' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! 
echo "$out" | grep "certificate verify failed"; then + echo "failed: apisix should echo \"certificate verify failed\"" + exit 1 +fi + +echo "passed: Show certificate verify failed info successfully" + + +# Check etcd tls without verification +git checkout conf/config.yaml + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:12379" + prefix: "/apisix" + tls: + verify: false + ' > conf/config.yaml + +out=$(make init 2>&1 || true) +if echo "$out" | grep "certificate verify failed"; then + echo "failed: apisix should not echo \"certificate verify failed\"" + exit 1 +fi + +echo "passed: Certificate verification successfully" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_http_config.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_http_config.sh new file mode 100755 index 0000000..4059ca6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_http_config.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +git checkout conf/config.yaml + +echo ' +nginx_config: + http: + custom_lua_shared_dict: + my_dict: 1m +' > conf/config.yaml + +make init + +if ! 
grep "lua_shared_dict my_dict 1m;" conf/nginx.conf > /dev/null; then + echo "failed: define custom shdict" + exit 1 +fi + +echo "passed: define custom shdict" + +git checkout conf/config.yaml + +echo " +plugins: + - ip-restriction +" > conf/config.yaml + +make init + +if grep "plugin-limit-conn" conf/nginx.conf > /dev/null; then + echo "failed: enable shdict on demand" + exit 1 +fi + +echo " +plugins: + - limit-conn +" > conf/config.yaml + +make init + +if ! grep "plugin-limit-conn" conf/nginx.conf > /dev/null; then + echo "failed: enable shdict on demand" + exit 1 +fi + +echo "passed: enable shdict on demand" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_kubernetes.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_kubernetes.sh new file mode 100755 index 0000000..f60f856 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_kubernetes.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +discovery: + kubernetes: + service: + host: ${HOST_ENV} + client: + token: ${TOKEN_ENV} +' >conf/config.yaml + +make init + +if ! grep "env HOST_ENV" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! 
grep "env KUBERNETES_SERVICE_PORT" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env TOKEN_ENV" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "lua_shared_dict kubernetes 1m;" conf/nginx.conf; then + echo "kubernetes discovery lua_shared_dict inject failed" + exit 1 +fi + +echo ' +discovery: + kubernetes: + - id: dev + service: + host: ${DEV_HOST} + port: ${DEV_PORT} + client: + token: ${DEV_TOKEN} + - id: pro + service: + host: ${PRO_HOST} + port: ${PRO_PORT} + client: + token: ${PRO_TOKEN} + shared_size: 2m +' >conf/config.yaml + +make init + +if ! grep "env DEV_HOST" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env DEV_PORT" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env DEV_TOKEN" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env PRO_HOST" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env PRO_PORT" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "env PRO_TOKEN" conf/nginx.conf; then + echo "kubernetes discovery env inject failed" + exit 1 +fi + +if ! grep "lua_shared_dict kubernetes-dev 1m;" conf/nginx.conf; then + echo "kubernetes discovery lua_shared_dict inject failed" + exit 1 +fi + +if ! 
grep "lua_shared_dict kubernetes-pro 2m;" conf/nginx.conf; then + echo "kubernetes discovery lua_shared_dict inject failed" + exit 1 +fi + +echo "kubernetes discovery inject success" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_main.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_main.sh new file mode 100755 index 0000000..62c128c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_main.sh @@ -0,0 +1,1002 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# 'make init' operates scripts and related configuration files in the current directory +# The 'apisix' command is a command in the /usr/local/apisix, +# and the configuration file for the operation is in the /usr/local/apisix/conf + +. ./t/cli/common.sh + +git checkout conf/config.yaml + +# check 'Server: APISIX' is not in nginx.conf. We already added it in Lua code. +make init + +if grep "Server: APISIX" conf/nginx.conf > /dev/null; then + echo "failed: 'Server: APISIX' should not be added twice" + exit 1 +fi + +echo "passed: 'Server: APISIX' not in nginx.conf" + +#make init <- no need to re-run since we don't change the config yet. + +# check the error_log directive uses warn level by default. +if ! 
grep "error_log logs/error.log warn;" conf/nginx.conf > /dev/null; then + echo "failed: error_log directive doesn't use warn level by default" + exit 1 +fi + +echo "passed: error_log directive uses warn level by default" + +# check whether the 'reuseport' is in nginx.conf . + +grep -E "listen 0.0.0.0:9080.*reuseport" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: nginx.conf file is missing reuseport configuration" + exit 1 +fi + +echo "passed: nginx.conf file contains reuseport configuration" + +# check default ssl port +echo " +apisix: + ssl: + listen: + - port: 8443 + +" > conf/config.yaml + +make init + +grep "listen 0.0.0.0:8443 ssl" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: failed to update ssl port" + exit 1 +fi + +grep "listen \[::\]:8443 ssl" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: failed to update ssl port" + exit 1 +fi + +echo "passed: change default ssl port" + +# check support multiple ports listen in http and https + +echo " +apisix: + node_listen: + - 9080 + - 9081 + - 9082 + ssl: + enable: true + listen: + - port: 9443 + - port: 9444 + - port: 9445 +" > conf/config.yaml + +make init + +count_http_ipv4=`grep -c "listen 0.0.0.0:908." conf/nginx.conf || true` +if [ $count_http_ipv4 -ne 3 ]; then + echo "failed: failed to support multiple ports listen in http with ipv4" + exit 1 +fi + +count_http_ipv6=`grep -c "listen \[::\]:908." conf/nginx.conf || true` +if [ $count_http_ipv6 -ne 3 ]; then + echo "failed: failed to support multiple ports listen in http with ipv6" + exit 1 +fi + +count_https_ipv4=`grep -c "listen 0.0.0.0:944. ssl" conf/nginx.conf || true` +if [ $count_https_ipv4 -ne 3 ]; then + echo "failed: failed to support multiple ports listen in https with ipv4" + exit 1 +fi + +count_https_ipv6=`grep -c "listen \[::\]:944. 
ssl" conf/nginx.conf || true` +if [ $count_https_ipv6 -ne 3 ]; then + echo "failed: failed to support multiple ports listen in https with ipv6" + exit 1 +fi + +echo "passed: support multiple ports listen in http and https" + +# check support specific IP listen in http and https + +echo " +apisix: + node_listen: + - ip: 127.0.0.1 + port: 9081 + - ip: 127.0.0.2 + port: 9082 + ssl: + listen: + - ip: 127.0.0.3 + port: 9444 + - ip: 127.0.0.4 + port: 9445 + enable_http3: true + enable_http2: true +" > conf/config.yaml + +make init + +count_http_specific_ip=`grep -c "listen 127.0.0..:908." conf/nginx.conf || true` +if [ $count_http_specific_ip -ne 2 ]; then + echo "failed: failed to support specific IP listen in http" + exit 1 +fi + +count_https_specific_ip=`grep -c "listen 127.0.0..:944. ssl" conf/nginx.conf || true` +if [ $count_https_specific_ip -ne 2 ]; then + echo "failed: failed to support specific IP listen in https" + exit 1 +fi + +count_enable_http2=`grep -c "http2 on" conf/nginx.conf || true` +if [ $count_enable_http2 -ne 1 ]; then + echo "failed: failed to enable http2" + exit 1 +fi + +count_https_specific_ip_and_enable_quic=`grep -c "listen 127.0.0..:944. quic" conf/nginx.conf || true` +if [ $count_https_specific_ip_and_enable_quic -ne 1 ]; then + echo "failed: failed to support specific IP and enable quic listen in https" + exit 1 +fi + +count_https_specific_ip_and_enable_http3=`grep -c "http3 on" conf/nginx.conf || true` +if [ $count_https_specific_ip_and_enable_http3 -ne 1 ]; then + echo "failed: failed to support specific IP and enable http3 listen in https" + exit 1 +fi + +echo "passed: support specific IP listen in http and https" + +# check deprecated enable_http2 in node_listen +echo " +apisix: + node_listen: + - ip: 127.0.0.1 + port: 9081 + enable_http2: true +" > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! 
echo "$out" | grep 'port level enable_http2 in node_listen is deprecated'; then + echo "failed: failed to detect deprecated enable_http2 in node_listen" + exit 1 +fi + +echo "passed: check deprecated enable_http2 in node_listen" + + +# check deprecated enable_http2 in ssl.listen +echo " +apisix: + node_listen: + - ip: 127.0.0.1 + port: 9081 + ssl: + enable: true + listen: + - ip: 127.0.0.1 + port: 9444 + enable_http2: true +" > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'port level enable_http2 in ssl.listen is deprecated'; then + echo "failed: failed to detect deprecated enable_http2 in ssl.listen" + exit 1 +fi + +echo "passed: check deprecated enable_http2 in node_listen" + +# check default env +echo " +nginx_config: + envs: + - TEST +" > conf/config.yaml + +make init + +grep "env TEST;" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: failed to update env" + exit 1 +fi + +echo "passed: change default env" + +# support environment variables +echo ' +nginx_config: + envs: + - ${{var_test}}_${{FOO}} +' > conf/config.yaml + +var_test=TEST FOO=bar make init + +if ! grep "env TEST_bar;" conf/nginx.conf > /dev/null; then + echo "failed: failed to resolve variables" + exit 1 +fi + +out=$(make init 2>&1 || true) +if ! 
echo "$out" | grep "can't find environment variable"; then + echo "failed: failed to resolve variables" + exit 1 +fi + +echo "passed: resolve variables" + +# support reserved environment variable APISIX_DEPLOYMENT_ETCD_HOST + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2333" +' > conf/config.yaml + +failed_msg="failed: failed to configure etcd host with reserved environment variable" + +out=$(APISIX_DEPLOYMENT_ETCD_HOST='["http://127.0.0.1:2379"]' make init 2>&1 || true) +if echo "$out" | grep "connection refused" > /dev/null; then + echo $failed_msg + exit 1 +fi + +out=$(APISIX_DEPLOYMENT_ETCD_HOST='["http://127.0.0.1:2379"]' make run 2>&1 || true) +if echo "$out" | grep "connection refused" > /dev/null; then + echo $failed_msg + exit 1 +fi + +if ! grep "env APISIX_DEPLOYMENT_ETCD_HOST;" conf/nginx.conf > /dev/null; then + echo "failed: 'env APISIX_DEPLOYMENT_ETCD_HOST;' not in nginx.conf" + echo $failed_msg + exit 1 +fi + +make stop + +echo "passed: configure etcd host with reserved environment variable" + +echo ' +nginx_config: + worker_rlimit_nofile: ${{nofile9}} +' > conf/config.yaml + +nofile9=99999 make init + +if ! grep "worker_rlimit_nofile 99999;" conf/nginx.conf > /dev/null; then + echo "failed: failed to resolve variables as integer" + exit 1 +fi + +echo "passed: resolve variables as integer" + +echo ' +apisix: + enable_admin: ${{admin}} +' > conf/config.yaml + +admin=false make init + +if grep "location /apisix/admin" conf/nginx.conf > /dev/null; then + echo "failed: failed to resolve variables as boolean" + exit 1 +fi + +echo "passed: resolve variables as boolean" + +echo ' +nginx_config: + envs: + - ${{ var_test}}_${{ FOO }} +' > conf/config.yaml + +var_test=TEST FOO=bar make init + +if ! 
grep "env TEST_bar;" conf/nginx.conf > /dev/null; then + echo "failed: failed to resolve variables wrapped with whitespace" + exit 1 +fi + +echo "passed: resolve variables wrapped with whitespace" + +# support environment variables in local_conf +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" +' > conf/config.yaml + +ETCD_HOST=127.0.0.1 ETCD_PORT=2379 make init + +if ! grep "env ETCD_HOST;" conf/nginx.conf > /dev/null; then + echo "failed: support environment variables in local_conf" + exit 1 +fi + +# don't override user's envs configuration +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" +nginx_config: + envs: + - ETCD_HOST +' > conf/config.yaml + +ETCD_HOST=127.0.0.1 ETCD_PORT=2379 make init + +if grep "env ETCD_HOST=.*;" conf/nginx.conf > /dev/null; then + echo "failed: support environment variables in local_conf" + exit 1 +fi + +if ! grep "env ETCD_HOST;" conf/nginx.conf > /dev/null; then + echo "failed: support environment variables in local_conf" + exit 1 +fi + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://${{ETCD_HOST}}:${{ETCD_PORT}}" +nginx_config: + envs: + - ETCD_HOST=1.1.1.1 +' > conf/config.yaml + +ETCD_HOST=127.0.0.1 ETCD_PORT=2379 make init + +if grep "env ETCD_HOST;" conf/nginx.conf > /dev/null; then + echo "failed: support environment variables in local_conf" + exit 1 +fi + +if ! grep "env ETCD_HOST=1.1.1.1;" conf/nginx.conf > /dev/null; then + echo "failed: support environment variables in local_conf" + exit 1 +fi + +echo "pass: support environment variables in local_conf" + +# support default value when environment not set +echo ' +tests: + key: ${{TEST_ENV:=1.1.1.1}} +' > conf/config.yaml + +make init + +if ! 
grep "env TEST_ENV;" conf/nginx.conf > /dev/null; then + echo "failed: should use default value when environment not set" + exit 1 +fi + +echo ' +tests: + key: ${{TEST_ENV:=very-long-domain-with-many-symbols.absolutely-non-exists-123ss.com:1234/path?param1=value1}} +' > conf/config.yaml + +make init + +if ! grep "env TEST_ENV;" conf/nginx.conf > /dev/null; then + echo "failed: should use default value when environment not set" + exit 1 +fi + +echo ' +tests: + key: ${{TEST_ENV:=192.168.1.1}} +' > conf/config.yaml + +TEST_ENV=127.0.0.1 make init + +if ! grep "env TEST_ENV;" conf/nginx.conf > /dev/null; then + echo "failed: should use environment variable when environment is set" + exit 1 +fi + +echo "pass: support default value when environment not set" + +# support merging worker_processes +echo ' +nginx_config: + worker_processes: 1 +' > conf/config.yaml + +make init + +if ! grep "worker_processes 1;" conf/nginx.conf > /dev/null; then + echo "failed: failed to merge worker_processes" + exit 1 +fi + +echo ' +nginx_config: + worker_processes: ${{nproc}} +' > conf/config.yaml + +nproc=1 make init + +if ! grep "worker_processes 1;" conf/nginx.conf > /dev/null; then + echo "failed: failed to merge worker_processes" + exit 1 +fi + +echo ' +nginx_config: + worker_processes: true +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'path\[nginx_config->worker_processes\] expect'; then + echo "failed: failed to merge worker_processes" + exit 1 +fi + +echo ' +nginx_config: + worker_processes: ${{nproc}} +' > conf/config.yaml + +out=$(nproc=false make init 2>&1 || true) +if ! 
echo "$out" | grep 'path\[nginx_config->worker_processes\] expect'; then + echo "failed: failed to merge worker_processes" + exit 1 +fi + +echo "passed: merge worker_processes" + +# check nameserver imported +git checkout conf/config.yaml + +make init + +i=`grep -E '^nameserver[[:space:]]+(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])[[:space:]]?$' /etc/resolv.conf | awk '{print $2}'` +for ip in $i +do + echo $ip + grep $ip conf/nginx.conf > /dev/null + if [ ! $? -eq 0 ]; then + echo "failed: system DNS "$ip" unimported" + exit 1 + fi +done + +echo "passed: system nameserver imported" + +# enable enable_dev_mode +git checkout conf/config.yaml + +echo " +apisix: + enable_dev_mode: true +" > conf/config.yaml + +make init + +count=`grep -c "worker_processes 1;" conf/nginx.conf` +if [ $count -ne 1 ]; then + echo "failed: worker_processes is not 1 when enable enable_dev_mode" + exit 1 +fi + +count=`grep -c "listen 0.0.0.0:9080.*reuseport" conf/nginx.conf || true` +if [ $count -ne 0 ]; then + echo "failed: reuseport should be disabled when enable enable_dev_mode" + exit 1 +fi + +echo "passed: enable enable_dev_mode" + +# check whether the 'worker_cpu_affinity' is in nginx.conf + +git checkout conf/config.yaml + +make init + +count=`grep -c "worker_cpu_affinity" conf/nginx.conf || true` +if [ $count -ne 0 ]; then + echo "failed: nginx.conf file found worker_cpu_affinity when disabling it" + exit 1 +fi + +echo "passed: nginx.conf file disables cpu affinity" + +# check the 'worker_shutdown_timeout' in 'nginx.conf' . + +make init + +grep -E "worker_shutdown_timeout 240s" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: worker_shutdown_timeout in nginx.conf is required 240s" + exit 1 +fi + +echo "passed: worker_shutdown_timeout in nginx.conf is ok" + +# check the 'client_max_body_size' in 'nginx.conf' . 
+ +git checkout conf/config.yaml + +echo ' +nginx_config: + http: + client_max_body_size: 512m +' > conf/config.yaml + +make init + +if ! grep -E "client_max_body_size 512m" conf/nginx.conf > /dev/null; then + echo "failed: client_max_body_size in nginx.conf doesn't change" + exit 1 +fi + +echo "passed: client_max_body_size in nginx.conf is ok" + +# check worker processes number is configurable. + +git checkout conf/config.yaml + +echo " +nginx_config: + worker_processes: 2 +" > conf/config.yaml + +make init + +grep "worker_processes 2;" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: worker_processes in nginx.conf doesn't change" + exit 1 +fi + +sed -i 's/worker_processes: 2/worker_processes: auto/' conf/config.yaml +echo "passed: worker_processes number is configurable" + +# check disable cpu affinity +git checkout conf/config.yaml + +echo ' +nginx_config: + enable_cpu_affinity: true +' > conf/config.yaml + +make init + +grep -E "worker_cpu_affinity" conf/nginx.conf > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: nginx.conf file is missing worker_cpu_affinity configuration" + exit 1 +fi + +echo "passed: nginx.conf file contains worker_cpu_affinity configuration" + +# set worker processes with env +git checkout conf/config.yaml + +export APISIX_WORKER_PROCESSES=8 + +make init + +count=`grep -c "worker_processes 8;" conf/nginx.conf || true` +if [ $count -ne 1 ]; then + echo "failed: worker_processes is not 8 when using env to set worker processes" + exit 1 +fi + +echo "passed: using env to set worker processes" + +# set worker processes with env +git checkout conf/config.yaml + +make init + +count=`grep -c "ssl_session_tickets off;" conf/nginx.conf || true ` +if [ $count -eq 0 ]; then + echo "failed: ssl_session_tickets is off when ssl.ssl_session_tickets is false." 
+ exit 1 +fi + +echo ' +apisix: + ssl: + ssl_session_tickets: true +' > conf/config.yaml + +make init + +count=`grep -c "ssl_session_tickets on;" conf/nginx.conf || true ` +if [ $count -eq 0 ]; then + echo "failed: ssl_session_tickets is on when ssl.ssl_session_tickets is true." + exit 1 +fi + +echo "passed: disable ssl_session_tickets by default" + +# support 3rd-party plugin +echo ' +apisix: + extra_lua_path: "$prefix/example/?.lua" + extra_lua_cpath: "$prefix/example/?.lua" +plugins: + - 3rd-party +stream_plugins: + - 3rd-party +' > conf/config.yaml + +rm logs/error.log || true +make init +make run + +sleep 0.5 +make stop + +if grep "failed to load plugin \[3rd-party\]" logs/error.log > /dev/null; then + echo "failed: 3rd-party plugin can not be loaded" + exit 1 +fi +echo "passed: 3rd-party plugin can be loaded" + +# validate extra_lua_path +echo ' +apisix: + extra_lua_path: ";" +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'invalid extra_lua_path'; then + echo "failed: can't detect invalid extra_lua_path" + exit 1 +fi + +echo "passed: detect invalid extra_lua_path" + +# support hooking into APISIX methods +echo ' +apisix: + lua_module_hook: "example/my_hook" +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'property "lua_module_hook" validation failed'; then + echo "failed: bad lua_module_hook should be rejected" + exit 1 +fi + +echo "passed: bad lua_module_hook should be rejected" + +echo ' +apisix: + proxy_mode: http&stream + extra_lua_path: "$prefix/example/?.lua" + lua_module_hook: "my_hook" + stream_proxy: + tcp: + - addr: 9100 +' > conf/config.yaml + +rm logs/error.log +make init +make run + +sleep 0.5 +make stop + +if ! grep "my hook works in http" logs/error.log > /dev/null; then + echo "failed: hook can take effect" + exit 1 +fi + +if ! 
grep "my hook works in stream" logs/error.log > /dev/null; then + echo "failed: hook can take effect" + exit 1 +fi + +echo "passed: hook can take effect" + +# check the keepalive related parameter settings in the upstream +git checkout conf/config.yaml + +echo ' +nginx_config: + http: + upstream: + keepalive: 32 + keepalive_requests: 100 + keepalive_timeout: 6s +' > conf/config.yaml + +make init + +if ! grep "keepalive 32;" conf/nginx.conf > /dev/null; then + echo "failed: 'keepalive 32;' not in nginx.conf" + exit 1 +fi + +if ! grep "keepalive_requests 100;" conf/nginx.conf > /dev/null; then + echo "failed: 'keepalive_requests 100;' not in nginx.conf" + exit 1 +fi + +if ! grep "keepalive_timeout 6s;" conf/nginx.conf > /dev/null; then + echo "failed: 'keepalive_timeout 6s;' not in nginx.conf" + exit 1 +fi + +echo "passed: found the keepalive related parameter in nginx.conf" + +# check the charset setting +git checkout conf/config.yaml + +echo ' +nginx_config: + http: + charset: gbk +' > conf/config.yaml + +make init + +if ! grep "charset gbk;" conf/nginx.conf > /dev/null; then + echo "failed: 'charset gbk;' not in nginx.conf" + exit 1 +fi + +echo "passed: found the 'charset gbk;' in nginx.conf" + +# check realip recursive setting +git checkout conf/config.yaml + +echo ' +nginx_config: + http: + real_ip_recursive: "on" +' > conf/config.yaml + +make init + +if ! grep "real_ip_recursive on;" conf/nginx.conf > /dev/null; then + echo "failed: 'real_ip_recursive on;' not in nginx.conf" + exit 1 +fi + +echo "passed: found 'real_ip_recursive on' in nginx.conf" + +# check the variables_hash_max_size setting +git checkout conf/config.yaml + +echo ' +nginx_config: + http: + variables_hash_max_size: 1024 +' > conf/config.yaml + +make init + +if ! 
grep "variables_hash_max_size 1024;" conf/nginx.conf > /dev/null; then + echo "failed: 'variables_hash_max_size 1024;' not in nginx.conf" + exit 1 +fi + +echo "passed: found the 'variables_hash_max_size 1024;' in nginx.conf" + +# test disk_path without quotes +git checkout conf/config.yaml + +echo ' +apisix: + proxy_cache: + zones: + - name: disk_cache_one + disk_path: /tmp/disk_cache_one + disk_size: 100m + memory_size: 20m + cache_levels: "1:2" +' > conf/config.yaml + +make init + +if ! grep "proxy_cache_path /tmp/disk_cache_one" conf/nginx.conf > /dev/null; then + echo "failed: disk_path could not work without quotes" + exit 1 +fi + +echo "passed: disk_path could work without quotes" + +# check the stream lua_shared_dict lrucache_lock value +git checkout conf/config.yaml + +echo ' +apisix: + proxy_mode: http&stream + stream_proxy: + tcp: + - addr: 9100 + tls: true + - addr: "127.0.0.1:9101" + udp: + - 9200 + - "127.0.0.1:9201" +nginx_config: + stream: + lua_shared_dict: + lrucache-lock-stream: 20m +' > conf/config.yaml + +make init + +if ! grep "lrucache-lock-stream 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'lrucache-lock-stream 20m;' not in nginx.conf" + exit 1 +fi + +echo "passed: found the 'lrucache-lock-stream 20m;' in nginx.conf" + +# check the http lua_shared_dict variables value +git checkout conf/config.yaml + +echo ' +nginx_config: + http: + lua_shared_dict: + internal-status: 20m + plugin-limit-req: 20m + plugin-limit-count: 20m + prometheus-metrics: 20m + plugin-limit-conn: 20m + upstream-healthcheck: 20m + worker-events: 20m + lrucache-lock: 20m + balancer-ewma: 20m + balancer-ewma-locks: 20m + balancer-ewma-last-touched-at: 20m + plugin-limit-count-redis-cluster-slot-lock: 2m + tracing_buffer: 20m + plugin-api-breaker: 20m + etcd-cluster-health-check: 20m + discovery: 2m + jwks: 2m + introspection: 20m + access-tokens: 2m +' > conf/config.yaml + +make init + +if ! 
grep "internal-status 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'internal-status 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "plugin-limit-req 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'plugin-limit-req 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "plugin-limit-count 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'plugin-limit-count 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "prometheus-metrics 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'prometheus-metrics 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "plugin-limit-conn 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'plugin-limit-conn 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "upstream-healthcheck 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'upstream-healthcheck 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "worker-events 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'worker-events 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "lrucache-lock 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'lrucache-lock 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "balancer-ewma 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'balancer-ewma 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "balancer-ewma-locks 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'balancer-ewma-locks 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "balancer-ewma-last-touched-at 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'balancer-ewma-last-touched-at 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "plugin-limit-count-redis-cluster-slot-lock 2m;" conf/nginx.conf > /dev/null; then + echo "failed: 'plugin-limit-count-redis-cluster-slot-lock 2m;' not in nginx.conf" + exit 1 +fi + +if ! grep "plugin-api-breaker 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'plugin-api-breaker 20m;' not in nginx.conf" + exit 1 +fi + +if ! 
grep "etcd-cluster-health-check 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'etcd-cluster-health-check 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "discovery 2m;" conf/nginx.conf > /dev/null; then + echo "failed: 'discovery 2m;' not in nginx.conf" + exit 1 +fi + +if ! grep "jwks 2m;" conf/nginx.conf > /dev/null; then + echo "failed: 'jwks 2m;' not in nginx.conf" + exit 1 +fi + +if ! grep "introspection 20m;" conf/nginx.conf > /dev/null; then + echo "failed: 'introspection 20m;' not in nginx.conf" + exit 1 +fi + +if ! grep "access-tokens 2m;" conf/nginx.conf > /dev/null; then + echo "failed: 'access-tokens 2m;' not in nginx.conf" + exit 1 +fi + +echo "passed: found the http lua_shared_dict related parameter in nginx.conf" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_makefile.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_makefile.sh new file mode 100755 index 0000000..30a196d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_makefile.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +make run + +echo " +deployment: + admin: + admin_listen: + ip: 127.0.0.2 + port: 9181 +apisix: + enable_admin: true +" > conf/config.yaml + +make reload +make stop + +if ! grep "listen 127.0.0.2:9181;" conf/nginx.conf > /dev/null; then + echo "failed: regenerate nginx conf in 'make reload'" + exit 1 +fi + +echo "passed: regenerate nginx conf in 'make reload'" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_opentelemetry_set_ngx_var.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_opentelemetry_set_ngx_var.sh new file mode 100755 index 0000000..8db6ea4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_opentelemetry_set_ngx_var.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +plugins: + - opentelemetry +plugin_attr: + opentelemetry: + set_ngx_var: true +' > conf/config.yaml + +make init + +if ! grep "set \$opentelemetry_context_traceparent '';" conf/nginx.conf > /dev/null; then + echo "failed: opentelemetry_context_traceparent not found in nginx.conf" + exit 1 +fi + +if ! 
grep "set \$opentelemetry_trace_id '';" conf/nginx.conf > /dev/null; then + echo "failed: opentelemetry_trace_id not found in nginx.conf" + exit 1 +fi + +if ! grep "set \$opentelemetry_span_id '';" conf/nginx.conf > /dev/null; then + echo "failed: opentelemetry_span_id not found in nginx.conf" + exit 1 +fi + + +echo "passed: opentelemetry_set_ngx_var configuration is validated" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus.sh new file mode 100755 index 0000000..a613993 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus.sh @@ -0,0 +1,181 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +git checkout conf/config.yaml + +sleep 1 + +make run + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/apisix/prometheus/metrics) +if [ ! $code -eq 404 ]; then + echo "failed: should listen at default prometheus address" + exit 1 +fi + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9091/apisix/prometheus/metrics) +if [ ! 
$code -eq 200 ]; then + echo "failed: should listen at default prometheus address" + exit 1 +fi + +if ! curl -i http://127.0.0.1:9091/apisix/prometheus/metrics | grep "apisix_nginx_http_current_connections" > /dev/null; then + echo "failed: should listen at default prometheus address" + exit 1 +fi + +make stop + +echo "passed: should listen at default prometheus address" + +echo ' +plugin_attr: + prometheus: + export_addr: + ip: ${{IP}} + port: ${{PORT}} +' > conf/config.yaml + +IP=127.0.0.1 PORT=9092 make run + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9092/apisix/prometheus/metrics) +if [ ! $code -eq 200 ]; then + echo "failed: should listen at configured prometheus address" + exit 1 +fi + +make stop + +echo "passed: should listen at configured prometheus address" + +echo ' +plugin_attr: + prometheus: + enable_export_server: false + export_uri: /prometheus/metrics + export_addr: + ip: ${{IP}} + port: ${{PORT}} +' > conf/config.yaml + +IP=127.0.0.1 PORT=9092 make run + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +# initialize prometheus metrics public API route #1 +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/metrics1 \ + -H "X-API-KEY: $admin_key" \ + -d "{ + \"uri\": \"/prometheus/metrics\", + \"plugins\": { + \"public-api\": {} + } + }") +if [ ! $code -eq 201 ]; then + echo "failed: initialize prometheus metrics public API failed #1" + exit 1 +fi + +sleep 0.5 + +code=$(curl -v -k -i -m 20 -o /dev/null -s http://127.0.0.1:9092/prometheus/metrics || echo 'ouch') +if [ "$code" != "ouch" ]; then + echo "failed: should listen at previous prometheus address" + exit 1 +fi + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/prometheus/metrics) +if [ ! 
$code -eq 200 ]; then + echo "failed: should listen at previous prometheus address" + exit 1 +fi + +make stop + +echo "passed: should listen at previous prometheus address" + +echo ' +plugin_attr: + prometheus: + export_addr: + ip: ${{IP}} + port: ${{PORT}} +' > conf/config.yaml + +out=$(IP=127.0.0.1 PORT=9090 make init 2>&1 || true) +if ! echo "$out" | grep "prometheus port 9090 conflicts with control"; then + echo "failed: can't detect port conflicts" + exit 1 +fi + +echo ' +apisix: + node_listen: ${{PORT}} +plugin_attr: + prometheus: + export_addr: + ip: ${{IP}} + port: ${{PORT}} +' > conf/config.yaml + +out=$(IP=127.0.0.1 PORT=9092 make init 2>&1 || true) +if ! echo "$out" | grep "http listen port 9092 conflicts with prometheus"; then + echo "failed: can't detect port conflicts" + exit 1 +fi + +echo "passed: should detect port conflicts" + +echo ' +plugin_attr: + prometheus: + metric_prefix: apisix_ci_prefix_ + export_addr: + ip: ${{IP}} + port: ${{PORT}} +' > conf/config.yaml + +IP=127.0.0.1 PORT=9092 make run + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +# initialize prometheus metrics public API route #2 +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} -X PUT http://127.0.0.1:9180/apisix/admin/routes/metrics2 \ + -H "X-API-KEY: $admin_key" \ + -d "{ + \"uri\": \"/apisix/prometheus/metrics\", + \"plugins\": { + \"public-api\": {} + } + }") +if [ ! $code -eq 201 ]; then + echo "failed: initialize prometheus metrics public API failed #2" + exit 1 +fi + +sleep 0.5 + +if ! 
curl -s http://127.0.0.1:9092/apisix/prometheus/metrics | grep "apisix_ci_prefix_" > /dev/null; then + echo "failed: should use custom metric prefix" + exit 1 +fi + +make stop + +echo "passed: should use custom metric prefix" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus_reload.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus_reload.sh new file mode 100755 index 0000000..7a8b1a1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus_reload.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +git checkout conf/config.yaml + +make run + +sleep 2 + +echo "removing prometheus from the plugins list" +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - ip-restriction' > conf/config.yaml + +echo "fetch metrics, should not contain {}" + +if curl -i http://127.0.0.1:9091/apisix/prometheus/metrics | grep "{}" > /dev/null; then + echo "failed: metrics should not contain '{}' when prometheus is enabled" + exit 1 +fi + +echo "calling reload API to actually disable prometheus" + +curl -i http://127.0.0.1:9090/v1/plugins/reload -XPUT + +sleep 2 + +echo "fetch metrics after reload should contain {}" + +if ! curl -i http://127.0.0.1:9091/apisix/prometheus/metrics | grep "{}" > /dev/null; then + echo "failed: metrics should contain '{}' when prometheus is disabled" + exit 1 +fi + +echo "re-enable prometheus" + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - prometheus' > conf/config.yaml + +echo "fetching metrics without reloading should give same result as before" + +if ! 
curl -i http://127.0.0.1:9091/apisix/prometheus/metrics | grep "{}" > /dev/null; then + echo "failed: metrics should contain '{}' when prometheus is disabled" + exit 1 +fi + +echo "calling reload API to actually enable prometheus" + +curl -i http://127.0.0.1:9090/v1/plugins/reload -XPUT + +sleep 2 + +if curl -i http://127.0.0.1:9091/apisix/prometheus/metrics | grep "{}" > /dev/null; then + echo "failed: metrics should not contain '{}' when prometheus is enabled" + exit 1 +fi diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus_run_in_privileged.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus_run_in_privileged.sh new file mode 100755 index 0000000..08d0193 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus_run_in_privileged.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +git checkout conf/config.yaml + +exit_if_not_customed_nginx + +# prometheus run in privileged works when only http is enabled +sleep 0.5 +rm logs/error.log || true + +echo ' +apisix: + extra_lua_path: "$prefix/t/lib/?.lua" +nginx_config: + error_log_level: info +' > conf/config.yaml + +make run +sleep 0.1 + +curl -s -o /dev/null http://127.0.0.1:9091/apisix/prometheus/metrics + +if ! grep -E "process type: privileged agent" logs/error.log; then + echo "failed: prometheus run in privileged can't work when only http is enabled" + exit 1 +fi + +make stop + +echo "prometheus run in privileged agent successfully when only http is enabled" + + +# prometheus run in privileged works when both http & stream are enabled +sleep 0.5 +rm logs/error.log || true + +echo ' +apisix: + proxy_mode: "http&stream" + extra_lua_path: "$prefix/t/lib/?.lua" + enable_admin: true + stream_proxy: + tcp: + - addr: 9100 +stream_plugins: + - prometheus +nginx_config: + error_log_level: info +' > conf/config.yaml + +make run +sleep 0.1 + +curl -s -o /dev/null http://127.0.0.1:9091/apisix/prometheus/metrics + +if ! grep -E " process type: privileged agent" logs/error.log; then + echo "failed: prometheus run in privileged can't work when both http & stream are enabled" + exit 1 +fi + +echo "passed: prometheus run in privileged agent successfully when both http & stream are enabled" + +make stop + + +# prometheus run in privileged works when only stream is enabled +sleep 0.5 +rm logs/error.log || true + +echo ' +apisix: + proxy_mode: "http&stream" + extra_lua_path: "$prefix/t/lib/?.lua" + enable_admin: false + stream_proxy: + tcp: + - addr: 9100 +stream_plugins: + - prometheus +nginx_config: + error_log_level: info +' > conf/config.yaml + +make run +sleep 0.1 + +curl -s -o /dev/null http://127.0.0.1:9091/apisix/prometheus/metrics + +if ! 
grep -E " process type: privileged agent" logs/error.log; then + echo "failed: prometheus run in privileged can't work when only stream is enabled" + exit 1 +fi + +echo "passed: prometheus run in privileged agent successfully when only stream is enabled" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus_stream.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus_stream.sh new file mode 100755 index 0000000..c326315 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_prometheus_stream.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +exit_if_not_customed_nginx + +echo " +apisix: + proxy_mode: http&stream + enable_admin: true + stream_proxy: + tcp: + - addr: 9100 +stream_plugins: + - prometheus +" > conf/config.yaml + +make run +sleep 0.5 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl -v -k -i -m 20 -o /dev/null -s -X PUT http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ + -H "X-API-KEY: $admin_key" \ + -d '{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": [{ + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + }] + } + }' + +curl http://127.0.0.1:9100 || true +sleep 1 # wait for sync + +out="$(curl http://127.0.0.1:9091/apisix/prometheus/metrics)" +if ! echo "$out" | grep "apisix_stream_connection_total{route=\"1\"} 1" > /dev/null; then + echo "failed: prometheus can't work in stream subsystem" + exit 1 +fi + +make stop + +echo "passed: prometheus works when both http & stream are enabled" + +echo " +apisix: + proxy_mode: stream + enable_admin: false + stream_proxy: + tcp: + - addr: 9100 +stream_plugins: + - prometheus +" > conf/config.yaml + +make run +sleep 0.5 + +curl http://127.0.0.1:9100 || true +sleep 1 # wait for sync + +out="$(curl http://127.0.0.1:9091/apisix/prometheus/metrics)" +if ! echo "$out" | grep "apisix_stream_connection_total{route=\"1\"} 1" > /dev/null; then + echo "failed: prometheus can't work in stream subsystem" + exit 1 +fi + +if ! 
echo "$out" | grep "apisix_node_info{hostname=" > /dev/null; then + echo "failed: prometheus can't work in stream subsystem" + exit 1 +fi + +echo "passed: prometheus works when only stream is enabled" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_proxy_mirror_timeout.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_proxy_mirror_timeout.sh new file mode 100755 index 0000000..90b7fd0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_proxy_mirror_timeout.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +plugin_attr: + proxy-mirror: + timeout: + connect: 2000ms + read: 2s + send: 2000ms +' > conf/config.yaml + +make init + +if ! grep "proxy_connect_timeout 2000ms;" conf/nginx.conf > /dev/null; then + echo "failed: proxy_connect_timeout not found in nginx.conf" + exit 1 +fi + +if ! 
grep "proxy_read_timeout 2s;" conf/nginx.conf > /dev/null; then + echo "failed: proxy_read_timeout not found in nginx.conf" + exit 1 +fi + +echo "passed: proxy timeout configuration is validated" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_route_match_with_graphql.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_route_match_with_graphql.sh new file mode 100755 index 0000000..c670277 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_route_match_with_graphql.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: yaml + +apisix: + router: + http: radixtree_uri + +nginx_config: + worker_processes: 1 + +' > conf/config.yaml + +echo ' +routes: + - uri: "/hello" + hosts: + - test.com + vars: + - - "graphql_name" + - "==" + - "createAccount" + priority: 30 + id: "graphql1" + upstream_id: "invalid" + + - uri: "/hello" + hosts: + - test.com + plugins: + echo: + body: "test server" + priority: 20 + id: "graphql2" + upstream_id: "invalid" + + - uri: "/hello" + hosts: + - test2.com + plugins: + echo: + body: "test2" + priority: 20 + id: "graphql3" + upstream_id: "invalid" + +upstreams: + - nodes: + 127.0.0.1:1999: 1 + id: "invalid" +#END +' > conf/apisix.yaml + +make run + +dd if=/dev/urandom of=tmp_data.json bs=300K count=1 + +for i in {1..100}; do + curl -s http://127.0.0.1:9080/hello -H "Host: test.com" -H "Content-Type: application/json" -X POST -d @tmp_data.json > /tmp/graphql_request1.txt & + curl -s http://127.0.0.1:9080/hello -H "Host: test2.com" -H "Content-Type: application/json" -X POST -d @tmp_data.json > /tmp/graphql_request2.txt & + + wait + + if diff /tmp/graphql_request1.txt /tmp/graphql_request2.txt > /dev/null; then + make stop + echo "failed: route match error in GraphQL requests, route should not be the same" + exit 1 + fi +done + +make stop + +rm tmp_data.json /tmp/graphql_request1.txt /tmp/graphql_request2.txt + +echo "passed: GraphQL requests can be correctly matched to the route" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_serverless.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_serverless.sh new file mode 100755 index 0000000..8f1d7e1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_serverless.sh @@ -0,0 +1,107 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +serverless_clean_up() { + clean_up + git checkout conf/apisix.yaml +} + +trap serverless_clean_up EXIT + +rm logs/error.log || echo '' + +echo ' +apisix: + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +' > conf/config.yaml + +make init + +echo ' +routes: + - + uri: /log_request + plugins: + serverless-pre-function: + phase: before_proxy + functions: + - "return function(conf, ctx) ctx.count = (ctx.count or 0) + 1 end" + - "return function(conf, ctx) ngx.log(ngx.WARN, \"run before_proxy phase \", ctx.count, \" with \", ctx.balancer_ip) end" + upstream: + nodes: + "127.0.0.1:1980": 1 + "0.0.0.0:1979": 100000 + type: chash + key: remote_addr +#END +' > conf/apisix.yaml + +make run +sleep 0.1 +curl -v -k -i -m 20 -o /dev/null http://127.0.0.1:9080/log_request + +if ! grep "run before_proxy phase 1 with 0.0.0.0" logs/error.log; then + echo "failed: before_proxy phase runs incorrect time" + exit 1 +fi + +if ! 
grep "run before_proxy phase 2 with 127.0.0.1" logs/error.log; then + echo "failed: before_proxy phase runs incorrect time" + exit 1 +fi + +make stop + +echo ' +routes: + - + uri: /log_request + plugins: + serverless-pre-function: + phase: before_proxy + functions: + - "return function(conf, ctx) ngx.exit(403) end" + upstream: + nodes: + "127.0.0.1:1980": 1 + "0.0.0.0:1979": 100000 + type: chash + key: remote_addr +#END +' > conf/apisix.yaml + +make run +sleep 0.1 +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/log_request) +make stop + +if [ ! $code -eq 403 ]; then + echo "failed: failed to exit in the before_proxy phase" + exit 1 +fi + +make stop + +echo "pass: run code in the before_proxy phase of serverless plugin" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_snippet.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_snippet.sh new file mode 100755 index 0000000..72eee7e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_snippet.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +# allow injecting configuration snippets + +echo ' +apisix: + node_listen: 9080 + enable_admin: true + proxy_mode: http&stream + stream_proxy: + tcp: + - 9100 +nginx_config: + main_configuration_snippet: | + daemon on; + http_configuration_snippet: | + chunked_transfer_encoding on; + http_server_configuration_snippet: | + set $my "var"; + http_server_location_configuration_snippet: | + set $upstream_name -; + http_admin_configuration_snippet: | + log_format admin "$request_time $pipe"; + http_end_configuration_snippet: | + server_names_hash_bucket_size 128; + stream_configuration_snippet: | + tcp_nodelay off; +' > conf/config.yaml + +make init + +grep "daemon on;" -A 2 conf/nginx.conf | grep "configuration snippet ends" > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: can't inject main configuration" + exit 1 +fi + +grep "chunked_transfer_encoding on;" -A 2 conf/nginx.conf | grep "configuration snippet ends" > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: can't inject http configuration" + exit 1 +fi + +grep 'set $my "var";' -A 2 conf/nginx.conf | grep "configuration snippet ends" > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: can't inject http server configuration" + exit 1 +fi + +grep 'set $upstream_name -;' -A 2 conf/nginx.conf | grep "configuration snippet ends" > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: can't inject http server location configuration" + exit 1 +fi + +grep 'log_format admin "$request_time $pipe";' -A 2 conf/nginx.conf | grep "configuration snippet ends" > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: can't inject admin server configuration" + exit 1 +fi + +grep 'server_names_hash_bucket_size 128;' -A 2 conf/nginx.conf | grep "configuration snippet ends" > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: can't inject http end configuration" + exit 1 +fi + +grep 'server_names_hash_bucket_size 128;' -A 3 conf/nginx.conf | grep "}" > /dev/null +if [ ! $? 
-eq 0 ]; then + echo "failed: can't inject http end configuration" + exit 1 +fi + +grep 'tcp_nodelay off;' -A 2 conf/nginx.conf | grep "configuration snippet ends" > /dev/null +if [ ! $? -eq 0 ]; then + echo "failed: can't inject stream configuration" + exit 1 +fi + +# use the builtin server by default + +echo ' +apisix: + node_listen: 9080 +nginx_config: + http_configuration_snippet: | + server { + listen 9080; + server_name qa.com www.qa.com; + location / { + return 503 "ouch"; + } + } +' > conf/config.yaml + +make run + +sleep 1 +code=$(curl -k -i -o /dev/null -s -w %{http_code} http://127.0.0.1:9080 -H 'Host: m.qa.com') +if [ ! $code -eq 404 ]; then + echo "failed: use the builtin server by default" + exit 1 +fi +code=$(curl -k -i -o /dev/null -s -w %{http_code} http://127.0.0.1:9080 -H 'Host: www.qa.com') +if [ ! $code -eq 503 ]; then + echo "failed: use the builtin server by default" + exit 1 +fi + +make stop + +echo "passed: use the builtin server by default" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_standalone.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_standalone.sh new file mode 100755 index 0000000..d5844a2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_standalone.sh @@ -0,0 +1,157 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +standalone() { + rm -f conf/apisix.yaml.link + clean_up + git checkout conf/apisix.yaml +} + +trap standalone EXIT + +# support environment variables in yaml values +echo ' +apisix: + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +' > conf/config.yaml + +echo ' +routes: + - + uri: ${{var_test_path}} + plugins: + proxy-rewrite: + uri: ${{var_test_proxy_rewrite_uri:=/apisix/nginx_status}} + upstream: + nodes: + "127.0.0.1:9091": 1 + type: roundrobin +#END +' > conf/apisix.yaml + +# check for resolve variables +var_test_path=/test make init + +if ! grep "env var_test_path;" conf/nginx.conf > /dev/null; then + echo "failed: failed to resolve variables" + exit 1 +fi + +# variable is valid +var_test_path=/test make run +sleep 0.1 +code=$(curl -o /dev/null -s -m 5 -w %{http_code} http://127.0.0.1:9080/test) +if [ ! $code -eq 200 ]; then + echo "failed: resolve variables in apisix.yaml conf failed" + exit 1 +fi + +echo "passed: resolve variables in apisix.yaml conf success" + +# support environment variables in yaml keys +echo ' +routes: + - + uri: "/test" + plugins: + proxy-rewrite: + uri: "/apisix/nginx_status" + upstream: + nodes: + "${{HOST_IP}}:${{PORT}}": 1 + type: roundrobin +#END +' > conf/apisix.yaml + +# variable is valid +HOST_IP="127.0.0.1" PORT="9091" make init +HOST_IP="127.0.0.1" PORT="9091" make run +sleep 0.1 + +code=$(curl -o /dev/null -s -m 5 -w %{http_code} http://127.0.0.1:9080/test) +if [ ! 
$code -eq 200 ]; then + echo "failed: resolve variables in apisix.yaml conf failed" + exit 1 +fi + +echo "passed: resolve variables in apisix.yaml conf success" + +# configure standalone via deployment +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +' > conf/config.yaml + +var_test_path=/test make run +sleep 0.1 +code=$(curl -o /dev/null -s -m 5 -w %{http_code} http://127.0.0.1:9080/apisix/admin/routes) +if [ ! $code -eq 404 ]; then + echo "failed: admin API should be disabled automatically" + exit 1 +fi + +echo "passed: admin API should be disabled automatically" + +# support environment variables +echo ' +routes: + - + uri: ${{var_test_path}} + plugins: + proxy-rewrite: + uri: ${{var_test_proxy_rewrite_uri:=/apisix/nginx_status}} + upstream: + nodes: + "127.0.0.1:9091": 1 + type: roundrobin +#END +' > conf/apisix.yaml + +var_test_path=/test make run +sleep 0.1 +code=$(curl -o /dev/null -s -m 5 -w %{http_code} http://127.0.0.1:9080/test) +if [ ! $code -eq 200 ]; then + echo "failed: resolve variables in apisix.yaml conf failed" + exit 1 +fi + +echo "passed: resolve variables in apisix.yaml conf success" + +# Avoid unnecessary config reloads +## Wait for a second else `st_ctime` won't increase +sleep 1 +expected_config_reloads=$(grep "config file $(pwd)/conf/apisix.yaml reloaded." logs/error.log | wc -l) + +## Create a hard link to change the link count and as a result `st_ctime` +ln conf/apisix.yaml conf/apisix.yaml.link +sleep 1 + +actual_config_reloads=$(grep "config file $(pwd)/conf/apisix.yaml reloaded."
logs/error.log | wc -l) +if [ $expected_config_reloads -ne $actual_config_reloads ]; then + echo "failed: apisix.yaml was reloaded" + exit 1 +fi +echo "passed: apisix.yaml was not reloaded" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_status_api.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_status_api.sh new file mode 100755 index 0000000..c7b1a27 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_status_api.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +git checkout conf/config.yaml + + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:23790" + - "http://127.0.0.1:23791" + - "http://127.0.0.1:23792" + prefix: /apisix +nginx_config: + error_log_level: info +apisix: + status: + ip: 127.0.0.1 + port: 7085 +' > conf/config.yaml + +# create 3 node etcd cluster in docker +ETCD_NAME_0=etcd0 +ETCD_NAME_1=etcd1 +ETCD_NAME_2=etcd2 +docker compose -f ./t/cli/docker-compose-etcd-cluster.yaml up -d + +make run + +sleep 0.5 + +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:7085/status | grep 200 \ +|| (echo "failed: status api didn't return 200"; exit 1) + +sleep 2 + +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:7085/status/ready | grep 200 \ +|| (echo "failed: status/ready api didn't return 200"; exit 1) + +# stop two etcd endpoints but status api should return 200 as all workers are synced +docker stop ${ETCD_NAME_0} +docker stop ${ETCD_NAME_1} + +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:7085/status | grep 200 \ +|| (echo "failed: status api didn't return 200"; exit 1) + +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:7085/status/ready | grep 200 \ +|| (echo "failed: status/ready api didn't return 200"; exit 1) + +docker stop ${ETCD_NAME_2} + +echo "/status/ready returns 200 even when etcd endpoints are down as all workers are synced" +curl -s -o /dev/null -w "%{http_code}" http://127.0.0.1:7085/status/ready | grep 200 \ +|| (echo "failed: status/ready api didn't return 200"; exit 1) + +docker compose -f ./t/cli/docker-compose-etcd-cluster.yaml down diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_stream_config.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_stream_config.sh new file mode 100755 index 0000000..4eefb47 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_stream_config.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache 
Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo " +apisix: + enable_admin: false + proxy_mode: stream + stream_proxy: + tcp: + - addr: 9100 +" > conf/config.yaml + +make init + +count=$(grep -c "lua_package_path" conf/nginx.conf) +if [ "$count" -ne 1 ]; then + echo "failed: failed to enable stream proxy only by default" + exit 1 +fi + +echo "passed: enable stream proxy only by default" + +echo " +apisix: + enable_admin: false + proxy_mode: http&stream + stream_proxy: + tcp: + - addr: 9100 +" > conf/config.yaml + +make init + +count=$(grep -c "lua_package_path" conf/nginx.conf) +if [ "$count" -ne 2 ]; then + echo "failed: failed to enable stream proxy and http proxy" + exit 1 +fi + +echo " +apisix: + enable_admin: true + proxy_mode: http&stream + stream_proxy: + tcp: + - addr: 9100 +" > conf/config.yaml + +make init + +count=$(grep -c "lua_package_path" conf/nginx.conf) +if [ "$count" -ne 2 ]; then + echo "failed: failed to enable stream proxy and http proxy when admin is enabled" + exit 1 +fi + +echo "passed: enable stream proxy and http proxy" + +echo " +apisix: + proxy_mode: http&stream + stream_proxy: + tcp: + - addr: 9100 +stream_plugins: + - ip-restriction +" > conf/config.yaml + +make init + +if grep 
"plugin-limit-conn-stream" conf/nginx.conf > /dev/null; then + echo "failed: enable shdict on demand" + exit 1 +fi + +echo " +apisix: + proxy_mode: http&stream + stream_proxy: + tcp: + - addr: 9100 +stream_plugins: + - limit-conn +" > conf/config.yaml + +make init + +if ! grep "plugin-limit-conn-stream" conf/nginx.conf > /dev/null; then + echo "failed: enable shdict on demand" + exit 1 +fi + +echo "passed: enable shdict on demand" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_tls_over_tcp.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_tls_over_tcp.sh new file mode 100755 index 0000000..c4dbc67 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_tls_over_tcp.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./t/cli/common.sh + +# check tls over tcp proxy +echo " +apisix: + proxy_mode: http&stream + stream_proxy: + tcp: + - addr: 9100 + tls: true +nginx_config: + stream_configuration_snippet: | + server { + listen 9101; + return \"OK FROM UPSTREAM\"; + } + +" > conf/config.yaml + +make run +sleep 0.1 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "cert" : "'"$(cat t/certs/mtls_server.crt)"'", + "key": "'"$(cat t/certs/mtls_server.key)"'", + "snis": ["test.com"] +}' + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl -k -i http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ + -H "X-API-KEY: $admin_key" -X PUT -d \ + '{"upstream":{"nodes":{"127.0.0.1:9101":1},"type":"roundrobin"}}' + +sleep 0.1 +if ! echo -e 'mmm' | \ + openssl s_client -connect 127.0.0.1:9100 -servername test.com -CAfile t/certs/mtls_ca.crt \ + -ign_eof | \ + grep 'OK FROM UPSTREAM'; +then + echo "failed: should proxy tls over tcp" + exit 1 +fi + +make stop +echo "passed: proxy tls over tcp" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_upstream_mtls.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_upstream_mtls.sh new file mode 100755 index 0000000..bb6c41a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_upstream_mtls.sh @@ -0,0 +1,211 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# validate the config.yaml + +. ./t/cli/common.sh + +# test proxy_ssl_trusted_certificate success +git checkout conf/config.yaml + +exit_if_not_customed_nginx + +echo ' +apisix: + ssl: + ssl_trusted_certificate: t/certs/apisix.crt +nginx_config: + http_configuration_snippet: | + server { + listen 1983 ssl; + server_name test.com; + ssl_certificate ../t/certs/apisix.crt; + ssl_certificate_key ../t/certs/apisix.key; + location /hello { + return 200 "hello world"; + } + } + http_server_configuration_snippet: | + proxy_ssl_verify on; +' > conf/config.yaml + +rm logs/error.log || true +make init +make run +sleep 0.1 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + "pass_host": "rewrite", + "nodes": { + "127.0.0.1:1983": 1 + }, + "scheme": "https", + "hash_on": "vars", + "upstream_host": "test.com", + "type": "roundrobin", + "tls": { + "client_cert": "-----BEGIN 
CERTIFICATE-----\nMIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV\nBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G\nA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa\nGA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n\nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM\nCHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe\ncvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb\nVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR\n2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr\nabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2\nWjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/\nEvm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1\n/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh\n/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj\ncTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ\ntSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl\nc3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC\ntC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY\n1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl\nPYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob\nrJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy\nhme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1\n7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y\nIJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve\nU/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM=\n-----END CERTIFICATE-----\n", + "client_key": 
"HrMHUvE9Esvn7GnZ+vAynaIg/8wlB3r0zm0htmnwofYLp1VhtLeU1EmMJkPLUkcn2+v6Uav9bOQMkPdSpUMcEpRplLSXs+miu+B07CCUnsMrXkfQawRMIoePJZSLH5+PfDAlWIK2Q+ruYnjtnpNziiAtXf/HRRwHHMelnfedXqD8kn3Toe46ZYyBir99o/r/do5ludez5oY7qhOgNSWKCfnZE8Ip82g7t7n7jsAf5tTdRulUGBQ4ITV2zM3cxpD0PWnWMbOfygZIDxR8QU9wj8ihuFL1s1NM8PplcKbUxC4QlrSN+ZNkr6mxy+akPmXlABwcFIiSK7c/xvU1NjoILnhPpL6aRpbhmQX/a1XUCl+2INlQ5QbXbTN+JmDBhrU9NiYecRJMfmA1N/lhwgt01tUnxMoAhfpUVgEbZNalCJt+wn8TC+Xp3DZ0bCpXrfzqsprGKan9qC3mCN03jj50JyGFL+xt8wX8D0uaIsu4cVk4et7kbTIj9rvucsh0cfKn8va8/cdjw5QhFSRBkW5Vuz9NwvzVQ6DHWs1a8VZbN/hERxcbWNk/p1VgGLHioqZZTOd5CYdN4dGjnksjXa0Z77mTSoNx3U79FQPAgUMEA1phnO/jdryM3g5M+UvESXA/75we435xg5tLRDvNwJw2NlosQsGY7fzUi2+HFo436htydRFv8ChHezs2v99mjfCUijrWYoeJ5OB2+KO9XiOIz7gpqhTef9atajSYRhxhcwdCVupC1PrPGn9MzhdQLeqQCJj3kyazPfO3xPkNpMAqd2lXnLR4HGd9SBHe75Sik3jW9W1sUqrn2fDjyWd0jz57pl4qyHjbzjd3uE5qbH/QuYZBIzI9tEn7tj12brWrwHsMt+/4M7zp8Opsia64V3Y7ICLIi7fiYfr70RujXyn8Ik5TB1QC98JrnDjgQlTPDhHLk1r8XhZXqIIg6DmaN7UUjIuZhKxARTs8b5WMPvVV4GownlPN28sHIMAX84BNbP0597Fxipwp2oTMFKTzvxm+QUtbWvIPzF3n25L4sPCyUx5PRIRCJ5kDNQfhiN6o3Y/fAY0PyxI06PWYoNvSn3uO24XNXbF3RkpwKtV8n/iNo5dyM1VqFPWDuKRSLHY7E4lQTdqx4/n+rrnoH6SlmQ0zwxwxBeAz/TvkmiW7WLe3C5cUDKF9yYwvAe8ek4oTR3GxaiDWjNFsu7DUoDjpH5f3IxrX2IN4FyzE47hMeg4muPov7h74WwosqgnfmwoAEFV4+ldmzpdSjghZoF2M9EZI24Xa9rVdd6j2t6IjX20oL+SLQL/9HppMi1nC+3Zby1WOvuTR4g8K1QP75OeY4xTD1iEAXpd0WOX7C3ndceVF4THLCI4Imcf9FH9MBrE55FPMEsAk54HiAoyMd6tgqv/akRqmuAmnSsrWALhqiCnAVh2uzk644gSzmsFbh7zF33qrcafPpU4PxUEvpqbLz7asoNUDf4YB4gCcgZx30eK/w9FpMaLveiNq77EW7qcvJQPcjZ4uLaKkQVODJsd+1CbZF6370aiLxouXLFT3eQI7Ovu6be8D3MmazRPgCV36qzMwONqrXE/JbMFMKe5l1e4Y6avMejrj43BMgGo2u8LimCWkBeNwqIjH7plwbpDKo4OKZVbrzSZ0hplUDd/jMrb6Ulbc04uMeEigehrhSsZ0ZwoDiZcf/fDIclaTGNMl40N2wBiqdnw9uKTqD1YxzqDQ7vgiXG55ae31lvevPTgk/lLvpwzlyitjGs+6LJPu/wSCKA2VIyhJfK+8EnItEKjBUrXdOklBdOmTpUpdQ+zfd2NCrFRDJZKl26Uh412adFEkqY37O/0FbSCpAIsUCvaItcqK7qh5Rq26hVR0nS1MRs+MjGBzGqudXPQZHy+Yp7AlAa5UgJUaAwn2b/id6kNdv6hNWqSzHvOAVKdgC9/j0yN1VJD92+IoJTTiXsMQELcgm1Ehj2GZpTHu+GPuaOovHBnZMq/Kg4nUS+i
g86X01jV28uGGtglERf1HqVQpdZwbrXtUqH0cbjlvUwQ1j7zp9yhs+0ta87v0I+elAZhXzqvehMiLJu2o9/k2+4dPvkEscduHOU6jZqe8ndNEMQWiaZEYJKxNWPTaQ6nZSlFTsT7GlENeJlFzlw8QkyRJPMBWkXuaymQUcu43Pm+gAjinHSAGUeaSaIdL2Yb0M88qNwG+UlNEslx/J37pA1oMJyxb7XOeySxkP7dXi5JvygLIfkEA3ENC4NHU9nsUvTvp5AZidZCxxtYCNYfjY6xyrlfnE+V+us31LA9Wc/tKa4y3Ldj30IT2sssUrdZ0l7UbwfcZT42ZeJpxDofpZ2rjgswTs0Upr72VuOCzjpKa1CJwxhVVtPVJJovcXp4bsNPJers+yIYfTl1aqaf4qSzU5OL/cze2e6qAh7622zEa/q6klpUx9b1f8YGlQhjQcy3++JnwwsHR71Ofh9woXq57LDCHFA6f95zdkadDDhwgRcvWVnbA2Szps8iJv7h2m25qZPFtN6puJj3RlmT6hnfBeYCjpfy/2TxyCqm6bG3HZxGuhzWs2ZGxzsjBJ3ueO1pAOjtDhkRqzoWt/v2o367IYP7iTcp4pi+qJHIWCN1ElDI0BVoZ+Xq9iLfKmjrjcxQ7EYGHfQDE52QaCQ3nMB7oiqncZ1Q5n/ICDHha9RkPP9V9vWiJIZwgOJtPfGzsGQ9AigH6po65IJyxmY5upuhg7DTmsLQnKC/fwjkBF9So/4cdZuqDbxGrDDOgpL7uvWXANRNMrqYoMFUG7M90QJHj7NgSL+B6mSNwa9ctTua7Estkoyvavda3Bl3qHQ0Hva5gjSg6elL6PQ4ksqhESvjztuy58qk9aZHsQB8ZKRu8VSay40a/3ueX6bnd0hwsYy42aWJR1z+uie3yTWPuG2JZ7DjkgDduWdC+cxfvTVTG58E5luafy5j/t85UVoB2nr46VHlt/vg4M9G8/4F0d0Y6ThI4/XTfg6l1vq5ouzhQxd+SRwnuXieZy+4/2XKJnrV6t+JbNAvwdGR1V9VPLlnb+IqpvOCYyL1YLYSlNubb9HU0wxVPppGSpJLmi+njQzl71PBgMm6QV9j889wPUo387fRbJjXbSSVLon61xk/4dNvjsgfv9rF+/qEML0q4tXBJVOJ1iwKjn84Nk6vdHM3Hu8knp0hYFa4AECYKInSTVXajWAKFx4SOq8G8MA/0YlIN872LBjUm2GKs17wsJuWID+mSyVE5pV5gQ+r92YvPcC+yIvB8hTTaRclAP/KyJesDTA==" + } + } +}' + +sleep 1 + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/hello) + +if [ ! 
$code -eq 200 ]; then + echo "failed: connection to upstream with mTLS failed" + exit 1 +fi + +sleep 0.1 + +make stop + +echo "passed: connection to upstream with mTLS success" + +# test proxy_ssl_trusted_certificate and use incorrect ca cert +echo ' +apisix: + ssl: + ssl_trusted_certificate: t/certs/apisix_ecc.crt +nginx_config: + http_configuration_snippet: | + server { + listen 1983 ssl; + server_name test.com; + ssl_certificate ../t/certs/apisix.crt; + ssl_certificate_key ../t/certs/apisix.key; + location /hello { + return 200 "hello world"; + } + } + http_server_configuration_snippet: | + proxy_ssl_verify on; +' > conf/config.yaml + +rm logs/error.log || true +make init +make run +sleep 0.1 + +admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"//g') +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: $admin_key" -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + "pass_host": "rewrite", + "nodes": { + "127.0.0.1:1983": 1 + }, + "scheme": "https", + "hash_on": "vars", + "upstream_host": "test.com", + "type": "roundrobin", + "tls": { + "client_cert": "-----BEGIN 
CERTIFICATE-----\nMIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV\nBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G\nA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa\nGA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n\nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM\nCHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe\ncvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb\nVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR\n2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr\nabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2\nWjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/\nEvm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1\n/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh\n/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj\ncTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ\ntSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl\nc3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC\ntC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY\n1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl\nPYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob\nrJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy\nhme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1\n7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y\nIJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve\nU/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM=\n-----END CERTIFICATE-----\n", + "client_key": 
"HrMHUvE9Esvn7GnZ+vAynaIg/8wlB3r0zm0htmnwofYLp1VhtLeU1EmMJkPLUkcn2+v6Uav9bOQMkPdSpUMcEpRplLSXs+miu+B07CCUnsMrXkfQawRMIoePJZSLH5+PfDAlWIK2Q+ruYnjtnpNziiAtXf/HRRwHHMelnfedXqD8kn3Toe46ZYyBir99o/r/do5ludez5oY7qhOgNSWKCfnZE8Ip82g7t7n7jsAf5tTdRulUGBQ4ITV2zM3cxpD0PWnWMbOfygZIDxR8QU9wj8ihuFL1s1NM8PplcKbUxC4QlrSN+ZNkr6mxy+akPmXlABwcFIiSK7c/xvU1NjoILnhPpL6aRpbhmQX/a1XUCl+2INlQ5QbXbTN+JmDBhrU9NiYecRJMfmA1N/lhwgt01tUnxMoAhfpUVgEbZNalCJt+wn8TC+Xp3DZ0bCpXrfzqsprGKan9qC3mCN03jj50JyGFL+xt8wX8D0uaIsu4cVk4et7kbTIj9rvucsh0cfKn8va8/cdjw5QhFSRBkW5Vuz9NwvzVQ6DHWs1a8VZbN/hERxcbWNk/p1VgGLHioqZZTOd5CYdN4dGjnksjXa0Z77mTSoNx3U79FQPAgUMEA1phnO/jdryM3g5M+UvESXA/75we435xg5tLRDvNwJw2NlosQsGY7fzUi2+HFo436htydRFv8ChHezs2v99mjfCUijrWYoeJ5OB2+KO9XiOIz7gpqhTef9atajSYRhxhcwdCVupC1PrPGn9MzhdQLeqQCJj3kyazPfO3xPkNpMAqd2lXnLR4HGd9SBHe75Sik3jW9W1sUqrn2fDjyWd0jz57pl4qyHjbzjd3uE5qbH/QuYZBIzI9tEn7tj12brWrwHsMt+/4M7zp8Opsia64V3Y7ICLIi7fiYfr70RujXyn8Ik5TB1QC98JrnDjgQlTPDhHLk1r8XhZXqIIg6DmaN7UUjIuZhKxARTs8b5WMPvVV4GownlPN28sHIMAX84BNbP0597Fxipwp2oTMFKTzvxm+QUtbWvIPzF3n25L4sPCyUx5PRIRCJ5kDNQfhiN6o3Y/fAY0PyxI06PWYoNvSn3uO24XNXbF3RkpwKtV8n/iNo5dyM1VqFPWDuKRSLHY7E4lQTdqx4/n+rrnoH6SlmQ0zwxwxBeAz/TvkmiW7WLe3C5cUDKF9yYwvAe8ek4oTR3GxaiDWjNFsu7DUoDjpH5f3IxrX2IN4FyzE47hMeg4muPov7h74WwosqgnfmwoAEFV4+ldmzpdSjghZoF2M9EZI24Xa9rVdd6j2t6IjX20oL+SLQL/9HppMi1nC+3Zby1WOvuTR4g8K1QP75OeY4xTD1iEAXpd0WOX7C3ndceVF4THLCI4Imcf9FH9MBrE55FPMEsAk54HiAoyMd6tgqv/akRqmuAmnSsrWALhqiCnAVh2uzk644gSzmsFbh7zF33qrcafPpU4PxUEvpqbLz7asoNUDf4YB4gCcgZx30eK/w9FpMaLveiNq77EW7qcvJQPcjZ4uLaKkQVODJsd+1CbZF6370aiLxouXLFT3eQI7Ovu6be8D3MmazRPgCV36qzMwONqrXE/JbMFMKe5l1e4Y6avMejrj43BMgGo2u8LimCWkBeNwqIjH7plwbpDKo4OKZVbrzSZ0hplUDd/jMrb6Ulbc04uMeEigehrhSsZ0ZwoDiZcf/fDIclaTGNMl40N2wBiqdnw9uKTqD1YxzqDQ7vgiXG55ae31lvevPTgk/lLvpwzlyitjGs+6LJPu/wSCKA2VIyhJfK+8EnItEKjBUrXdOklBdOmTpUpdQ+zfd2NCrFRDJZKl26Uh412adFEkqY37O/0FbSCpAIsUCvaItcqK7qh5Rq26hVR0nS1MRs+MjGBzGqudXPQZHy+Yp7AlAa5UgJUaAwn2b/id6kNdv6hNWqSzHvOAVKdgC9/j0yN1VJD92+IoJTTiXsMQELcgm1Ehj2GZpTHu+GPuaOovHBnZMq/Kg4nUS+i
g86X01jV28uGGtglERf1HqVQpdZwbrXtUqH0cbjlvUwQ1j7zp9yhs+0ta87v0I+elAZhXzqvehMiLJu2o9/k2+4dPvkEscduHOU6jZqe8ndNEMQWiaZEYJKxNWPTaQ6nZSlFTsT7GlENeJlFzlw8QkyRJPMBWkXuaymQUcu43Pm+gAjinHSAGUeaSaIdL2Yb0M88qNwG+UlNEslx/J37pA1oMJyxb7XOeySxkP7dXi5JvygLIfkEA3ENC4NHU9nsUvTvp5AZidZCxxtYCNYfjY6xyrlfnE+V+us31LA9Wc/tKa4y3Ldj30IT2sssUrdZ0l7UbwfcZT42ZeJpxDofpZ2rjgswTs0Upr72VuOCzjpKa1CJwxhVVtPVJJovcXp4bsNPJers+yIYfTl1aqaf4qSzU5OL/cze2e6qAh7622zEa/q6klpUx9b1f8YGlQhjQcy3++JnwwsHR71Ofh9woXq57LDCHFA6f95zdkadDDhwgRcvWVnbA2Szps8iJv7h2m25qZPFtN6puJj3RlmT6hnfBeYCjpfy/2TxyCqm6bG3HZxGuhzWs2ZGxzsjBJ3ueO1pAOjtDhkRqzoWt/v2o367IYP7iTcp4pi+qJHIWCN1ElDI0BVoZ+Xq9iLfKmjrjcxQ7EYGHfQDE52QaCQ3nMB7oiqncZ1Q5n/ICDHha9RkPP9V9vWiJIZwgOJtPfGzsGQ9AigH6po65IJyxmY5upuhg7DTmsLQnKC/fwjkBF9So/4cdZuqDbxGrDDOgpL7uvWXANRNMrqYoMFUG7M90QJHj7NgSL+B6mSNwa9ctTua7Estkoyvavda3Bl3qHQ0Hva5gjSg6elL6PQ4ksqhESvjztuy58qk9aZHsQB8ZKRu8VSay40a/3ueX6bnd0hwsYy42aWJR1z+uie3yTWPuG2JZ7DjkgDduWdC+cxfvTVTG58E5luafy5j/t85UVoB2nr46VHlt/vg4M9G8/4F0d0Y6ThI4/XTfg6l1vq5ouzhQxd+SRwnuXieZy+4/2XKJnrV6t+JbNAvwdGR1V9VPLlnb+IqpvOCYyL1YLYSlNubb9HU0wxVPppGSpJLmi+njQzl71PBgMm6QV9j889wPUo387fRbJjXbSSVLon61xk/4dNvjsgfv9rF+/qEML0q4tXBJVOJ1iwKjn84Nk6vdHM3Hu8knp0hYFa4AECYKInSTVXajWAKFx4SOq8G8MA/0YlIN872LBjUm2GKs17wsJuWID+mSyVE5pV5gQ+r92YvPcC+yIvB8hTTaRclAP/KyJesDTA==" + } + } +}' + +sleep 0.1 + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/hello) + +if [ ! $code -eq 502 ]; then + echo "failed: should fail when proxy_ssl_verify is enabled and ssl_trusted_certificate is wrong ca cert" + exit 1 +fi + +sleep 0.1 + +make stop + +if ! 
grep -E 'self-signed certificate' logs/error.log; then + echo "failed: should got 'self-signed certificate' when ssl_trusted_certificate is wrong ca cert" + exit 1 +fi + +echo "passed: when proxy_ssl_verify is enabled and ssl_trusted_certificate is wrong ca cert, got 502" + + +# test combined proxy_ssl_trusted_certificate success +echo ' +apisix: + ssl: + ssl_trusted_certificate: t/certs/apisix.crt +nginx_config: + http_configuration_snippet: | + server { + listen 1983 ssl; + server_name test.com; + ssl_certificate ../t/certs/apisix.crt; + ssl_certificate_key ../t/certs/apisix.key; + location /hello { + return 200 "hello world"; + } + } + http_server_configuration_snippet: | + proxy_ssl_verify on; +' > conf/config.yaml + +rm logs/error.log || true +make init +make run +sleep 0.1 + +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + "pass_host": "rewrite", + "nodes": { + "127.0.0.1:1983": 1 + }, + "scheme": "https", + "hash_on": "vars", + "upstream_host": "test.com", + "type": "roundrobin", + "tls": { + "client_cert": "-----BEGIN 
CERTIFICATE-----\nMIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV\nBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G\nA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa\nGA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n\nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM\nCHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe\ncvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb\nVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR\n2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr\nabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2\nWjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/\nEvm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1\n/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh\n/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj\ncTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ\ntSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl\nc3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC\ntC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY\n1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl\nPYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob\nrJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy\nhme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1\n7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y\nIJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve\nU/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM=\n-----END CERTIFICATE-----\n", + "client_key": 
"HrMHUvE9Esvn7GnZ+vAynaIg/8wlB3r0zm0htmnwofYLp1VhtLeU1EmMJkPLUkcn2+v6Uav9bOQMkPdSpUMcEpRplLSXs+miu+B07CCUnsMrXkfQawRMIoePJZSLH5+PfDAlWIK2Q+ruYnjtnpNziiAtXf/HRRwHHMelnfedXqD8kn3Toe46ZYyBir99o/r/do5ludez5oY7qhOgNSWKCfnZE8Ip82g7t7n7jsAf5tTdRulUGBQ4ITV2zM3cxpD0PWnWMbOfygZIDxR8QU9wj8ihuFL1s1NM8PplcKbUxC4QlrSN+ZNkr6mxy+akPmXlABwcFIiSK7c/xvU1NjoILnhPpL6aRpbhmQX/a1XUCl+2INlQ5QbXbTN+JmDBhrU9NiYecRJMfmA1N/lhwgt01tUnxMoAhfpUVgEbZNalCJt+wn8TC+Xp3DZ0bCpXrfzqsprGKan9qC3mCN03jj50JyGFL+xt8wX8D0uaIsu4cVk4et7kbTIj9rvucsh0cfKn8va8/cdjw5QhFSRBkW5Vuz9NwvzVQ6DHWs1a8VZbN/hERxcbWNk/p1VgGLHioqZZTOd5CYdN4dGjnksjXa0Z77mTSoNx3U79FQPAgUMEA1phnO/jdryM3g5M+UvESXA/75we435xg5tLRDvNwJw2NlosQsGY7fzUi2+HFo436htydRFv8ChHezs2v99mjfCUijrWYoeJ5OB2+KO9XiOIz7gpqhTef9atajSYRhxhcwdCVupC1PrPGn9MzhdQLeqQCJj3kyazPfO3xPkNpMAqd2lXnLR4HGd9SBHe75Sik3jW9W1sUqrn2fDjyWd0jz57pl4qyHjbzjd3uE5qbH/QuYZBIzI9tEn7tj12brWrwHsMt+/4M7zp8Opsia64V3Y7ICLIi7fiYfr70RujXyn8Ik5TB1QC98JrnDjgQlTPDhHLk1r8XhZXqIIg6DmaN7UUjIuZhKxARTs8b5WMPvVV4GownlPN28sHIMAX84BNbP0597Fxipwp2oTMFKTzvxm+QUtbWvIPzF3n25L4sPCyUx5PRIRCJ5kDNQfhiN6o3Y/fAY0PyxI06PWYoNvSn3uO24XNXbF3RkpwKtV8n/iNo5dyM1VqFPWDuKRSLHY7E4lQTdqx4/n+rrnoH6SlmQ0zwxwxBeAz/TvkmiW7WLe3C5cUDKF9yYwvAe8ek4oTR3GxaiDWjNFsu7DUoDjpH5f3IxrX2IN4FyzE47hMeg4muPov7h74WwosqgnfmwoAEFV4+ldmzpdSjghZoF2M9EZI24Xa9rVdd6j2t6IjX20oL+SLQL/9HppMi1nC+3Zby1WOvuTR4g8K1QP75OeY4xTD1iEAXpd0WOX7C3ndceVF4THLCI4Imcf9FH9MBrE55FPMEsAk54HiAoyMd6tgqv/akRqmuAmnSsrWALhqiCnAVh2uzk644gSzmsFbh7zF33qrcafPpU4PxUEvpqbLz7asoNUDf4YB4gCcgZx30eK/w9FpMaLveiNq77EW7qcvJQPcjZ4uLaKkQVODJsd+1CbZF6370aiLxouXLFT3eQI7Ovu6be8D3MmazRPgCV36qzMwONqrXE/JbMFMKe5l1e4Y6avMejrj43BMgGo2u8LimCWkBeNwqIjH7plwbpDKo4OKZVbrzSZ0hplUDd/jMrb6Ulbc04uMeEigehrhSsZ0ZwoDiZcf/fDIclaTGNMl40N2wBiqdnw9uKTqD1YxzqDQ7vgiXG55ae31lvevPTgk/lLvpwzlyitjGs+6LJPu/wSCKA2VIyhJfK+8EnItEKjBUrXdOklBdOmTpUpdQ+zfd2NCrFRDJZKl26Uh412adFEkqY37O/0FbSCpAIsUCvaItcqK7qh5Rq26hVR0nS1MRs+MjGBzGqudXPQZHy+Yp7AlAa5UgJUaAwn2b/id6kNdv6hNWqSzHvOAVKdgC9/j0yN1VJD92+IoJTTiXsMQELcgm1Ehj2GZpTHu+GPuaOovHBnZMq/Kg4nUS+i
g86X01jV28uGGtglERf1HqVQpdZwbrXtUqH0cbjlvUwQ1j7zp9yhs+0ta87v0I+elAZhXzqvehMiLJu2o9/k2+4dPvkEscduHOU6jZqe8ndNEMQWiaZEYJKxNWPTaQ6nZSlFTsT7GlENeJlFzlw8QkyRJPMBWkXuaymQUcu43Pm+gAjinHSAGUeaSaIdL2Yb0M88qNwG+UlNEslx/J37pA1oMJyxb7XOeySxkP7dXi5JvygLIfkEA3ENC4NHU9nsUvTvp5AZidZCxxtYCNYfjY6xyrlfnE+V+us31LA9Wc/tKa4y3Ldj30IT2sssUrdZ0l7UbwfcZT42ZeJpxDofpZ2rjgswTs0Upr72VuOCzjpKa1CJwxhVVtPVJJovcXp4bsNPJers+yIYfTl1aqaf4qSzU5OL/cze2e6qAh7622zEa/q6klpUx9b1f8YGlQhjQcy3++JnwwsHR71Ofh9woXq57LDCHFA6f95zdkadDDhwgRcvWVnbA2Szps8iJv7h2m25qZPFtN6puJj3RlmT6hnfBeYCjpfy/2TxyCqm6bG3HZxGuhzWs2ZGxzsjBJ3ueO1pAOjtDhkRqzoWt/v2o367IYP7iTcp4pi+qJHIWCN1ElDI0BVoZ+Xq9iLfKmjrjcxQ7EYGHfQDE52QaCQ3nMB7oiqncZ1Q5n/ICDHha9RkPP9V9vWiJIZwgOJtPfGzsGQ9AigH6po65IJyxmY5upuhg7DTmsLQnKC/fwjkBF9So/4cdZuqDbxGrDDOgpL7uvWXANRNMrqYoMFUG7M90QJHj7NgSL+B6mSNwa9ctTua7Estkoyvavda3Bl3qHQ0Hva5gjSg6elL6PQ4ksqhESvjztuy58qk9aZHsQB8ZKRu8VSay40a/3ueX6bnd0hwsYy42aWJR1z+uie3yTWPuG2JZ7DjkgDduWdC+cxfvTVTG58E5luafy5j/t85UVoB2nr46VHlt/vg4M9G8/4F0d0Y6ThI4/XTfg6l1vq5ouzhQxd+SRwnuXieZy+4/2XKJnrV6t+JbNAvwdGR1V9VPLlnb+IqpvOCYyL1YLYSlNubb9HU0wxVPppGSpJLmi+njQzl71PBgMm6QV9j889wPUo387fRbJjXbSSVLon61xk/4dNvjsgfv9rF+/qEML0q4tXBJVOJ1iwKjn84Nk6vdHM3Hu8knp0hYFa4AECYKInSTVXajWAKFx4SOq8G8MA/0YlIN872LBjUm2GKs17wsJuWID+mSyVE5pV5gQ+r92YvPcC+yIvB8hTTaRclAP/KyJesDTA==" + } + } +}' + +sleep 1 + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/hello) + +if [ ! 
$code -eq 200 ]; then + echo "failed: connection to upstream with mTLS failed" + exit 1 +fi + +sleep 0.1 + +make stop + +echo "passed: connection to upstream with mTLS success" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_validate_config.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_validate_config.sh new file mode 100755 index 0000000..0f8a09a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_validate_config.sh @@ -0,0 +1,206 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# validate the config.yaml + +. ./t/cli/common.sh + +echo ' +discovery: + nacos: + host: "127.0.0.1" +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'property "host" validation failed: wrong type: expected array, got string'; then + echo "failed: should check discovery schema during init" + exit 1 +fi + +echo ' +discovery: + unknown: + host: "127.0.0.1" +' > conf/config.yaml + +if ! make init; then + echo "failed: should ignore discovery without schema" + exit 1 +fi + +echo "passed: check discovery schema during init" + +echo ' +apisix: + dns_resolver_valid: "/apisix" +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! 
echo "$out" | grep 'property "dns_resolver_valid" validation failed: wrong type: expected integer, got string'; then + echo "failed: dns_resolver_valid should be a number" + exit 1 +fi + +echo "passed: dns_resolver_valid should be a number" + +echo ' +apisix: + ssl: + ssl_trusted_certificate: t/certs/mtls_ca.crt +' > conf/config.yaml + +out=$(make run 2>&1) +if echo "$out" | grep 'no such file'; then + echo "failed: find the certificate correctly" + exit 1 +fi +make stop + +echo "passed: find the certificate correctly" + +echo ' +deployment: + admin: + admin_listen: + port: 9180 +apisix: + node_listen: 9080 + enable_admin: true + proxy_mode: http&stream + stream_proxy: + tcp: + - "localhost:9100" + udp: + - "127.0.0.1:9101" +' > conf/config.yaml + +out=$(make run 2>&1 || echo "ouch") +if echo "$out" | grep 'ouch'; then + echo "failed: allow configuring address in stream_proxy" + exit 1 +fi +make stop + +echo "passed: allow configuring address in stream_proxy" + +# apisix test +git checkout conf/config.yaml + +out=$(./bin/apisix test 2>&1 || true) +if ! echo "$out" | grep "configuration test is successful"; then + echo "failed: configuration test should be successful" + exit 1 +fi + +echo "pass: apisix test" + +./bin/apisix start +sleep 1 # wait for apisix starts + +# set invalid configuration +echo ' +nginx_config: + main_configuration_snippet: | + notexist on; +' > conf/config.yaml + +# apisix restart +out=$(./bin/apisix restart 2>&1 || true) +if ! (echo "$out" | grep "\[emerg\] unknown directive \"notexist\"") && ! (echo "$out" | grep "the old APISIX is still running"); then + echo "failed: should restart failed when configuration invalid" + exit 1 +fi + +echo "passed: apisix restart" + +# apisix test - failure scenario +out=$(./bin/apisix test 2>&1 || true) +if ! 
echo "$out" | grep "configuration test failed"; then + echo "failed: should test failed when configuration invalid" + exit 1 +fi + +# apisix test failure should not affect apisix stop +out=$(./bin/apisix stop 2>&1 || true) +if echo "$out" | grep "\[emerg\] unknown directive \"notexist\""; then + echo "failed: `apisix test` failure should not affect `apisix stop`" + exit 1 +fi + +echo "passed: apisix test(failure scenario)" + +# apisix plugin batch-requests real_ip_from invalid - failure scenario +echo ' +plugins: +- batch-requests +nginx_config: + http: + real_ip_from: + - "128.0.0.2" +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep "missing loopback or unspecified in the nginx_config.http.real_ip_from for plugin batch-requests"; then + echo "failed: should check the realip configuration for batch-requests" + exit 1 +fi + +echo "passed: apisix plugin batch-requests real_ip_from(failure scenario)" + +# apisix plugin batch-requests real_ip_from valid +echo ' +plugins: +- batch-requests +nginx_config: + http: + real_ip_from: + - "127.0.0.1" + - "127.0.0.2/8" + - "0.0.0.0" + - "0.0.0.0/0" + - "::" + - "::/0" + - "unix:" +' > conf/config.yaml + +out=$(make init 2>&1) +if echo "$out" | grep "missing loopback or unspecified in the nginx_config.http.real_ip_from for plugin batch-requests"; then + echo "failed: should check the realip configuration for batch-requests" + exit 1 +fi + +echo "passed: check the realip configuration for batch-requests" + +echo ' +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - 127.0.0.1 +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! 
echo "$out" | grep 'property "host" validation failed'; then + echo "failed: should check etcd schema during init" + exit 1 +fi + +echo "passed: check etcd schema during init" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_wasm.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_wasm.sh new file mode 100755 index 0000000..a8e5584 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_wasm.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +exit_if_not_customed_nginx + +echo ' +wasm: + plugins: + - name: wasm_log + file: t/wasm/log/main.go.wasm +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'property "priority" is required'; then + echo "failed: priority is required" + exit 1 +fi + +echo ' +wasm: + plugins: + - name: wasm_log + priority: 888 +' > conf/config.yaml + +out=$(make init 2>&1 || true) +if ! echo "$out" | grep 'property "file" is required'; then + echo "failed: file is required" + exit 1 +fi + +echo "passed: wasm configuration is validated" + +echo ' +wasm: + plugins: + - name: wasm_log + priority: 7999 + file: t/wasm/log/main.go.wasm + ' > conf/config.yaml + +make init +if ! 
grep "wasm_vm " conf/nginx.conf; then + echo "failed: wasm isn't enabled" + exit 1 +fi + +echo "passed: wasm is enabled" diff --git a/CloudronPackages/APISIX/apisix-source/t/cli/test_zipkin_set_ngx_var.sh b/CloudronPackages/APISIX/apisix-source/t/cli/test_zipkin_set_ngx_var.sh new file mode 100755 index 0000000..3ddd021 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/cli/test_zipkin_set_ngx_var.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +plugins: + - zipkin +plugin_attr: + zipkin: + set_ngx_var: true +' > conf/config.yaml + +make init + +if ! grep "set \$zipkin_context_traceparent '';" conf/nginx.conf > /dev/null; then + echo "failed: zipkin_context_traceparent not found in nginx.conf" + exit 1 +fi + +if ! grep "set \$zipkin_trace_id '';" conf/nginx.conf > /dev/null; then + echo "failed: zipkin_trace_id not found in nginx.conf" + exit 1 +fi + +if ! 
grep "set \$zipkin_span_id '';" conf/nginx.conf > /dev/null; then + echo "failed: zipkin_span_id not found in nginx.conf" + exit 1 +fi + + +echo "passed: zipkin_set_ngx_var configuration is validated" diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/consumer-group.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/consumer-group.t new file mode 100644 index 0000000..3293800 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/consumer-group.t @@ -0,0 +1,212 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /hello?apikey=one"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "consumer_groups": [ + { + "id": "foobar", + "plugins": { + "response-rewrite": { + "body": "hello\n" + } + } + } + ], + "consumers": [ + { + "username": "one", + "group_id": "foobar", + "plugins": { + "key-auth": { + "key": "one" + } + } + } + ] +} +--- response_body +hello + + + +=== TEST 2: consumer group not found +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "consumers": [ + { + "username": "one", + "group_id": "invalid_group", + "plugins": { + "key-auth": { + "key": "one" + } + } + } + ] +} +--- error_code: 503 +--- error_log +failed to fetch consumer group config by id: invalid_group + + + +=== TEST 3: plugin priority +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "consumer_groups": [ + { + "id": "foobar", + "plugins": { + "response-rewrite": { + "body": "hello\n" + } + } + } + ], + "consumers": [ + { + "username": "one", + "group_id": "foobar", + "plugins": { + "key-auth": { + "key": "one" + }, + "response-rewrite": { + "body": "world\n" + } + } + } + ] +} +--- response_body +world + + + +=== TEST 4: invalid plugin +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + 
"plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "consumer_groups": [ + { + "id": "foobar", + "plugins": { + "example-plugin": { + "skey": "s" + }, + "response-rewrite": { + "body": "hello\n" + } + } + } + ], + "consumers": [ + { + "username": "one", + "group_id": "foobar", + "plugins": { + "key-auth": { + "key": "one" + } + } + } + ] +} +--- error_code: 503 +--- error_log +failed to check the configuration of plugin example-plugin +failed to fetch consumer group config by id: foobar diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/consumer.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/consumer.t new file mode 100644 index 0000000..80b8019 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/consumer.t @@ -0,0 +1,93 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: validate consumer +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "consumers": [ + { + "username": "jwt#auth" + } + ] +} +--- request +GET /hello +--- response_body +hello world +--- error_log +property "username" validation failed + + + +=== TEST 2: consumer restriction +--- apisix_json +{ + "consumers": [ + { + "username": "jack", + "plugins": { + "key-auth": { + "key": "user-key" + } + } + } + ], + "routes": [ + { + "id": "1", + "methods": ["POST"], + "uri": "/hello", + "plugins": { + "key-auth": {}, + "consumer-restriction": { + "whitelist": ["jack"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + } + ] +} +--- more_headers +apikey: user-key +--- request +POST /hello diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/global-rule.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/global-rule.t new file mode 100644 index 0000000..54d5898 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/global-rule.t @@ -0,0 +1,160 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "global_rules": [ + { + "id": 1, + "plugins": { + "response-rewrite": { + "body": "hello\n" + } + } + } + ] +} +--- response_body +hello + + + +=== TEST 2: global rule with bad plugin +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "global_rules": [ + { + "id": 1, + "plugins": { + "response-rewrite": { + "body": 4 + } + } + } + ] +} +--- response_body +hello world +--- error_log +property "body" validation failed + + + +=== TEST 3: fix global rule with default value +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "global_rules": [ + { + "id": 1, + "plugins": { + "uri-blocker": { + "block_rules": [ + "/h*" + ] + } + } + } + ] +} +--- error_code: 403 + + + +=== TEST 4: common phase without matched route +--- apisix_json +{ + "routes": [ + { + "uri": "/apisix/prometheus/metrics", + "plugins": { + "public-api": {} + } + } + ], + "global_rules": [ + { + "id": 1, + "plugins": { + "cors": { + "allow_origins": "http://a.com,http://b.com" + } + } + } + ] +} +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 diff --git 
a/CloudronPackages/APISIX/apisix-source/t/config-center-json/plugin-configs.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/plugin-configs.t new file mode 100644 index 0000000..79f8d0f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/plugin-configs.t @@ -0,0 +1,175 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_json +{ + "plugin_configs": [ + { + "id": 1, + "plugins": { + "response-rewrite": { + "body": "hello\n" + } + } + } + ], + "routes": [ + { + "id": 1, + "uri": "/hello", + "plugin_config_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- response_body +hello + + + +=== TEST 2: plugin_config not found +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "plugin_config_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- error_code: 503 +--- error_log +failed to fetch plugin config by id: 1 + + + +=== TEST 3: mix plugins & plugin_config_id +--- apisix_json +{ + "plugin_configs": [ + { + "id": 1, + "plugins": { + "example-plugin": { + "i": 1 + }, + "response-rewrite": { + "body": "hello\n" + } + } + } + ], + "routes": [ + { + "id": 1, + "uri": "/echo", + "plugin_config_id": 1, + "plugins": { + "proxy-rewrite": { + "headers": { + "in": "out" + } + }, + "response-rewrite": { + "body": "world\n" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /echo +--- response_body +world +--- response_headers +in: out +--- error_log eval +qr/conf_version: \d+#\d+,/ + + + +=== TEST 4: invalid plugin +--- apisix_json +{ + "plugin_configs": [ + { + "id": 1, + "plugins": { + "example-plugin": { + "skey": "s" + }, + "response-rewrite": { + "body": "hello\n" + } + } + } + ], + "routes": [ + { + "id": 1, + "uri": "/hello", + "plugin_config_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- error_code: 503 +--- error_log +failed to check the 
configuration of plugin example-plugin +failed to fetch plugin config by id: 1 diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/plugin-metadata.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/plugin-metadata.t new file mode 100644 index 0000000..2eed9d4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/plugin-metadata.t @@ -0,0 +1,100 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_json +{ + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ], + "routes": [ + { + "uri": "/hello", + "upstream_id": 1, + "plugins": { + "http-logger": { + "batch_max_size": 1, + "uri": "http://127.0.0.1:1980/log" + } + } + } + ], + "plugin_metadata": [ + { + "id": "http-logger", + "log_format": { + "host": "$host", + "remote_addr": "$remote_addr" + } + } + ] +} +--- request +GET /hello +--- error_log +"remote_addr":"127.0.0.1" +--- no_error_log +failed to get schema for plugin: + + + +=== TEST 2: sanity +--- apisix_json +{ + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ], + "routes": [ + { + "uri": "/hello", + "upstream_id": 1 + } + ], + "plugin_metadata": [ + { + "id": "authz-casbin", + "model": 123 + } + ] +} +--- request +GET /hello +--- error_log +failed to check item data of [plugin_metadata] diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/plugin.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/plugin.t new file mode 100644 index 0000000..9dd1437 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/plugin.t @@ -0,0 +1,291 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +our $debug_config = t::APISIX::read_file("conf/debug.yaml"); +$debug_config =~ s/basic:\n enable: false/basic:\n enable: true/; + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "plugins": [ + { + "name": "ip-restriction" + }, + { + "name": "jwt-auth" + }, + { + "name": "mqtt-proxy", + "stream": true + } + ] +} +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + }) + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +hello world +--- error_log +use config_provider: yaml +load(): loaded plugin and sort by priority: 3000 name: ip-restriction +load(): loaded plugin and sort by priority: 2510 name: jwt-auth +load_stream(): loaded stream plugin and sort by priority: 1000 name: mqtt-proxy +--- grep_error_log eval +qr/load\(\): new plugins/ +--- grep_error_log_out +load(): new plugins +load(): new plugins +load(): new plugins +load(): new plugins + + + +=== TEST 2: plugins not changed, but still need to reload +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +plugins: + - ip-restriction + - jwt-auth +stream_plugins: + - mqtt-proxy +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "plugins": [ + { + "name": "ip-restriction" + }, + { + "name": "jwt-auth" + }, + { + "name": "mqtt-proxy", + "stream": true + } + ] +} +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + }) + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +hello world +--- grep_error_log eval +qr/loaded plugin and sort by priority: \d+ name: [^,]+/ +--- grep_error_log_out eval +qr/(loaded plugin and sort by priority: (3000 name: ip-restriction|2510 name: jwt-auth) +){4}/ + + + +=== TEST 3: disable plugin and its router +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "plugins": [ + { + "name": "jwt-auth" + } + ] +} +--- request +GET /apisix/prometheus/metrics +--- error_code: 404 + + + +=== TEST 4: enable plugin and its router +--- apisix_json +{ + "routes": [ + { + "uri": "/apisix/prometheus/metrics", + "plugins": { + "public-api": {} + } + } + ], + "plugins": [ + { + "name": "public-api" + }, + { + "name": "prometheus" + } + ] +} +--- request +GET /apisix/prometheus/metrics + + + +=== TEST 5: invalid plugin config +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +plugins: + - ip-restriction + - jwt-auth +stream_plugins: + - mqtt-proxy +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "plugins": [ + { + "name": "xxx", + "stream": "ip-restriction" + } + ] +} +--- request +GET /hello +--- response_body +hello world +--- error_log +property "stream" validation failed: wrong type: expected boolean, got string +--- no_error_log +load(): plugins not changed + + + +=== TEST 6: empty plugin list +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "plugins": [], + "stream_plugins": [] +} +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + 
ngx.sleep(0.3) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + }) + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +hello world +--- error_log +use config_provider: yaml +load(): new plugins: {} +load_stream(): new plugins: {} diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/route-service.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/route-service.t new file mode 100644 index 0000000..b16e680 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/route-service.t @@ -0,0 +1,379 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: hit route +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "service_id": 1, + "id": 1 + } + ], + "services": [ + { + "id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 2: not found service +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "id": 1, + "service_id": 1111 + } + ], + "services": [ + { + "id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- error_code: 404 +--- error_log +failed to fetch service configuration by id: 1111 + + + +=== TEST 3: service upstream priority +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "service_id": 1 + } + ], + "services": [ + { + "id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1977": 1 + }, + "type": "roundrobin" + }, + "upstream_id": 1 + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ] +} +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: route service upstream priority +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "service_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "services": [ + { + "id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1977": 1 + }, + "type": "roundrobin" + } + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1977": 1 + }, + "type": "roundrobin" + } + ] +} +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: route service upstream by upstream_id priority +--- 
yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "service_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1977": 1 + }, + "type": "roundrobin" + }, + "upstream_id": 1 + } + ], + "services": [ + { + "id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1977": 1 + }, + "type": "roundrobin" + } + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ] +} +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 6: route service upstream priority +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "service_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "services": [ + { + "id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1977": 1 + }, + "type": "roundrobin" + }, + "upstream_id": 1 + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1977": 1 + }, + "type": "roundrobin" + } + ] +} +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 7: two routes with the same service +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "uris": ["/hello"], + "service_id": 1, + "id": 1, + "plugins": { + "response-rewrite": { + "body": "hello\n" + } + } + }, + { + "uris": ["/world"], + "service_id": 1, + "id": 2, + "plugins": { + "response-rewrite": { + "body": "world\n" + } + } + } + ], + "services": [ + { + "id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- response_body +hello + + + +=== TEST 8: service with bad plugin +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "service_id": 1 + } + ], + "services": [ + { + "id": 1, + "plugins": { + "proxy-rewrite": { + "uri": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + 
} + ] +} +--- request +GET /hello +--- error_code: 404 +--- error_log +property "uri" validation failed + + + +=== TEST 9: fix service with default value +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "service_id": 1 + } + ], + "services": [ + { + "id": 1, + "plugins": { + "uri-blocker": { + "block_rules": [ + "/h*" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- error_code: 403 diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/route-upstream.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/route-upstream.t new file mode 100644 index 0000000..7082de2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/route-upstream.t @@ -0,0 +1,244 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: hit route +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream_id": 1 + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ] +} +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 2: not found upstream +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream_id": 1111 + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + ] +} +--- request +GET /hello +--- error_code_like: ^(?:50\d)$ +--- error_log +failed to find upstream by id: 1111 + + + +=== TEST 3: upstream_id priority upstream +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1977": 1 + }, + "type": "roundrobin" + } + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + } + ] +} +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: enable healthcheck +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream_id": 1 + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "retries": 2, + "checks": { + "active": { + "http_path": "/status", + "healthy": { + "interval": 2, + "successes": 1 + } + } + } + } + ] +} +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: upstream domain +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream_id": 1 + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "test.com:1980": 1 + }, + "type": 
"roundrobin" + } + ] +} +--- request +GET /hello +--- error_code: 200 + + + +=== TEST 6: upstream hash_on (bad) +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream_id": 1 + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "test.com:1980": 1 + }, + "type": "chash", + "hash_on": "header", + "key": "$aaa" + } + ] +} +--- request +GET /hello +--- error_code: 502 +--- error_log +invalid configuration: failed to match pattern + + + +=== TEST 7: upstream hash_on (good) +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream_id": 1 + } + ], + "upstreams": [ + { + "id": 1, + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.2:1980": 1 + }, + "type": "chash", + "hash_on": "header", + "key": "test" + } + ] +} +--- request +GET /hello +--- more_headers +test: one +--- error_log +proxy request to 127.0.0.1:1980 diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/route.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/route.t new file mode 100644 index 0000000..eef7b5e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/route.t @@ -0,0 +1,364 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- response_body +hello world +--- error_log +use config_provider: yaml + + + +=== TEST 2: route:uri + host (missing host, not hit) +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "host": "foo.com", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- error_code: 404 +--- error_log +use config_provider: yaml + + + +=== TEST 3: route:uri + host +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "host": "foo.com", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- more_headers +host: foo.com +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: route with bad plugin +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "plugins": { + "proxy-rewrite": { + "uri": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- error_code: 404 +--- error_log +property "uri" validation failed + + + +=== TEST 5: ignore unknown plugin +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "plugins": { + "x-rewrite": { + "uri": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- 
response_body +hello world + + + +=== TEST 6: route with bad plugin, radixtree_host_uri +--- yaml_config +apisix: + node_listen: 1984 + router: + http: "radixtree_host_uri" +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "plugins": { + "proxy-rewrite": { + "uri": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- error_code: 404 +--- error_log +property "uri" validation failed + + + +=== TEST 7: fix route with default value +--- yaml_config +apisix: + node_listen: 1984 + router: + http: "radixtree_host_uri" +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "plugins": { + "uri-blocker": { + "block_rules": [ + "/h*" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 8: invalid route, bad vars operator +--- yaml_config +apisix: + node_listen: 1984 + router: + http: "radixtree_host_uri" +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "vars": [ + ["remote_addr", "=", "1"] + ], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- error_code: 404 +--- error_log +failed to validate the 'vars' expression + + + +=== TEST 9: script with id +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "script": "local ngx = ngx", + "script_id": "1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- error_code: 200 +--- 
error_log +missing loaded script object + + + +=== TEST 10: hosts with '_' is valid +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "hosts": [ + "foo.com", + "v1_test-api.com" + ], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- more_headers +host: v1_test-api.com +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 11: script with plugin_config_id +--- yaml_config eval: $::yaml_config +--- apisix_json +{ + "routes": [ + { + "id": 1, + "uri": "/hello", + "script": "local ngx = ngx", + "plugin_config_id": "1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- request +GET /hello +--- error_code: 404 +--- error_log +failed to check item data of [routes] diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/secret.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/secret.t new file mode 100644 index 0000000..178f19e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/secret.t @@ -0,0 +1,458 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->apisix_json) { + my $json_config = <<_EOC_; +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ] +} +_EOC_ + + $block->set_value("apisix_json", $json_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: validate secret/vault: wrong schema +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "secrets": [ + { + "id": "vault/1", + "prefix": "kv/apisix", + "token": "root", + "uri": "127.0.0.1:8200" + } + ] +} +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local values = secret.secrets() + ngx.say(#values) + } + } +--- request +GET /t +--- response_body +0 +--- error_log +property "uri" validation failed: failed to match pattern "^[^\\/]+:\\/\\/([\\da-zA-Z.-]+|\\[[\\da-fA-F:]+\\])(:\\d+)?" 
+ + + +=== TEST 2: validate secrets: manager not exits +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "secrets": [ + { + "id": "hhh/1", + "prefix": "kv/apisix", + "token": "root", + "uri": "127.0.0.1:8200" + } + ] +} +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local values = secret.secrets() + ngx.say(#values) + } + } +--- request +GET /t +--- response_body +0 +--- error_log +secret manager not exits + + + +=== TEST 3: load config normal +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "secrets": [ + { + "id": "vault/1", + "prefix": "kv/apisix", + "token": "root", + "uri": "http://127.0.0.1:8200" + } + ] +} +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local values = secret.secrets() + ngx.say("len: ", #values) + + ngx.say("id: ", values[1].value.id) + ngx.say("prefix: ", values[1].value.prefix) + ngx.say("token: ", values[1].value.token) + ngx.say("uri: ", values[1].value.uri) + } + } +--- request +GET /t +--- response_body +len: 1 +id: vault/1 +prefix: kv/apisix +token: root +uri: http://127.0.0.1:8200 + + + +=== TEST 4: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/apisix-key key=value +--- response_body +Success! 
Data written to: kv/apisix/apisix-key + + + +=== TEST 5: secret.fetch_by_uri: start with $secret:// +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "secrets": [ + { + "id": "vault/1", + "prefix": "kv/apisix", + "token": "root", + "uri": "http://127.0.0.1:8200" + } + ] +} +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://vault/1/apisix-key/key") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +value + + + +=== TEST 6: secret.fetch_by_uri, wrong ref format: wrong type +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri(1) + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error secret_uri type: number + + + +=== TEST 7: secret.fetch_by_uri, wrong ref format: wrong prefix +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("secret://") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error secret_uri prefix: secret:// + + + +=== TEST 8: secret.fetch_by_uri, error format: no secret manager +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("$secret://") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error format: no secret manager + + + +=== TEST 9: secret.fetch_by_uri, error format: no secret conf id +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("$secret://vault/") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error format: no secret conf id + + + +=== TEST 10: secret.fetch_by_uri, error format: no secret key id +--- config + location /t { + content_by_lua_block { + local 
secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("$secret://vault/2/") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error format: no secret key id + + + +=== TEST 11: secret.fetch_by_uri, no config +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("$secret://vault/2/bar") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +no secret conf, secret_uri: $secret://vault/2/bar + + + +=== TEST 12: secret.fetch_by_uri, no sub key value +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "secrets": [ + { + "id": "vault/1", + "prefix": "kv/apisix", + "token": "root", + "uri": "http://127.0.0.1:8200" + } + ] +} +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://vault/1/apisix-key/bar") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +nil + + + +=== TEST 13: fetch_secrets env: no cache +--- main_config +env secret=apisix; +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local refs = { + key = "jack", + secret = "$env://secret" + } + local new_refs = secret.fetch_secrets(refs) + assert(new_refs ~= refs) + ngx.say(refs.secret) + ngx.say(new_refs.secret) + ngx.say(new_refs.key) + } + } +--- request +GET /t +--- response_body +$env://secret +apisix +jack +--- error_log_like +qr/retrieve secrets refs/ + + + +=== TEST 14: fetch_secrets env: cache +--- main_config +env secret=apisix; +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local refs = { + key = "jack", + secret = "$env://secret" + } + local refs_1 = secret.fetch_secrets(refs, true, "key", 1) + local refs_2 = secret.fetch_secrets(refs, true, "key", 1) + assert(refs_1 == refs_2) + 
ngx.say(refs_1.secret) + ngx.say(refs_2.secret) + } + } +--- request +GET /t +--- response_body +apisix +apisix +--- grep_error_log eval +qr/retrieve secrets refs/ +--- grep_error_log_out +retrieve secrets refs + + + +=== TEST 15: fetch_secrets env: table nesting +--- main_config +env secret=apisix; +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local refs = { + key = "jack", + user = { + username = "apisix", + passsword = "$env://secret" + } + } + local new_refs = secret.fetch_secrets(refs) + ngx.say(new_refs.user.passsword) + } + } +--- request +GET /t +--- response_body +apisix + + + +=== TEST 16: fetch_secrets: wrong refs type +--- main_config +env secret=apisix; +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local refs = "wrong" + local new_refs = secret.fetch_secrets(refs) + ngx.say(new_refs) + } + } +--- request +GET /t +--- response_body +nil diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/ssl.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/ssl.t new file mode 100644 index 0000000..f7637de --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/ssl.t @@ -0,0 +1,191 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('debug'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + if ($block->sslhandshake) { + my $sslhandshake = $block->sslhandshake; + + $block->set_value("config", <<_EOC_) +listen unix:\$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:\$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + $sslhandshake + local req = "GET /hello HTTP/1.0\\r\\nHost: test.com\\r\\nConnection: close\\r\\n\\r\\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + local line, err = sock:receive() + if not line then + ngx.say("failed to receive: ", err) + return + end + + ngx.say("received: ", line) + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +_EOC_ + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "ssls": [ + { + "cert": "-----BEGIN 
CERTIFICATE-----\nMIIDrzCCApegAwIBAgIJAI3Meu/gJVTLMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV\nBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL\nBgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl\nci5sb2NhbDAeFw0yMDEwMjgwMzMzMDJaFw0yMTEwMjgwMzMzMDJaMG4xCzAJBgNV\nBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL\nBgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl\nci5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/qwxCR7g5S\ns9+VleopkLi5pAszEkHYOBpwF/hDeRdxU0I0e1zZTdTlwwPy2vf8m3kwoq6fmNCt\ntdUUXh5Wvgi/2OA8HBBzaQFQL1Av9qWwyES5cx6p0ZBwIrcXQIsl1XfNSUpQNTSS\nD44TGduXUIdeshukPvMvLWLezynf2/WlgVh/haWtDG99r/Gj3uBdjl0m/xGvKvIv\nNFy6EdgG9fkwcIalutjrUnGl9moGjwKYu4eXW2Zt5el0d1AHXUsqK4voe0p+U2Nz\nquDmvxteXWdlsz8o5kQT6a4DUtWhpPIfNj9oZfPRs3LhBFQ74N70kVxMOCdec1lU\nbnFzLIMGlz0CAwEAAaNQME4wHQYDVR0OBBYEFFHeljijrr+SPxlH5fjHRPcC7bv2\nMB8GA1UdIwQYMBaAFFHeljijrr+SPxlH5fjHRPcC7bv2MAwGA1UdEwQFMAMBAf8w\nDQYJKoZIhvcNAQELBQADggEBAG6NNTK7sl9nJxeewVuogCdMtkcdnx9onGtCOeiQ\nqvh5Xwn9akZtoLMVEdceU0ihO4wILlcom3OqHs9WOd6VbgW5a19Thh2toxKidHz5\nrAaBMyZsQbFb6+vFshZwoCtOLZI/eIZfUUMFqMXlEPrKru1nSddNdai2+zi5rEnM\nHCot43+3XYuqkvWlOjoi9cP+C4epFYrxpykVbcrtbd7TK+wZNiK3xtDPnVzjdNWL\ngeAEl9xrrk0ss4nO/EreTQgS46gVU+tLC+b23m2dU7dcKZ7RDoiA9bdVc4a2IsaS\n2MvLL4NZ2nUh8hAEHiLtGMAV3C6xNbEyM07hEpDW6vk6tqk=\n-----END CERTIFICATE-----", + "key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCf6sMQke4OUrPf\nlZXqKZC4uaQLMxJB2DgacBf4Q3kXcVNCNHtc2U3U5cMD8tr3/Jt5MKKun5jQrbXV\nFF4eVr4Iv9jgPBwQc2kBUC9QL/alsMhEuXMeqdGQcCK3F0CLJdV3zUlKUDU0kg+O\nExnbl1CHXrIbpD7zLy1i3s8p39v1pYFYf4WlrQxvfa/xo97gXY5dJv8RryryLzRc\nuhHYBvX5MHCGpbrY61JxpfZqBo8CmLuHl1tmbeXpdHdQB11LKiuL6HtKflNjc6rg\n5r8bXl1nZbM/KOZEE+muA1LVoaTyHzY/aGXz0bNy4QRUO+De9JFcTDgnXnNZVG5x\ncyyDBpc9AgMBAAECggEAatcEtehZPJaCeClPPF/Cwbe9YoIfe4BCk186lHI3z7K1\n5nB7zt+bwVY0AUpagv3wvXoB5lrYVOsJpa9y5iAb3GqYMc/XDCKfD/KLea5hwfcn\nBctEn0LjsPVKLDrLs2t2gBDWG2EU+udunwQh7XTdp2Nb6V3FdOGbGAg2LgrSwP1g\n0r4z14F70oWGYyTQ5N8UGuyryVrzQH525OYl38Yt7R6zJ/44FVi/2TvdfHM5ss39\nSXWi00Q30fzaBEf4AdHVwVCRKctwSbrIOyM53kiScFDmBGRblCWOxXbiFV+d3bjX\ngf2zxs7QYZrFOzOO7kLtHGua4itEB02497v+1oKDwQKBgQDOBvCVGRe2WpItOLnj\nSF8iz7Sm+jJGQz0D9FhWyGPvrN7IXGrsXavA1kKRz22dsU8xdKk0yciOB13Wb5y6\nyLsr/fPBjAhPb4h543VHFjpAQcxpsH51DE0b2oYOWMmz+rXGB5Jy8EkP7Q4njIsc\n2wLod1dps8OT8zFx1jX3Us6iUQKBgQDGtKkfsvWi3HkwjFTR+/Y0oMz7bSruE5Z8\ng0VOHPkSr4XiYgLpQxjbNjq8fwsa/jTt1B57+By4xLpZYD0BTFuf5po+igSZhH8s\nQS5XnUnbM7d6Xr/da7ZkhSmUbEaMeHONSIVpYNgtRo4bB9Mh0l1HWdoevw/w5Ryt\nL/OQiPhfLQKBgQCh1iG1fPh7bbnVe/HI71iL58xoPbCwMLEFIjMiOFcINirqCG6V\nLR91Ytj34JCihl1G4/TmWnsH1hGIGDRtJLCiZeHL70u32kzCMkI1jOhFAWqoutMa\n7obDkmwraONIVW/kFp6bWtSJhhTQTD4adI9cPCKWDXdcCHSWj0Xk+U8HgQKBgBng\nt1HYhaLzIZlP/U/nh3XtJyTrX7bnuCZ5FhKJNWrYjxAfgY+NXHRYCKg5x2F5j70V\nbe7pLhxmCnrPTMKZhik56AaTBOxVVBaYWoewhUjV4GRAaK5Wc8d9jB+3RizPFwVk\nV3OU2DJ1SNZ+W2HBOsKrEfwFF/dgby6i2w6MuAP1AoGBAIxvxUygeT/6P0fHN22P\nzAHFI4v2925wYdb7H//D8DIADyBwv18N6YH8uH7L+USZN7e4p2k8MGGyvTXeC6aX\nIeVtU6fH57Ddn59VPbF20m8RCSkmBvSdcbyBmqlZSBE+fKwCliKl6u/GH0BNAWKz\nr8yiEiskqRmy7P7MY9hDmEbG\n-----END PRIVATE KEY-----", + "snis": [ + "t.com", + "test.com" + ] + } + ] +} +--- sslhandshake +local sess, err = sock:sslhandshake(nil, "test.com", false) +if not sess then + ngx.say("failed to do SSL handshake: ", err) + return +end +--- response_body +received: HTTP/1.1 200 OK +close: 1 nil +--- error_log 
+server name: "test.com" + + + +=== TEST 2: single sni +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + } + ], + "ssls": [ + { + "cert": "-----BEGIN CERTIFICATE-----\nMIIDrzCCApegAwIBAgIJAI3Meu/gJVTLMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV\nBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL\nBgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl\nci5sb2NhbDAeFw0yMDEwMjgwMzMzMDJaFw0yMTEwMjgwMzMzMDJaMG4xCzAJBgNV\nBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL\nBgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl\nci5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/qwxCR7g5S\ns9+VleopkLi5pAszEkHYOBpwF/hDeRdxU0I0e1zZTdTlwwPy2vf8m3kwoq6fmNCt\ntdUUXh5Wvgi/2OA8HBBzaQFQL1Av9qWwyES5cx6p0ZBwIrcXQIsl1XfNSUpQNTSS\nD44TGduXUIdeshukPvMvLWLezynf2/WlgVh/haWtDG99r/Gj3uBdjl0m/xGvKvIv\nNFy6EdgG9fkwcIalutjrUnGl9moGjwKYu4eXW2Zt5el0d1AHXUsqK4voe0p+U2Nz\nquDmvxteXWdlsz8o5kQT6a4DUtWhpPIfNj9oZfPRs3LhBFQ74N70kVxMOCdec1lU\nbnFzLIMGlz0CAwEAAaNQME4wHQYDVR0OBBYEFFHeljijrr+SPxlH5fjHRPcC7bv2\nMB8GA1UdIwQYMBaAFFHeljijrr+SPxlH5fjHRPcC7bv2MAwGA1UdEwQFMAMBAf8w\nDQYJKoZIhvcNAQELBQADggEBAG6NNTK7sl9nJxeewVuogCdMtkcdnx9onGtCOeiQ\nqvh5Xwn9akZtoLMVEdceU0ihO4wILlcom3OqHs9WOd6VbgW5a19Thh2toxKidHz5\nrAaBMyZsQbFb6+vFshZwoCtOLZI/eIZfUUMFqMXlEPrKru1nSddNdai2+zi5rEnM\nHCot43+3XYuqkvWlOjoi9cP+C4epFYrxpykVbcrtbd7TK+wZNiK3xtDPnVzjdNWL\ngeAEl9xrrk0ss4nO/EreTQgS46gVU+tLC+b23m2dU7dcKZ7RDoiA9bdVc4a2IsaS\n2MvLL4NZ2nUh8hAEHiLtGMAV3C6xNbEyM07hEpDW6vk6tqk=\n-----END CERTIFICATE-----", + "key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCf6sMQke4OUrPf\nlZXqKZC4uaQLMxJB2DgacBf4Q3kXcVNCNHtc2U3U5cMD8tr3/Jt5MKKun5jQrbXV\nFF4eVr4Iv9jgPBwQc2kBUC9QL/alsMhEuXMeqdGQcCK3F0CLJdV3zUlKUDU0kg+O\nExnbl1CHXrIbpD7zLy1i3s8p39v1pYFYf4WlrQxvfa/xo97gXY5dJv8RryryLzRc\nuhHYBvX5MHCGpbrY61JxpfZqBo8CmLuHl1tmbeXpdHdQB11LKiuL6HtKflNjc6rg\n5r8bXl1nZbM/KOZEE+muA1LVoaTyHzY/aGXz0bNy4QRUO+De9JFcTDgnXnNZVG5x\ncyyDBpc9AgMBAAECggEAatcEtehZPJaCeClPPF/Cwbe9YoIfe4BCk186lHI3z7K1\n5nB7zt+bwVY0AUpagv3wvXoB5lrYVOsJpa9y5iAb3GqYMc/XDCKfD/KLea5hwfcn\nBctEn0LjsPVKLDrLs2t2gBDWG2EU+udunwQh7XTdp2Nb6V3FdOGbGAg2LgrSwP1g\n0r4z14F70oWGYyTQ5N8UGuyryVrzQH525OYl38Yt7R6zJ/44FVi/2TvdfHM5ss39\nSXWi00Q30fzaBEf4AdHVwVCRKctwSbrIOyM53kiScFDmBGRblCWOxXbiFV+d3bjX\ngf2zxs7QYZrFOzOO7kLtHGua4itEB02497v+1oKDwQKBgQDOBvCVGRe2WpItOLnj\nSF8iz7Sm+jJGQz0D9FhWyGPvrN7IXGrsXavA1kKRz22dsU8xdKk0yciOB13Wb5y6\nyLsr/fPBjAhPb4h543VHFjpAQcxpsH51DE0b2oYOWMmz+rXGB5Jy8EkP7Q4njIsc\n2wLod1dps8OT8zFx1jX3Us6iUQKBgQDGtKkfsvWi3HkwjFTR+/Y0oMz7bSruE5Z8\ng0VOHPkSr4XiYgLpQxjbNjq8fwsa/jTt1B57+By4xLpZYD0BTFuf5po+igSZhH8s\nQS5XnUnbM7d6Xr/da7ZkhSmUbEaMeHONSIVpYNgtRo4bB9Mh0l1HWdoevw/w5Ryt\nL/OQiPhfLQKBgQCh1iG1fPh7bbnVe/HI71iL58xoPbCwMLEFIjMiOFcINirqCG6V\nLR91Ytj34JCihl1G4/TmWnsH1hGIGDRtJLCiZeHL70u32kzCMkI1jOhFAWqoutMa\n7obDkmwraONIVW/kFp6bWtSJhhTQTD4adI9cPCKWDXdcCHSWj0Xk+U8HgQKBgBng\nt1HYhaLzIZlP/U/nh3XtJyTrX7bnuCZ5FhKJNWrYjxAfgY+NXHRYCKg5x2F5j70V\nbe7pLhxmCnrPTMKZhik56AaTBOxVVBaYWoewhUjV4GRAaK5Wc8d9jB+3RizPFwVk\nV3OU2DJ1SNZ+W2HBOsKrEfwFF/dgby6i2w6MuAP1AoGBAIxvxUygeT/6P0fHN22P\nzAHFI4v2925wYdb7H//D8DIADyBwv18N6YH8uH7L+USZN7e4p2k8MGGyvTXeC6aX\nIeVtU6fH57Ddn59VPbF20m8RCSkmBvSdcbyBmqlZSBE+fKwCliKl6u/GH0BNAWKz\nr8yiEiskqRmy7P7MY9hDmEbG\n-----END PRIVATE KEY-----", + "sni": "test.com" + } + ] +} +--- sslhandshake +local sess, err = sock:sslhandshake(nil, "test.com", false) +if not sess then + ngx.say("failed to do SSL handshake: ", err) + return +end +--- response_body +received: HTTP/1.1 200 OK +close: 1 nil +--- error_log +server name: 
"test.com" + + + +=== TEST 3: bad cert +--- apisix_json +{ + "routes": [ + { + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + }, + "ssl_enable": true + } + ], + "ssls": [ + { + "cert": "-----BEGIN CERTIFICATE-----\nMIIDrzCCApegAwIBAgIJAI3Meu/gJVTLMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV\nBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL\nBgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl\nci5sb2NhbDAeFw0yMDEwMjgwMzMzMDJaFw0yMTEwMjgwMzMzMDJaMG4xCzAJBgNV\nBAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL\nBgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl\nci5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/qwxCR7g5S\ns9+VleopkLi5pAszEkHYOBpwF/hDeRdxU0I0e1zZTdTlwwPy2vf8m3kwoq6fmNCt\ntdUUXh5Wvgi/2OA8HBBzaQFQL1Av9qWwyES5cx6p0ZBwIrcXQIsl1XfNSUpQNTSS\nD44TGduXUIdeshukPvMvLWLezynf2/WlgVh/haWtDG99r/Gj3uBdjl0m/xGvKvIv\nquDmvxteXWdlsz8o5kQT6a4DUtWhpPIfNj9oZfPRs3LhBFQ74N70kVxMOCdec1lU\nbnFzLIMGlz0CAwEAAaNQME4wHQYDVR0OBBYEFFHeljijrr+SPxlH5fjHRPcC7bv2\nMB8GA1UdIwQYMBaAFFHeljijrr+SPxlH5fjHRPcC7bv2MAwGA1UdEwQFMAMBAf8w\nDQYJKoZIhvcNAQELBQADggEBAG6NNTK7sl9nJxeewVuogCdMtkcdnx9onGtCOeiQ\nqvh5Xwn9akZtoLMVEdceU0ihO4wILlcom3OqHs9WOd6VbgW5a19Thh2toxKidHz5\nrAaBMyZsQbFb6+vFshZwoCtOLZI/eIZfUUMFqMXlEPrKru1nSddNdai2+zi5rEnM\nHCot43+3XYuqkvWlOjoi9cP+C4epFYrxpykVbcrtbd7TK+wZNiK3xtDPnVzjdNWL\ngeAEl9xrrk0ss4nO/EreTQgS46gVU+tLC+b23m2dU7dcKZ7RDoiA9bdVc4a2IsaS\n2MvLL4NZ2nUh8hAEHiLtGMAV3C6xNbEyM07hEpDW6vk6tqk=\n-----END CERTIFICATE-----", + "key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCf6sMQke4OUrPf\nlZXqKZC4uaQLMxJB2DgacBf4Q3kXcVNCNHtc2U3U5cMD8tr3/Jt5MKKun5jQrbXV\nFF4eVr4Iv9jgPBwQc2kBUC9QL/alsMhEuXMeqdGQcCK3F0CLJdV3zUlKUDU0kg+O\nExnbl1CHXrIbpD7zLy1i3s8p39v1pYFYf4WlrQxvfa/xo97gXY5dJv8RryryLzRc\nuhHYBvX5MHCGpbrY61JxpfZqBo8CmLuHl1tmbeXpdHdQB11LKiuL6HtKflNjc6rg\n5r8bXl1nZbM/KOZEE+muA1LVoaTyHzY/aGXz0bNy4QRUO+De9JFcTDgnXnNZVG5x\ncyyDBpc9AgMBAAECggEAatcEtehZPJaCeClPPF/Cwbe9YoIfe4BCk186lHI3z7K1\n5nB7zt+bwVY0AUpagv3wvXoB5lrYVOsJpa9y5iAb3GqYMc/XDCKfD/KLea5hwfcn\nBctEn0LjsPVKLDrLs2t2gBDWG2EU+udunwQh7XTdp2Nb6V3FdOGbGAg2LgrSwP1g\n0r4z14F70oWGYyTQ5N8UGuyryVrzQH525OYl38Yt7R6zJ/44FVi/2TvdfHM5ss39\nSXWi00Q30fzaBEf4AdHVwVCRKctwSbrIOyM53kiScFDmBGRblCWOxXbiFV+d3bjX\ngf2zxs7QYZrFOzOO7kLtHGua4itEB02497v+1oKDwQKBgQDOBvCVGRe2WpItOLnj\nSF8iz7Sm+jJGQz0D9FhWyGPvrN7IXGrsXavA1kKRz22dsU8xdKk0yciOB13Wb5y6\nyLsr/fPBjAhPb4h543VHFjpAQcxpsH51DE0b2oYOWMmz+rXGB5Jy8EkP7Q4njIsc\n2wLod1dps8OT8zFx1jX3Us6iUQKBgQDGtKkfsvWi3HkwjFTR+/Y0oMz7bSruE5Z8\ng0VOHPkSr4XiYgLpQxjbNjq8fwsa/jTt1B57+By4xLpZYD0BTFuf5po+igSZhH8s\nQS5XnUnbM7d6Xr/da7ZkhSmUbEaMeHONSIVpYNgtRo4bB9Mh0l1HWdoevw/w5Ryt\nL/OQiPhfLQKBgQCh1iG1fPh7bbnVe/HI71iL58xoPbCwMLEFIjMiOFcINirqCG6V\nLR91Ytj34JCihl1G4/TmWnsH1hGIGDRtJLCiZeHL70u32kzCMkI1jOhFAWqoutMa\n7obDkmwraONIVW/kFp6bWtSJhhTQTD4adI9cPCKWDXdcCHSWj0Xk+U8HgQKBgBng\nt1HYhaLzIZlP/U/nh3XtJyTrX7bnuCZ5FhKJNWrYjxAfgY+NXHRYCKg5x2F5j70V\nbe7pLhxmCnrPTMKZhik56AaTBOxVVBaYWoewhUjV4GRAaK5Wc8d9jB+3RizPFwVk\nV3OU2DJ1SNZ+W2HBOsKrEfwFF/dgby6i2w6MuAP1AoGBAIxvxUygeT/6P0fHN22P\nzAHFI4v2925wYdb7H//D8DIADyBwv18N6YH8uH7L+USZN7e4p2k8MGGyvTXeC6aX\nIeVtU6fH57Ddn59VPbF20m8RCSkmBvSdcbyBmqlZSBE+fKwCliKl6u/GH0BNAWKz\nr8yiEiskqRmy7P7MY9hDmEbG\n-----END PRIVATE KEY-----", + "snis": [ + "t.com", + "test.com" + ] + } + ] +} + +--- error_log +failed to parse cert +--- error_code: 404 diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-json/stream-route.t b/CloudronPackages/APISIX/apisix-source/t/config-center-json/stream-route.t new file 
mode 100644 index 0000000..47eb16e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-json/stream-route.t @@ -0,0 +1,148 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("stream_enable", 1); + + if (!$block->stream_request) { + $block->set_value("stream_request", "mmm"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_json +{ + "stream_routes": [ + { + "server_addr": "127.0.0.1", + "server_port": 1985, + "id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- stream_response +hello world + + + +=== TEST 2: rule with bad plugin +--- apisix_json +{ + "stream_routes": [ + { + "server_addr": "127.0.0.1", + "server_port": 1985, + "id": 1, + "plugins": { + "mqtt-proxy": { + "uri": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- error_log eval +qr/property "\w+" is required/ + + + +=== TEST 3: 
ignore unknown plugin +--- apisix_json +{ + "stream_routes": [ + { + "server_addr": "127.0.0.1", + "server_port": 1985, + "id": 1, + "plugins": { + "x-rewrite": { + "uri": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + } + ] +} +--- stream_response +hello world + + + +=== TEST 4: sanity with plugin +--- apisix_json +{ + "stream_routes": [ + { + "server_addr": "127.0.0.1", + "server_port": 1985, + "id": 1, + "upstream_id": 1, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + } + } + ], + "upstreams": [ + { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin", + "id": 1 + } + ] +} +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- stream_response +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/consumer-group.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/consumer-group.t new file mode 100644 index 0000000..ff6f6a6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/consumer-group.t @@ -0,0 +1,140 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + my $routes = <<_EOC_; +routes: + - + uri: /hello + plugins: + key-auth: + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $routes); + + if (!$block->request) { + $block->set_value("request", "GET /hello?apikey=one"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_yaml +consumer_groups: + - id: foobar + plugins: + response-rewrite: + body: "hello\n" +consumers: + - username: one + group_id: foobar + plugins: + key-auth: + key: one +#END +--- response_body +hello + + + +=== TEST 2: consumer group not found +--- apisix_yaml +consumers: + - username: one + group_id: invalid_group + plugins: + key-auth: + key: one +#END +--- error_code: 503 +--- error_log +failed to fetch consumer group config by id: invalid_group + + + +=== TEST 3: plugin priority +--- apisix_yaml +consumer_groups: + - id: foobar + plugins: + response-rewrite: + body: "hello\n" +consumers: + - username: one + group_id: foobar + plugins: + key-auth: + key: one + response-rewrite: + body: "world\n" +#END +--- response_body +world + + + +=== TEST 4: invalid plugin +--- apisix_yaml +consumer_groups: + - id: foobar + plugins: + example-plugin: + skey: "s" + response-rewrite: + body: "hello\n" +consumers: + - username: one + group_id: foobar + plugins: + key-auth: + key: one +#END +--- error_code: 503 +--- error_log +failed to check the configuration of plugin example-plugin +failed to fetch consumer group 
config by id: foobar diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/consumer.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/consumer.t new file mode 100644 index 0000000..7884ee9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/consumer.t @@ -0,0 +1,88 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: validate consumer +--- apisix_yaml +consumers: + - username: jwt&auth +routes: + - uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world +--- error_log +property "username" validation failed + + + +=== TEST 2: consumer restriction +--- apisix_yaml +consumers: + - username: jack + plugins: + key-auth: + key: user-key +routes: + - id: 1 + methods: + - POST + uri: "/hello" + plugins: + key-auth: + consumer-restriction: + whitelist: + - jack + upstream: + type: roundrobin + nodes: + "127.0.0.1:1980": 1 +#END +--- more_headers +apikey: user-key +--- request +POST /hello diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/global-rule.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/global-rule.t new file mode 100644 index 0000000..5fe5d0f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/global-rule.t @@ -0,0 +1,136 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +global_rules: + - + id: 1 + plugins: + response-rewrite: + body: "hello\n" +#END +--- response_body +hello + + + +=== TEST 2: global rule with bad plugin +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +global_rules: + - + id: 1 + plugins: + response-rewrite: + body: 4 +#END +--- response_body +hello world +--- error_log +property "body" validation failed + + + +=== TEST 3: fix global rule with default value +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + +global_rules: + - + id: 1 + plugins: + uri-blocker: + block_rules: + - /h* +#END +--- error_code: 403 + + + +=== TEST 4: common phase without matched route +--- apisix_yaml +routes: + - + uri: /apisix/prometheus/metrics + plugins: + public-api: {} 
+global_rules: + - + id: 1 + plugins: + cors: + allow_origins: "http://a.com,http://b.com" +#END +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/plugin-configs.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/plugin-configs.t new file mode 100644 index 0000000..e199e58 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/plugin-configs.t @@ -0,0 +1,144 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_yaml +plugin_configs: + - + id: 1 + plugins: + response-rewrite: + body: "hello\n" +routes: + - id: 1 + uri: /hello + plugin_config_id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- response_body +hello + + + +=== TEST 2: plugin_config not found +--- apisix_yaml +routes: + - id: 1 + uri: /hello + plugin_config_id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- error_code: 503 +--- error_log +failed to fetch plugin config by id: 1 + + + +=== TEST 3: mix plugins & plugin_config_id +--- apisix_yaml +plugin_configs: + - + id: 1 + plugins: + example-plugin: + i: 1 + response-rewrite: + body: "hello\n" +routes: + - id: 1 + uri: /echo + plugin_config_id: 1 + plugins: + proxy-rewrite: + headers: + "in": "out" + response-rewrite: + body: "world\n" + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /echo +--- response_body +world +--- response_headers +in: out +--- error_log eval +qr/conf_version: \d+#\d+,/ + + + +=== TEST 4: invalid plugin +--- apisix_yaml +plugin_configs: + - + id: 1 + plugins: + example-plugin: + skey: "s" + response-rewrite: + body: "hello\n" +routes: + - id: 1 + uri: /hello + plugin_config_id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- error_code: 503 +--- error_log +failed to check the configuration of plugin example-plugin +failed to fetch plugin config by id: 1 diff --git 
a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/plugin-metadata.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/plugin-metadata.t new file mode 100644 index 0000000..34c6949 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/plugin-metadata.t @@ -0,0 +1,89 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_yaml +upstreams: + - id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +routes: + - + uri: /hello + upstream_id: 1 + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log +plugin_metadata: + - id: http-logger + log_format: + host: "$host" + remote_addr: "$remote_addr" +#END +--- request +GET /hello +--- error_log +"remote_addr":"127.0.0.1" + + + +=== TEST 2: sanity +--- apisix_yaml +upstreams: + - id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +routes: + - + uri: /hello + upstream_id: 1 +plugin_metadata: + - id: authz-casbin + model: 123 +#END +--- request +GET /hello +--- error_log +failed to check item data of [plugin_metadata] diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/plugin.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/plugin.t new file mode 100644 index 0000000..2ee975d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/plugin.t @@ -0,0 +1,229 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + if (!$block->apisix_yaml) { + my $routes = <<_EOC_; +routes: + - uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->extra_apisix_yaml . $routes); + } +}); + +our $debug_config = t::APISIX::read_file("conf/debug.yaml"); +$debug_config =~ s/basic:\n enable: false/basic:\n enable: true/; + +run_tests(); + +## TODO: extra_apisix_yaml is specific to this document and is not standard behavior for +## the APISIX testing framework, so it should be standardized or replaced later. + +__DATA__ + +=== TEST 1: sanity +--- extra_apisix_yaml +plugins: + - name: ip-restriction + - name: jwt-auth + - name: mqtt-proxy + stream: true +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + }) + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +hello world +--- error_log +use config_provider: yaml +load(): loaded plugin and sort by priority: 3000 name: ip-restriction +load(): loaded plugin and sort by priority: 2510 name: jwt-auth +load_stream(): loaded stream plugin and sort by priority: 1000 name: mqtt-proxy +--- grep_error_log eval +qr/load\(\): new plugins/ +--- grep_error_log_out +load(): new plugins +load(): new plugins +load(): new plugins +load(): new plugins + + + +=== TEST 2: plugins not changed, but still need to reload +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +plugins: + - ip-restriction + - jwt-auth +stream_plugins: + - mqtt-proxy +--- extra_apisix_yaml +plugins: + - name: ip-restriction + - name: jwt-auth + - name: mqtt-proxy + stream: true +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + }) + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +hello world +--- grep_error_log eval +qr/loaded plugin and sort by priority: \d+ name: [^,]+/ +--- grep_error_log_out eval +qr/(loaded plugin and sort by priority: (3000 name: ip-restriction|2510 name: jwt-auth) +){4}/ + + + +=== TEST 3: disable plugin and its router +--- extra_apisix_yaml +plugins: + - name: jwt-auth +--- request +GET /apisix/prometheus/metrics +--- error_code: 404 + + + +=== TEST 4: enable plugin and its router +--- apisix_yaml +routes: + - uri: /apisix/prometheus/metrics + plugins: + public-api: {} +plugins: + - name: public-api + - name: prometheus +#END +--- request +GET /apisix/prometheus/metrics + + + +=== TEST 5: invalid plugin config +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +plugins: + - ip-restriction + - jwt-auth +stream_plugins: + - mqtt-proxy +--- extra_apisix_yaml +plugins: + - name: xxx + stream: ip-restriction +--- request +GET /hello +--- response_body +hello world +--- error_log +property "stream" validation failed: wrong type: expected boolean, got string +--- no_error_log +load(): plugins not changed + + + +=== TEST 6: empty plugin list +--- extra_apisix_yaml +plugins: +stream_plugins: +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + }) + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +hello world +--- error_log +use config_provider: yaml +load(): new plugins: {} +load_stream(): new plugins: {} diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/route-service.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/route-service.t new file mode 100644 index 0000000..357221e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/route-service.t @@ -0,0 +1,297 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + +run_tests(); + +__DATA__ + +=== TEST 1: hit route +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + service_id: 1 + id: 1 +services: + - + id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 2: not found service +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + id: 1 + service_id: 1111 +services: + - + id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 404 +--- error_log +failed to fetch service configuration by id: 1111 + + + +=== TEST 3: service upstream priority +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + nodes: + "127.0.0.1:1977": 1 + type: roundrobin + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: route service upstream priority +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + service_id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +services: + - + id: 1 + upstream: + nodes: + "127.0.0.1:1977": 1 + type: roundrobin +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1977": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: route service upstream by upstream_id priority +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + service_id: 1 + upstream: + nodes: + "127.0.0.1:1977": 1 + type: roundrobin + upstream_id: 1 +services: + - 
+ id: 1 + upstream: + nodes: + "127.0.0.1:1977": 1 + type: roundrobin +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 6: route service upstream priority +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + service_id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +services: + - + id: 1 + upstream: + nodes: + "127.0.0.1:1977": 1 + type: roundrobin + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1977": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 7: two routes with the same service +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - uris: + - /hello + service_id: 1 + id: 1 + plugins: + response-rewrite: + body: "hello\n" + - uris: + - /world + service_id: 1 + id: 2 + plugins: + response-rewrite: + body: "world\n" +services: + - + id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello + + + +=== TEST 8: service with bad plugin +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + service_id: 1 +services: + - + id: 1 + plugins: + proxy-rewrite: + uri: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 404 +--- error_log +property "uri" validation failed + + + +=== TEST 9: fix service with default value +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + service_id: 1 +services: + - + id: 1 + plugins: + uri-blocker: + block_rules: + - /h* + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 403 diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/route-upstream.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/route-upstream.t new 
file mode 100644 index 0000000..d5353a4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/route-upstream.t @@ -0,0 +1,206 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + +run_tests(); + +__DATA__ + +=== TEST 1: hit route +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 2: not found upstream +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream_id: 1111 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code_like: ^(?:50\d)$ +--- error_log +failed to find upstream by id: 1111 + + + +=== TEST 3: upstream_id priority upstream +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream_id: 1 + upstream: + 
nodes: + "127.0.0.1:1977": 1 + type: roundrobin +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1981": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: enable healthcheck +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + retries: 2 + checks: + active: + http_path: "/status" + healthy: + interval: 2 + successes: 1 +#END +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: upstream domain +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "test.com:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 200 + + + +=== TEST 6: upstream hash_on (bad) +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "test.com:1980": 1 + type: chash + hash_on: header + key: "$aaa" +#END +--- request +GET /hello +--- error_code: 502 +--- error_log +invalid configuration: failed to match pattern + + + +=== TEST 7: upstream hash_on (good) +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + "127.0.0.2:1980": 1 + type: chash + hash_on: header + key: "test" +#END +--- request +GET /hello +--- more_headers +test: one +--- error_log +proxy request to 127.0.0.1:1980 diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/route.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/route.t new file mode 100644 index 0000000..2da1397 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/route.t @@ -0,0 +1,297 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world +--- error_log +use config_provider: yaml + + + +=== TEST 2: route:uri + host (missing host, not hit) +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + host: foo.com + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 404 +--- error_log +use config_provider: yaml + + + +=== TEST 3: route:uri + host +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + host: foo.com + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- more_headers +host: foo.com +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: route with bad plugin +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + 
plugins: + proxy-rewrite: + uri: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 404 +--- error_log +property "uri" validation failed + + + +=== TEST 5: ignore unknown plugin +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + plugins: + x-rewrite: + uri: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 6: route with bad plugin, radixtree_host_uri +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + plugins: + proxy-rewrite: + uri: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 404 +--- error_log +property "uri" validation failed + + + +=== TEST 7: fix route with default value +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + plugins: + uri-blocker: + block_rules: + - /h* + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 8: invalid route, bad vars operator +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + vars: + - remote_addr + - = + - 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 404 +--- error_log +failed to validate the 'vars' expression + + + +=== TEST 9: script with id +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + script: "local ngx = ngx" + script_id: "1" + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- 
error_code: 200 +--- error_log +missing loaded script object + + + +=== TEST 10: hosts with '_' is valid +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + hosts: + - foo.com + - v1_test-api.com + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- more_headers +host: v1_test-api.com +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 11: script with plugin_config_id +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + id: 1 + uri: /hello + script: "local ngx = ngx" + plugin_config_id: "1" + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 404 +--- error_log +failed to check item data of [routes] diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/secret.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/secret.t new file mode 100644 index 0000000..82fefd3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/secret.t @@ -0,0 +1,390 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + if (!$block->apisix_yaml) { + my $routes = <<_EOC_; +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +_EOC_ + + $block->set_value("apisix_yaml", $routes); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: validate secret/vault: wrong schema +--- apisix_yaml +secrets: + - id: vault/1 + prefix: kv/apisix + token: root + uri: 127.0.0.1:8200 +#END +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local values = secret.secrets() + ngx.say(#values) + } + } +--- request +GET /t +--- response_body +0 +--- error_log +property "uri" validation failed: failed to match pattern "^[^\\/]+:\\/\\/([\\da-zA-Z.-]+|\\[[\\da-fA-F:]+\\])(:\\d+)?" 
+ + + +=== TEST 2: validate secrets: manager not exits +--- apisix_yaml +secrets: + - id: hhh/1 + prefix: kv/apisix + token: root + uri: 127.0.0.1:8200 +#END +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local values = secret.secrets() + ngx.say(#values) + } + } +--- request +GET /t +--- response_body +0 +--- error_log +secret manager not exits + + + +=== TEST 3: load config normal +--- apisix_yaml +secrets: + - id: vault/1 + prefix: kv/apisix + token: root + uri: http://127.0.0.1:8200 +#END +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local values = secret.secrets() + ngx.say("len: ", #values) + + ngx.say("id: ", values[1].value.id) + ngx.say("prefix: ", values[1].value.prefix) + ngx.say("token: ", values[1].value.token) + ngx.say("uri: ", values[1].value.uri) + } + } +--- request +GET /t +--- response_body +len: 1 +id: vault/1 +prefix: kv/apisix +token: root +uri: http://127.0.0.1:8200 + + + +=== TEST 4: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/apisix-key key=value +--- response_body +Success! 
Data written to: kv/apisix/apisix-key + + + +=== TEST 5: secret.fetch_by_uri: start with $secret:// +--- apisix_yaml +secrets: + - id: vault/1 + prefix: kv/apisix + token: root + uri: http://127.0.0.1:8200 +#END +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://vault/1/apisix-key/key") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +value + + + +=== TEST 6: secret.fetch_by_uri, wrong ref format: wrong type +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri(1) + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error secret_uri type: number + + + +=== TEST 7: secret.fetch_by_uri, wrong ref format: wrong prefix +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("secret://") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error secret_uri prefix: secret:// + + + +=== TEST 8: secret.fetch_by_uri, error format: no secret manager +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("$secret://") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error format: no secret manager + + + +=== TEST 9: secret.fetch_by_uri, error format: no secret conf id +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("$secret://vault/") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error format: no secret conf id + + + +=== TEST 10: secret.fetch_by_uri, error format: no secret key id +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("$secret://vault/2/") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error 
format: no secret key id + + + +=== TEST 11: secret.fetch_by_uri, no config +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local _, err = secret.fetch_by_uri("$secret://vault/2/bar") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +no secret conf, secret_uri: $secret://vault/2/bar + + + +=== TEST 12: secret.fetch_by_uri, no sub key value +--- apisix_yaml +secrets: + - id: vault/1 + prefix: kv/apisix + token: root + uri: http://127.0.0.1:8200 +#END +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://vault/1/apisix-key/bar") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +nil + + + +=== TEST 13: fetch_secrets env: no cache +--- main_config +env secret=apisix; +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local refs = { + key = "jack", + secret = "$env://secret" + } + local new_refs = secret.fetch_secrets(refs) + assert(new_refs ~= refs) + ngx.say(refs.secret) + ngx.say(new_refs.secret) + ngx.say(new_refs.key) + } + } +--- request +GET /t +--- response_body +$env://secret +apisix +jack +--- error_log_like +qr/retrieve secrets refs/ + + + +=== TEST 14: fetch_secrets env: cache +--- main_config +env secret=apisix; +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local refs = { + key = "jack", + secret = "$env://secret" + } + local refs_1 = secret.fetch_secrets(refs, true, "key", 1) + local refs_2 = secret.fetch_secrets(refs, true, "key", 1) + assert(refs_1 == refs_2) + ngx.say(refs_1.secret) + ngx.say(refs_2.secret) + } + } +--- request +GET /t +--- response_body +apisix +apisix +--- grep_error_log eval +qr/retrieve secrets refs/ +--- grep_error_log_out +retrieve secrets refs + + + +=== TEST 15: fetch_secrets env: table nesting +--- main_config +env secret=apisix; +--- config + location /t { + 
content_by_lua_block { + local secret = require("apisix.secret") + local refs = { + key = "jack", + user = { + username = "apisix", + passsword = "$env://secret" + } + } + local new_refs = secret.fetch_secrets(refs) + ngx.say(new_refs.user.passsword) + } + } +--- request +GET /t +--- response_body +apisix + + + +=== TEST 16: fetch_secrets: wrong refs type +--- main_config +env secret=apisix; +--- config + location /t { + content_by_lua_block { + local secret = require("apisix.secret") + local refs = "wrong" + local new_refs = secret.fetch_secrets(refs) + ngx.say(new_refs) + } + } +--- request +GET /t +--- response_body +nil diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/ssl.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/ssl.t new file mode 100644 index 0000000..401ba4e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/ssl.t @@ -0,0 +1,315 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('debug'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + my $routes = <<_EOC_; +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $routes); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + if ($block->sslhandshake) { + my $sslhandshake = $block->sslhandshake; + + $block->set_value("config", <<_EOC_) +listen unix:\$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:\$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + $sslhandshake + local req = "GET /hello HTTP/1.0\\r\\nHost: test.com\\r\\nConnection: close\\r\\n\\r\\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + local line, err = sock:receive() + if not line then + ngx.say("failed to receive: ", err) + return + end + + ngx.say("received: ", line) + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +_EOC_ + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_yaml +ssls: + - + cert: | + -----BEGIN CERTIFICATE----- + MIIDrzCCApegAwIBAgIJAI3Meu/gJVTLMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV + BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL + 
BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl + ci5sb2NhbDAeFw0yMDEwMjgwMzMzMDJaFw0yMTEwMjgwMzMzMDJaMG4xCzAJBgNV + BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL + BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl + ci5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/qwxCR7g5S + s9+VleopkLi5pAszEkHYOBpwF/hDeRdxU0I0e1zZTdTlwwPy2vf8m3kwoq6fmNCt + tdUUXh5Wvgi/2OA8HBBzaQFQL1Av9qWwyES5cx6p0ZBwIrcXQIsl1XfNSUpQNTSS + D44TGduXUIdeshukPvMvLWLezynf2/WlgVh/haWtDG99r/Gj3uBdjl0m/xGvKvIv + NFy6EdgG9fkwcIalutjrUnGl9moGjwKYu4eXW2Zt5el0d1AHXUsqK4voe0p+U2Nz + quDmvxteXWdlsz8o5kQT6a4DUtWhpPIfNj9oZfPRs3LhBFQ74N70kVxMOCdec1lU + bnFzLIMGlz0CAwEAAaNQME4wHQYDVR0OBBYEFFHeljijrr+SPxlH5fjHRPcC7bv2 + MB8GA1UdIwQYMBaAFFHeljijrr+SPxlH5fjHRPcC7bv2MAwGA1UdEwQFMAMBAf8w + DQYJKoZIhvcNAQELBQADggEBAG6NNTK7sl9nJxeewVuogCdMtkcdnx9onGtCOeiQ + qvh5Xwn9akZtoLMVEdceU0ihO4wILlcom3OqHs9WOd6VbgW5a19Thh2toxKidHz5 + rAaBMyZsQbFb6+vFshZwoCtOLZI/eIZfUUMFqMXlEPrKru1nSddNdai2+zi5rEnM + HCot43+3XYuqkvWlOjoi9cP+C4epFYrxpykVbcrtbd7TK+wZNiK3xtDPnVzjdNWL + geAEl9xrrk0ss4nO/EreTQgS46gVU+tLC+b23m2dU7dcKZ7RDoiA9bdVc4a2IsaS + 2MvLL4NZ2nUh8hAEHiLtGMAV3C6xNbEyM07hEpDW6vk6tqk= + -----END CERTIFICATE----- + key: | + -----BEGIN PRIVATE KEY----- + MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCf6sMQke4OUrPf + lZXqKZC4uaQLMxJB2DgacBf4Q3kXcVNCNHtc2U3U5cMD8tr3/Jt5MKKun5jQrbXV + FF4eVr4Iv9jgPBwQc2kBUC9QL/alsMhEuXMeqdGQcCK3F0CLJdV3zUlKUDU0kg+O + Exnbl1CHXrIbpD7zLy1i3s8p39v1pYFYf4WlrQxvfa/xo97gXY5dJv8RryryLzRc + uhHYBvX5MHCGpbrY61JxpfZqBo8CmLuHl1tmbeXpdHdQB11LKiuL6HtKflNjc6rg + 5r8bXl1nZbM/KOZEE+muA1LVoaTyHzY/aGXz0bNy4QRUO+De9JFcTDgnXnNZVG5x + cyyDBpc9AgMBAAECggEAatcEtehZPJaCeClPPF/Cwbe9YoIfe4BCk186lHI3z7K1 + 5nB7zt+bwVY0AUpagv3wvXoB5lrYVOsJpa9y5iAb3GqYMc/XDCKfD/KLea5hwfcn + BctEn0LjsPVKLDrLs2t2gBDWG2EU+udunwQh7XTdp2Nb6V3FdOGbGAg2LgrSwP1g + 0r4z14F70oWGYyTQ5N8UGuyryVrzQH525OYl38Yt7R6zJ/44FVi/2TvdfHM5ss39 + SXWi00Q30fzaBEf4AdHVwVCRKctwSbrIOyM53kiScFDmBGRblCWOxXbiFV+d3bjX + 
gf2zxs7QYZrFOzOO7kLtHGua4itEB02497v+1oKDwQKBgQDOBvCVGRe2WpItOLnj + SF8iz7Sm+jJGQz0D9FhWyGPvrN7IXGrsXavA1kKRz22dsU8xdKk0yciOB13Wb5y6 + yLsr/fPBjAhPb4h543VHFjpAQcxpsH51DE0b2oYOWMmz+rXGB5Jy8EkP7Q4njIsc + 2wLod1dps8OT8zFx1jX3Us6iUQKBgQDGtKkfsvWi3HkwjFTR+/Y0oMz7bSruE5Z8 + g0VOHPkSr4XiYgLpQxjbNjq8fwsa/jTt1B57+By4xLpZYD0BTFuf5po+igSZhH8s + QS5XnUnbM7d6Xr/da7ZkhSmUbEaMeHONSIVpYNgtRo4bB9Mh0l1HWdoevw/w5Ryt + L/OQiPhfLQKBgQCh1iG1fPh7bbnVe/HI71iL58xoPbCwMLEFIjMiOFcINirqCG6V + LR91Ytj34JCihl1G4/TmWnsH1hGIGDRtJLCiZeHL70u32kzCMkI1jOhFAWqoutMa + 7obDkmwraONIVW/kFp6bWtSJhhTQTD4adI9cPCKWDXdcCHSWj0Xk+U8HgQKBgBng + t1HYhaLzIZlP/U/nh3XtJyTrX7bnuCZ5FhKJNWrYjxAfgY+NXHRYCKg5x2F5j70V + be7pLhxmCnrPTMKZhik56AaTBOxVVBaYWoewhUjV4GRAaK5Wc8d9jB+3RizPFwVk + V3OU2DJ1SNZ+W2HBOsKrEfwFF/dgby6i2w6MuAP1AoGBAIxvxUygeT/6P0fHN22P + zAHFI4v2925wYdb7H//D8DIADyBwv18N6YH8uH7L+USZN7e4p2k8MGGyvTXeC6aX + IeVtU6fH57Ddn59VPbF20m8RCSkmBvSdcbyBmqlZSBE+fKwCliKl6u/GH0BNAWKz + r8yiEiskqRmy7P7MY9hDmEbG + -----END PRIVATE KEY----- + snis: + - "t.com" + - "test.com" +--- sslhandshake +local sess, err = sock:sslhandshake(nil, "test.com", false) +if not sess then + ngx.say("failed to do SSL handshake: ", err) + return +end +--- response_body +received: HTTP/1.1 200 OK +close: 1 nil +--- error_log +server name: "test.com" + + + +=== TEST 2: single sni +--- apisix_yaml +ssls: + - + cert: | + -----BEGIN CERTIFICATE----- + MIIDrzCCApegAwIBAgIJAI3Meu/gJVTLMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV + BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL + BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl + ci5sb2NhbDAeFw0yMDEwMjgwMzMzMDJaFw0yMTEwMjgwMzMzMDJaMG4xCzAJBgNV + BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL + BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl + ci5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/qwxCR7g5S + s9+VleopkLi5pAszEkHYOBpwF/hDeRdxU0I0e1zZTdTlwwPy2vf8m3kwoq6fmNCt + tdUUXh5Wvgi/2OA8HBBzaQFQL1Av9qWwyES5cx6p0ZBwIrcXQIsl1XfNSUpQNTSS + 
D44TGduXUIdeshukPvMvLWLezynf2/WlgVh/haWtDG99r/Gj3uBdjl0m/xGvKvIv + NFy6EdgG9fkwcIalutjrUnGl9moGjwKYu4eXW2Zt5el0d1AHXUsqK4voe0p+U2Nz + quDmvxteXWdlsz8o5kQT6a4DUtWhpPIfNj9oZfPRs3LhBFQ74N70kVxMOCdec1lU + bnFzLIMGlz0CAwEAAaNQME4wHQYDVR0OBBYEFFHeljijrr+SPxlH5fjHRPcC7bv2 + MB8GA1UdIwQYMBaAFFHeljijrr+SPxlH5fjHRPcC7bv2MAwGA1UdEwQFMAMBAf8w + DQYJKoZIhvcNAQELBQADggEBAG6NNTK7sl9nJxeewVuogCdMtkcdnx9onGtCOeiQ + qvh5Xwn9akZtoLMVEdceU0ihO4wILlcom3OqHs9WOd6VbgW5a19Thh2toxKidHz5 + rAaBMyZsQbFb6+vFshZwoCtOLZI/eIZfUUMFqMXlEPrKru1nSddNdai2+zi5rEnM + HCot43+3XYuqkvWlOjoi9cP+C4epFYrxpykVbcrtbd7TK+wZNiK3xtDPnVzjdNWL + geAEl9xrrk0ss4nO/EreTQgS46gVU+tLC+b23m2dU7dcKZ7RDoiA9bdVc4a2IsaS + 2MvLL4NZ2nUh8hAEHiLtGMAV3C6xNbEyM07hEpDW6vk6tqk= + -----END CERTIFICATE----- + key: | + -----BEGIN PRIVATE KEY----- + MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCf6sMQke4OUrPf + lZXqKZC4uaQLMxJB2DgacBf4Q3kXcVNCNHtc2U3U5cMD8tr3/Jt5MKKun5jQrbXV + FF4eVr4Iv9jgPBwQc2kBUC9QL/alsMhEuXMeqdGQcCK3F0CLJdV3zUlKUDU0kg+O + Exnbl1CHXrIbpD7zLy1i3s8p39v1pYFYf4WlrQxvfa/xo97gXY5dJv8RryryLzRc + uhHYBvX5MHCGpbrY61JxpfZqBo8CmLuHl1tmbeXpdHdQB11LKiuL6HtKflNjc6rg + 5r8bXl1nZbM/KOZEE+muA1LVoaTyHzY/aGXz0bNy4QRUO+De9JFcTDgnXnNZVG5x + cyyDBpc9AgMBAAECggEAatcEtehZPJaCeClPPF/Cwbe9YoIfe4BCk186lHI3z7K1 + 5nB7zt+bwVY0AUpagv3wvXoB5lrYVOsJpa9y5iAb3GqYMc/XDCKfD/KLea5hwfcn + BctEn0LjsPVKLDrLs2t2gBDWG2EU+udunwQh7XTdp2Nb6V3FdOGbGAg2LgrSwP1g + 0r4z14F70oWGYyTQ5N8UGuyryVrzQH525OYl38Yt7R6zJ/44FVi/2TvdfHM5ss39 + SXWi00Q30fzaBEf4AdHVwVCRKctwSbrIOyM53kiScFDmBGRblCWOxXbiFV+d3bjX + gf2zxs7QYZrFOzOO7kLtHGua4itEB02497v+1oKDwQKBgQDOBvCVGRe2WpItOLnj + SF8iz7Sm+jJGQz0D9FhWyGPvrN7IXGrsXavA1kKRz22dsU8xdKk0yciOB13Wb5y6 + yLsr/fPBjAhPb4h543VHFjpAQcxpsH51DE0b2oYOWMmz+rXGB5Jy8EkP7Q4njIsc + 2wLod1dps8OT8zFx1jX3Us6iUQKBgQDGtKkfsvWi3HkwjFTR+/Y0oMz7bSruE5Z8 + g0VOHPkSr4XiYgLpQxjbNjq8fwsa/jTt1B57+By4xLpZYD0BTFuf5po+igSZhH8s + QS5XnUnbM7d6Xr/da7ZkhSmUbEaMeHONSIVpYNgtRo4bB9Mh0l1HWdoevw/w5Ryt + L/OQiPhfLQKBgQCh1iG1fPh7bbnVe/HI71iL58xoPbCwMLEFIjMiOFcINirqCG6V + 
LR91Ytj34JCihl1G4/TmWnsH1hGIGDRtJLCiZeHL70u32kzCMkI1jOhFAWqoutMa + 7obDkmwraONIVW/kFp6bWtSJhhTQTD4adI9cPCKWDXdcCHSWj0Xk+U8HgQKBgBng + t1HYhaLzIZlP/U/nh3XtJyTrX7bnuCZ5FhKJNWrYjxAfgY+NXHRYCKg5x2F5j70V + be7pLhxmCnrPTMKZhik56AaTBOxVVBaYWoewhUjV4GRAaK5Wc8d9jB+3RizPFwVk + V3OU2DJ1SNZ+W2HBOsKrEfwFF/dgby6i2w6MuAP1AoGBAIxvxUygeT/6P0fHN22P + zAHFI4v2925wYdb7H//D8DIADyBwv18N6YH8uH7L+USZN7e4p2k8MGGyvTXeC6aX + IeVtU6fH57Ddn59VPbF20m8RCSkmBvSdcbyBmqlZSBE+fKwCliKl6u/GH0BNAWKz + r8yiEiskqRmy7P7MY9hDmEbG + -----END PRIVATE KEY----- + sni: "test.com" +--- sslhandshake +local sess, err = sock:sslhandshake(nil, "test.com", false) +if not sess then + ngx.say("failed to do SSL handshake: ", err) + return +end +--- response_body +received: HTTP/1.1 200 OK +close: 1 nil +--- error_log +server name: "test.com" + + + +=== TEST 3: bad cert +--- apisix_yaml +ssls: + - + cert: | + -----BEGIN CERTIFICATE----- + MIIDrzCCApegAwIBAgIJAI3Meu/gJVTLMA0GCSqGSIb3DQEBCwUAMG4xCzAJBgNV + BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL + BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl + ci5sb2NhbDAeFw0yMDEwMjgwMzMzMDJaFw0yMTEwMjgwMzMzMDJaMG4xCzAJBgNV + BAYTAkNOMREwDwYDVQQIDAhaaGVqaWFuZzERMA8GA1UEBwwISGFuZ3pob3UxDTAL + BgNVBAoMBHRlc3QxDTALBgNVBAsMBHRlc3QxGzAZBgNVBAMMEmV0Y2QuY2x1c3Rl + ci5sb2NhbDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ/qwxCR7g5S + s9+VleopkLi5pAszEkHYOBpwF/hDeRdxU0I0e1zZTdTlwwPy2vf8m3kwoq6fmNCt + tdUUXh5Wvgi/2OA8HBBzaQFQL1Av9qWwyES5cx6p0ZBwIrcXQIsl1XfNSUpQNTSS + D44TGduXUIdeshukPvMvLWLezynf2/WlgVh/haWtDG99r/Gj3uBdjl0m/xGvKvIv + quDmvxteXWdlsz8o5kQT6a4DUtWhpPIfNj9oZfPRs3LhBFQ74N70kVxMOCdec1lU + bnFzLIMGlz0CAwEAAaNQME4wHQYDVR0OBBYEFFHeljijrr+SPxlH5fjHRPcC7bv2 + MB8GA1UdIwQYMBaAFFHeljijrr+SPxlH5fjHRPcC7bv2MAwGA1UdEwQFMAMBAf8w + DQYJKoZIhvcNAQELBQADggEBAG6NNTK7sl9nJxeewVuogCdMtkcdnx9onGtCOeiQ + qvh5Xwn9akZtoLMVEdceU0ihO4wILlcom3OqHs9WOd6VbgW5a19Thh2toxKidHz5 + rAaBMyZsQbFb6+vFshZwoCtOLZI/eIZfUUMFqMXlEPrKru1nSddNdai2+zi5rEnM + 
HCot43+3XYuqkvWlOjoi9cP+C4epFYrxpykVbcrtbd7TK+wZNiK3xtDPnVzjdNWL + geAEl9xrrk0ss4nO/EreTQgS46gVU+tLC+b23m2dU7dcKZ7RDoiA9bdVc4a2IsaS + 2MvLL4NZ2nUh8hAEHiLtGMAV3C6xNbEyM07hEpDW6vk6tqk= + -----END CERTIFICATE----- + key: | + -----BEGIN PRIVATE KEY----- + MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCf6sMQke4OUrPf + lZXqKZC4uaQLMxJB2DgacBf4Q3kXcVNCNHtc2U3U5cMD8tr3/Jt5MKKun5jQrbXV + FF4eVr4Iv9jgPBwQc2kBUC9QL/alsMhEuXMeqdGQcCK3F0CLJdV3zUlKUDU0kg+O + Exnbl1CHXrIbpD7zLy1i3s8p39v1pYFYf4WlrQxvfa/xo97gXY5dJv8RryryLzRc + uhHYBvX5MHCGpbrY61JxpfZqBo8CmLuHl1tmbeXpdHdQB11LKiuL6HtKflNjc6rg + 5r8bXl1nZbM/KOZEE+muA1LVoaTyHzY/aGXz0bNy4QRUO+De9JFcTDgnXnNZVG5x + cyyDBpc9AgMBAAECggEAatcEtehZPJaCeClPPF/Cwbe9YoIfe4BCk186lHI3z7K1 + 5nB7zt+bwVY0AUpagv3wvXoB5lrYVOsJpa9y5iAb3GqYMc/XDCKfD/KLea5hwfcn + BctEn0LjsPVKLDrLs2t2gBDWG2EU+udunwQh7XTdp2Nb6V3FdOGbGAg2LgrSwP1g + 0r4z14F70oWGYyTQ5N8UGuyryVrzQH525OYl38Yt7R6zJ/44FVi/2TvdfHM5ss39 + SXWi00Q30fzaBEf4AdHVwVCRKctwSbrIOyM53kiScFDmBGRblCWOxXbiFV+d3bjX + gf2zxs7QYZrFOzOO7kLtHGua4itEB02497v+1oKDwQKBgQDOBvCVGRe2WpItOLnj + SF8iz7Sm+jJGQz0D9FhWyGPvrN7IXGrsXavA1kKRz22dsU8xdKk0yciOB13Wb5y6 + yLsr/fPBjAhPb4h543VHFjpAQcxpsH51DE0b2oYOWMmz+rXGB5Jy8EkP7Q4njIsc + 2wLod1dps8OT8zFx1jX3Us6iUQKBgQDGtKkfsvWi3HkwjFTR+/Y0oMz7bSruE5Z8 + g0VOHPkSr4XiYgLpQxjbNjq8fwsa/jTt1B57+By4xLpZYD0BTFuf5po+igSZhH8s + QS5XnUnbM7d6Xr/da7ZkhSmUbEaMeHONSIVpYNgtRo4bB9Mh0l1HWdoevw/w5Ryt + L/OQiPhfLQKBgQCh1iG1fPh7bbnVe/HI71iL58xoPbCwMLEFIjMiOFcINirqCG6V + LR91Ytj34JCihl1G4/TmWnsH1hGIGDRtJLCiZeHL70u32kzCMkI1jOhFAWqoutMa + 7obDkmwraONIVW/kFp6bWtSJhhTQTD4adI9cPCKWDXdcCHSWj0Xk+U8HgQKBgBng + t1HYhaLzIZlP/U/nh3XtJyTrX7bnuCZ5FhKJNWrYjxAfgY+NXHRYCKg5x2F5j70V + be7pLhxmCnrPTMKZhik56AaTBOxVVBaYWoewhUjV4GRAaK5Wc8d9jB+3RizPFwVk + V3OU2DJ1SNZ+W2HBOsKrEfwFF/dgby6i2w6MuAP1AoGBAIxvxUygeT/6P0fHN22P + zAHFI4v2925wYdb7H//D8DIADyBwv18N6YH8uH7L+USZN7e4p2k8MGGyvTXeC6aX + IeVtU6fH57Ddn59VPbF20m8RCSkmBvSdcbyBmqlZSBE+fKwCliKl6u/GH0BNAWKz + r8yiEiskqRmy7P7MY9hDmEbG + -----END PRIVATE KEY----- + snis: + - "t.com" 
+ - "test.com" +--- error_log +failed to parse cert +--- error_code: 404 diff --git a/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/stream-route.t b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/stream-route.t new file mode 100644 index 0000000..6792b1b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/config-center-yaml/stream-route.t @@ -0,0 +1,127 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + $block->set_value("stream_enable", 1); + + if (!$block->stream_request) { + $block->set_value("stream_request", "mmm"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + nodes: + "127.0.0.1:1995": 1 + type: roundrobin +#END +--- stream_response +hello world + + + +=== TEST 2: rule with bad plugin +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + plugins: + mqtt-proxy: + uri: 1 + upstream: + nodes: + "127.0.0.1:1995": 1 + type: roundrobin +#END +--- error_log eval +qr/property "\w+" is required/ + + + +=== TEST 3: ignore unknown plugin +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + plugins: + x-rewrite: + uri: 1 + upstream: + nodes: + "127.0.0.1:1995": 1 + type: roundrobin +#END +--- stream_response +hello world + + + +=== TEST 4: sanity with plugin +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream_id: 1 + plugins: + mqtt-proxy: + protocol_name: "MQTT" + protocol_level: 4 +upstreams: + - nodes: + "127.0.0.1:1995": 1 + type: roundrobin + id: 1 +#END +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- stream_response +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/control/control-healthcheck-bug-fix.t b/CloudronPackages/APISIX/apisix-source/t/control/control-healthcheck-bug-fix.t 
new file mode 100644 index 0000000..ee2e516 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/control-healthcheck-bug-fix.t @@ -0,0 +1,134 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: setup route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "httpbin.org:80": 1, + "mockbin.org:80": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit the route +--- request +GET /status/403 +--- error_code: 403 + + + +=== TEST 3: hit control api +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local passed = true + + for i = 1, 40 do + local code, body, res = t.test('/v1/routes/1', ngx.HTTP_GET) + if code ~= ngx.HTTP_OK then + passed = code + break + end + 
end + + if passed then + ngx.say("passed") + else + ngx.say("failed. got status code: ", passed) + end + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit the route again +--- request +GET /status/403 +--- error_code: 403 + + + +=== TEST 5: hit control api +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local passed = true + + for i = 1, 40 do + local code, body, res = t.test('/v1/routes/1', ngx.HTTP_GET) + if code ~= ngx.HTTP_OK then + passed = code + break + end + end + + if passed then + ngx.say("passed") + else + ngx.say("failed. got status code: ", passed) + end + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/control/discovery.t b/CloudronPackages/APISIX/apisix-source/t/control/discovery.t new file mode 100644 index 0000000..7bf81b1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/discovery.t @@ -0,0 +1,221 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + + +# Because this whole test file is only used to verify the configuration set or not, +# but the configuration content is invalid, which contains non-exist consul server address, +# so we have to ignore consul connect errors in some test cases. + + +our $yaml_config = <<_EOC_; +apisix: + enable_control: true + node_listen: 1984 +discovery: + eureka: + host: + - "http://127.0.0.1:8761" + prefix: "/eureka/" + fetch_interval: 10 + weight: 80 + timeout: + connect: 1500 + send: 1500 + read: 1500 + consul_kv: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:8600" + dns: + servers: + - "127.0.0.1:1053" +_EOC_ + + +run_tests(); + +__DATA__ + +=== TEST 1: test consul_kv dump_data api +--- yaml_config eval: $::yaml_config +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, body, res = t.test('/v1/discovery/consul_kv/dump', + ngx.HTTP_GET) + local entity = json.decode(res) + ngx.say(json.encode(entity.services)) + ngx.say(json.encode(entity.config)) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +{} +{"fetch_interval":3,"keepalive":true,"prefix":"upstreams","servers":["http://127.0.0.1:8500","http://127.0.0.1:8600"],"timeout":{"connect":2000,"read":2000,"wait":60},"token":"","weight":1} +--- error_log +connect consul + + + +=== TEST 2: test eureka dump_data api +--- yaml_config eval: $::yaml_config +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, body, res = t.test('/v1/discovery/eureka/dump', + ngx.HTTP_GET, nil, + [[{ + "config": { + "fetch_interval": 10, + "host": [ + "http://127.0.0.1:8761" + ], + "prefix": "/eureka/", + "timeout": { + "connect": 1500, + "read": 1500, + "send": 1500 + }, + "weight": 80 + }, + "services": {} + 
}]] + ) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed +--- error_log +connect consul + + + +=== TEST 3: test dns api +--- yaml_config eval: $::yaml_config +--- request +GET /v1/discovery/dns/dump +--- error_code: 404 +--- error_log +connect consul + + + +=== TEST 4: test unconfigured eureka dump_data api +--- yaml_config +apisix: + enable_control: true + node_listen: 1984 +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:8600" +#END +--- request +GET /v1/discovery/eureka/dump +--- error_code: 404 +--- error_log +connect consul + + + +=== TEST 5: prepare consul kv register nodes +--- config +location /consul1 { + rewrite ^/consul1/(.*) /v1/kv/$1 break; + proxy_pass http://127.0.0.1:8500; +} + +location /consul2 { + rewrite ^/consul2/(.*) /v1/kv/$1 break; + proxy_pass http://127.0.0.1:8600; +} +--- pipelined_requests eval +[ + "DELETE /consul1/upstreams/?recurse=true", + "DELETE /consul2/upstreams/?recurse=true", + "PUT /consul1/upstreams/webpages/127.0.0.1:30511\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /consul1/upstreams/webpages/127.0.0.1:30512\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /consul2/upstreams/webpages/127.0.0.1:30513\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /consul2/upstreams/webpages/127.0.0.1:30514\n" . 
"{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", +] +--- response_body eval +["true", "true", "true", "true", "true", "true"] + + + +=== TEST 6: dump consul_kv services +--- yaml_config eval: $::yaml_config +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + ngx.sleep(2) + + local code, body, res = t.test('/v1/discovery/consul_kv/dump', + ngx.HTTP_GET) + local entity = json.decode(res) + ngx.say(json.encode(entity.services)) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +{"http://127.0.0.1:8500/v1/kv/upstreams/webpages/":[{"host":"127.0.0.1","port":30511,"weight":1},{"host":"127.0.0.1","port":30512,"weight":1}],"http://127.0.0.1:8600/v1/kv/upstreams/webpages/":[{"host":"127.0.0.1","port":30513,"weight":1},{"host":"127.0.0.1","port":30514,"weight":1}]} + + + +=== TEST 7: clean consul kv register nodes +--- config +location /consul1 { + rewrite ^/consul1/(.*) /v1/kv/$1 break; + proxy_pass http://127.0.0.1:8500; +} + +location /consul2 { + rewrite ^/consul2/(.*) /v1/kv/$1 break; + proxy_pass http://127.0.0.1:8600; +} +--- pipelined_requests eval +[ + "DELETE /consul1/upstreams/?recurse=true", + "DELETE /consul2/upstreams/?recurse=true" +] +--- response_body eval +["true", "true"] diff --git a/CloudronPackages/APISIX/apisix-source/t/control/gc.t b/CloudronPackages/APISIX/apisix-source/t/control/gc.t new file mode 100644 index 0000000..bafb574 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/gc.t @@ -0,0 +1,66 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: trigger full gc +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local before = collectgarbage("count") + do + local tab = {} + for i = 1, 10000 do + tab[i] = {"a", 1} + end + end + local after_alloc = collectgarbage("count") + local code = t.test('/v1/gc', + ngx.HTTP_POST + ) + local after_gc = collectgarbage("count") + if code == 200 then + if after_alloc - after_gc > 0.9 * (after_alloc - before) then + ngx.say("ok") + else + ngx.say(before, " ", after_alloc, " ", after_gc) + end + end + } + } +--- response_body +ok diff --git a/CloudronPackages/APISIX/apisix-source/t/control/healthcheck.t b/CloudronPackages/APISIX/apisix-source/t/control/healthcheck.t new file mode 100644 index 0000000..9673ab9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/healthcheck.t @@ -0,0 +1,305 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: upstreams +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +upstreams: + - nodes: + "127.0.0.1:1980": 1 + "127.0.0.2:1988": 0 + type: roundrobin + id: 1 + checks: + active: + http_path: "/status" + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 1 +#END +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local json = require("toolkit.json") + local t = require("lib.test_admin") + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + + ngx.sleep(2.2) + + local _, _, res = t.test('/v1/healthcheck', + ngx.HTTP_GET) + res = json.decode(res) + assert(#res == 1, "invalid number of results") + table.sort(res[1].nodes, function(a, b) + return a.ip < b.ip + end) + ngx.say(core.json.stably_encode(res[1].nodes)) + + local _, _, res = t.test('/v1/healthcheck/upstreams/1', + ngx.HTTP_GET) + res = json.decode(res) + table.sort(res.nodes, function(a, b) + return a.ip < b.ip + end) + ngx.say(core.json.stably_encode(res.nodes)) + + local _, _, res = t.test('/v1/healthcheck/upstreams/1', + ngx.HTTP_GET, nil, nil, {["Accept"] = "text/html"}) + local xml2lua = require("xml2lua") + local xmlhandler = require("xmlhandler.tree") + local handler = xmlhandler:new() + local parser = xml2lua.parser(handler) + parser.parse(parser, res) + local matches = 0 + for _, td in ipairs(handler.root.html.body.table.tr) do + if td.td then + if td.td[4] == "127.0.0.2:1988" then + assert(td.td[5] == "unhealthy", "127.0.0.2:1988 is not unhealthy") + matches = matches + 1 + end + if td.td[4] == "127.0.0.1:1980" then + assert(td.td[5] == "healthy", "127.0.0.1:1980 is not healthy") + matches = matches + 1 + end + end + end + assert(matches == 2, "unexpected html") + } + } +--- grep_error_log eval +qr/unhealthy TCP increment \(.+\) for '[^']+'/ +--- grep_error_log_out +unhealthy TCP increment (1/2) for '127.0.0.2(127.0.0.2:1988)' +unhealthy TCP increment (2/2) for '127.0.0.2(127.0.0.2:1988)' +--- response_body +[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.2","ip":"127.0.0.2","port":1988,"status":"unhealthy"}] 
+[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.2","ip":"127.0.0.2","port":1988,"status":"unhealthy"}] + + + +=== TEST 2: routes +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +routes: + - + id: 1 + uris: + - /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + "127.0.0.1:1988": 1 + type: roundrobin + checks: + active: + http_path: "/status" + host: "127.0.0.1" + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 1 +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + + ngx.sleep(2.2) + + local code, body, res = t.test('/v1/healthcheck', + ngx.HTTP_GET) + res = json.decode(res) + table.sort(res[1].nodes, function(a, b) + return a.port < b.port + end) + ngx.say(json.encode(res)) + + local code, body, res = t.test('/v1/healthcheck/routes/1', + ngx.HTTP_GET) + res = json.decode(res) + table.sort(res.nodes, function(a, b) + return a.port < b.port + end) + ngx.say(json.encode(res)) + } + } +--- grep_error_log eval +qr/unhealthy TCP increment \(.+\) for '[^']+'/ +--- grep_error_log_out +unhealthy TCP increment (1/2) for '127.0.0.1(127.0.0.1:1988)' +unhealthy TCP increment (2/2) for '127.0.0.1(127.0.0.1:1988)' +--- response_body 
+[{"name":"/routes/1","nodes":[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1988,"status":"unhealthy"}],"type":"http"}] +{"name":"/routes/1","nodes":[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1988,"status":"unhealthy"}],"type":"http"} + + + +=== TEST 3: services +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +routes: + - id: 1 + service_id: 1 + uris: + - /hello + +services: + - + id: 1 + upstream: + nodes: + "127.0.0.1:1980": 1 + "127.0.0.1:1988": 1 + type: roundrobin + checks: + active: + http_path: "/status" + host: "127.0.0.1" + port: 1988 + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 1 +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + + ngx.sleep(2.2) + + local code, body, res = t.test('/v1/healthcheck', + ngx.HTTP_GET) + res = json.decode(res) + table.sort(res[1].nodes, function(a, b) + return a.port < b.port + end) + ngx.say(json.encode(res)) + + local code, body, res = t.test('/v1/healthcheck/services/1', + ngx.HTTP_GET) + res = json.decode(res) + table.sort(res.nodes, function(a, b) + return a.port < b.port + end) + ngx.say(json.encode(res)) + } + } +--- grep_error_log eval +qr/unhealthy TCP increment \(.+\) for '[^']+'/ +--- grep_error_log_out +unhealthy TCP increment (1/2) for '127.0.0.1(127.0.0.1:1988)' +unhealthy TCP increment (2/2) for '127.0.0.1(127.0.0.1:1988)' +--- response_body +[{"name":"/services/1","nodes":[{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1988,"status":"unhealthy"}],"type":"http"}] +{"name":"/services/1","nodes":[{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1988,"status":"unhealthy"}],"type":"http"} + + + +=== TEST 4: no checkers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/healthcheck', + ngx.HTTP_GET) + ngx.print(res) + } + } +--- response_body +{} + + + +=== TEST 5: no checker +--- request +GET /v1/healthcheck/routes/1 +--- error_code: 404 +--- response_body +{"error_msg":"routes[1] not found"} + + + +=== TEST 6: invalid src type +--- request +GET /v1/healthcheck/route/1 +--- error_code: 400 +--- response_body +{"error_msg":"invalid src type route"} diff --git a/CloudronPackages/APISIX/apisix-source/t/control/plugin-api.t b/CloudronPackages/APISIX/apisix-source/t/control/plugin-api.t new file mode 100644 index 0000000..d1e4a17 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/plugin-api.t @@ -0,0 +1,55 @@ +# +# 
Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- request +GET /v1/plugin/example-plugin/hello +--- response_body +world + + + +=== TEST 2: set Content-Type for table response +--- request +GET /v1/plugin/example-plugin/hello?json +--- response_body +{"msg":"world"} +--- response_headers +Content-Type: application/json diff --git a/CloudronPackages/APISIX/apisix-source/t/control/plugin-metadata.t b/CloudronPackages/APISIX/apisix-source/t/control/plugin-metadata.t new file mode 100644 index 0000000..cc08368 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/plugin-metadata.t @@ -0,0 +1,113 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadatas +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/plugin_metadata/example-plugin', + ngx.HTTP_PUT, + [[{ + "skey": "val", + "ikey": 1 + }]] + ) + if code >= 300 then + ngx.status = code + return + end + + local code = t('/apisix/admin/plugin_metadata/file-logger', + ngx.HTTP_PUT, + [[ + {"log_format": {"upstream_response_time": "$upstream_response_time"}} + ]] + ) + if code >= 300 then + ngx.status = code + return + end + } + } +--- error_code: 200 + + + +=== TEST 2: dump all plugin metadatas +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local _, _, res = t('/v1/plugin_metadatas', ngx.HTTP_GET) + local json = require("toolkit.json") + res = json.decode(res) + for _, metadata in ipairs(res) do + if metadata.id == "file-logger" then + ngx.say("check log_format: ", metadata.log_format.upstream_response_time == "$upstream_response_time") + elseif metadata.id == "example-plugin" then + ngx.say("check skey: ", metadata.skey == "val") + ngx.say("check ikey: ", metadata.ikey == 1) + end + end + } + } +--- response_body +check log_format: true +check skey: true 
+check ikey: true + + + +=== TEST 3: dump file-logger metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local _, _, res = t('/v1/plugin_metadata/file-logger', ngx.HTTP_GET) + local json = require("toolkit.json") + metadata = json.decode(res) + if metadata.id == "file-logger" then + ngx.say("check log_format: ", metadata.log_format.upstream_response_time == "$upstream_response_time") + end + } + } +--- response_body +check log_format: true + + + +=== TEST 4: plugin without metadata +--- request +GET /v1/plugin_metadata/batch-requests +--- error_code: 404 +--- response_body +{"error_msg":"plugin metadata[batch-requests] not found"} diff --git a/CloudronPackages/APISIX/apisix-source/t/control/plugins-reload.t b/CloudronPackages/APISIX/apisix-source/t/control/plugins-reload.t new file mode 100644 index 0000000..cb9e186 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/plugins-reload.t @@ -0,0 +1,341 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); +workers(2); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: reload plugins +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true + control: + ip: "127.0.0.1" + port: 9090 +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local code, body, res = t.test('/v1/plugins/reload', + ngx.HTTP_PUT) + ngx.say(res) + ngx.sleep(1) + } +} +--- request +GET /t +--- response_body +done +--- error_log +load plugin times: 2 +load plugin times: 2 +start to hot reload plugins +start to hot reload plugins + + + +=== TEST 2: reload plugins when attributes changed +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: true + node_listen: 1984 +plugins: + - example-plugin +plugin_attr: + example-plugin: + val: 0 +--- config +location /t { + content_by_lua_block { + local core = require "apisix.core" + ngx.sleep(0.1) + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 + enable_control: true + control: + ip: "127.0.0.1" + port: 9090 +plugins: + - example-plugin +plugin_attr: + example-plugin: + val: 1 + ]] + require("lib.test_admin").set_config_yaml(data) + + local t = require("lib.test_admin").test + local code, _, org_body = t('/v1/plugins/reload', + ngx.HTTP_PUT) + + ngx.status = code + ngx.say(org_body) + ngx.sleep(0.1) + + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - example-plugin +plugin_attr: + example-plugin: + val: 1 + ]] + require("lib.test_admin").set_config_yaml(data) + + local t = require("lib.test_admin").test + local code, _, org_body = t('/v1/plugins/reload', + ngx.HTTP_PUT) + ngx.say(org_body) + ngx.sleep(0.1) + 
} +} +--- request +GET /t +--- response_body +done +done +--- grep_error_log eval +qr/example-plugin get plugin attr val: \d+/ +--- grep_error_log_out +example-plugin get plugin attr val: 0 +example-plugin get plugin attr val: 0 +example-plugin get plugin attr val: 0 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 +example-plugin get plugin attr val: 1 + + + +=== TEST 3: reload plugins to change prometheus' export uri +--- yaml_config +apisix: + node_listen: 1984 +plugins: + - public-api + - prometheus +plugin_attr: + prometheus: + export_uri: /metrics +--- config +location /t { + content_by_lua_block { + local core = require "apisix.core" + ngx.sleep(0.1) + local t = require("lib.test_admin").test + + -- setup public API route + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/metrics" + }]] + ) + ngx.say(code) + + local code, _, org_body = t('/apisix/metrics', + ngx.HTTP_GET) + ngx.say(code) + + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 + enable_control: true + control: + ip: "127.0.0.1" + port: 9090 +plugins: + - public-api + - prometheus +plugin_attr: + prometheus: + export_uri: /apisix/metrics + ]] + require("lib.test_admin").set_config_yaml(data) + + local code, _, org_body = t('/v1/plugins/reload', + ngx.HTTP_PUT) + + ngx.say(org_body) + + ngx.sleep(0.1) + local code, _, org_body = t('/apisix/metrics', + ngx.HTTP_GET) + ngx.say(code) + } +} +--- request +GET /t +--- response_body +201 +404 +done +200 + + + +=== TEST 4: reload plugins to disable skywalking +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true + control: + ip: "127.0.0.1" + port: 9090 +plugins: + - skywalking +plugin_attr: + skywalking: + service_name: 
APISIX + service_instance_name: "APISIX Instance Name" + endpoint_addr: http://127.0.0.1:12801 + report_interval: 1 +--- config +location /t { + content_by_lua_block { + local core = require "apisix.core" + ngx.sleep(1.2) + local t = require("lib.test_admin").test + + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - prometheus + ]] + require("lib.test_admin").set_config_yaml(data) + + local code, _, org_body = t('/v1/plugins/reload', + ngx.HTTP_PUT) + + ngx.say(org_body) + + ngx.sleep(2) + } +} +--- request +GET /t +--- response_body +done +--- no_error_log +[alert] +--- grep_error_log eval +qr/Instance report fails/ +--- grep_error_log_out +Instance report fails + + + +=== TEST 5: wrong method to reload plugins +--- request +GET /v1/plugins/reload +--- error_code: 404 + + + +=== TEST 6: wrong method to reload plugins +--- request +POST /v1/plugins/reload +--- error_code: 404 + + + +=== TEST 7: reload plugin with data_plane deployment +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local code, body, res = t.test('/v1/plugins/reload', + ngx.HTTP_PUT) + ngx.say(res) + ngx.sleep(1) + } +} +--- request +GET /t +--- response_body +done +--- error_log +load plugin times: 2 +load plugin times: 2 +start to hot reload plugins +start to hot reload plugins diff --git a/CloudronPackages/APISIX/apisix-source/t/control/routes.t b/CloudronPackages/APISIX/apisix-source/t/control/routes.t new file mode 100644 index 0000000..a24bc2c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/routes.t @@ -0,0 +1,142 @@ +# +# Licensed to the Apache Software Foundation 
(ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: routes +--- apisix_yaml +routes: + - + id: 1 + uris: + - /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/routes', + ngx.HTTP_GET) + res = json.decode(res) + if res[1] then + local data = {} + data.uris = res[1].value.uris + data.upstream = res[1].value.upstream + ngx.say(json.encode(data)) + end + } + } +--- response_body +{"upstream":{"hash_on":"vars","nodes":[{"host":"127.0.0.1","port":1980,"weight":1}],"pass_host":"pass","scheme":"http","type":"roundrobin"},"uris":["/hello"]} + + + +=== TEST 2: get route with id 1 +--- 
apisix_yaml +routes: + - + id: 1 + uris: + - /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/route/1', + ngx.HTTP_GET) + res = json.decode(res) + if res then + local data = {} + data.uris = res.value.uris + data.upstream = res.value.upstream + ngx.say(json.encode(data)) + end + } + } +--- response_body +{"upstream":{"hash_on":"vars","nodes":[{"host":"127.0.0.1","port":1980,"weight":1}],"pass_host":"pass","scheme":"http","type":"roundrobin"},"uris":["/hello"]} + + + +=== TEST 3: routes with invalid id +--- apisix_yaml +routes: + - + id: 1 + uris: + - /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/route/2', + ngx.HTTP_GET) + local data = {} + data.status = code + ngx.say(json.encode(data)) + return + } + } +--- response_body +{"status":404} diff --git a/CloudronPackages/APISIX/apisix-source/t/control/schema.t b/CloudronPackages/APISIX/apisix-source/t/control/schema.t new file mode 100644 index 0000000..3491938 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/schema.t @@ -0,0 +1,149 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local code, body, res = t.test('/v1/schema', + ngx.HTTP_GET, + nil, + [[{ + "main": { + "consumer": {"type":"object"}, + "consumer_group": {"type":"object"}, + "global_rule": {"type":"object"}, + "plugin_config": {"type":"object"}, + "plugins": {"type":"array"}, + "proto": {"type":"object"}, + "route": {"type":"object"}, + "service": {"type":"object"}, + "ssl": {"type":"object"}, + "stream_route": {"type":"object"}, + "upstream": {"type":"object"}, + "upstream_hash_header_schema": {"type":"string"}, + "upstream_hash_vars_schema": {"type":"string"}, + },]] .. 
[[ + "plugins": { + "example-plugin": { + "version": 0.1, + "priority": 0, + "schema": { + "type":"object", + "properties": { + "_meta": { + "properties": { + "disable": {"type": "boolean"} + } + } + } + }, + "metadata_schema": {"type":"object"} + }, + "basic-auth": { + "type": "auth", + "consumer_schema": {"type":"object"} + } + }, + "stream_plugins": { + "mqtt-proxy": { + "schema": { + "type":"object", + "properties": { + "_meta": { + "properties": { + "disable": {"type": "boolean"} + } + } + } + }, + "priority": 1000 + } + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: confirm the scope of plugin +--- extra_yaml_config +plugins: + - batch-requests + - error-log-logger + - server-info + - example-plugin + - node-status +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message, res = t('/v1/schema', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + res = json.decode(res) + local global_plugins = {} + local plugins = res["plugins"] + for k, v in pairs(plugins) do + if v.scope == "global" then + global_plugins[k] = v.scope + end + end + ngx.say(json.encode(global_plugins)) + } + } +--- response_body +{"batch-requests":"global","error-log-logger":"global","node-status":"global","server-info":"global"} diff --git a/CloudronPackages/APISIX/apisix-source/t/control/services.t b/CloudronPackages/APISIX/apisix-source/t/control/services.t new file mode 100644 index 0000000..0003bcc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/services.t @@ -0,0 +1,188 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: services +--- apisix_yaml +services: + - + id: 200 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/services', + ngx.HTTP_GET) + res = json.decode(res) + if res[1] then + local data = {} + data.id = res[1].value.id + data.plugins = res[1].value.plugins + data.upstream = res[1].value.upstream + ngx.say(json.encode(data)) + end + return + } + } +--- response_body +{"id":"200","upstream":{"hash_on":"vars","nodes":[{"host":"127.0.0.1","port":1980,"weight":1}],"pass_host":"pass","scheme":"http","type":"roundrobin"}} + + + +=== TEST 2: multiple services +--- apisix_yaml +services: + - + id: 200 + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + - + id: 201 + upstream: + nodes: + "127.0.0.2:1980": 
1 + type: roundrobin +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local core = require("apisix.core") + local code, body, res = t.test('/v1/services', + ngx.HTTP_GET) + res = json.decode(res) + local g_data = {} + for _, r in core.config_util.iterate_values(res) do + local data = {} + data.id = r.value.id + data.plugins = r.value.plugins + data.upstream = r.value.upstream + core.table.insert(g_data, data) + end + ngx.say(json.encode(g_data)) + return + } + } +--- response_body +[{"id":"200","upstream":{"hash_on":"vars","nodes":[{"host":"127.0.0.1","port":1980,"weight":1}],"pass_host":"pass","scheme":"http","type":"roundrobin"}},{"id":"201","upstream":{"hash_on":"vars","nodes":[{"host":"127.0.0.2","port":1980,"weight":1}],"pass_host":"pass","scheme":"http","type":"roundrobin"}}] + + + +=== TEST 3: get service with id 5 +--- apisix_yaml +services: + - + id: 5 + plugins: + limit-count: + count: 2 + time_window: 60 + rejected_code: 503 + key: remote_addr + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/service/5', + ngx.HTTP_GET) + res = json.decode(res) + if res then + local data = {} + data.id = res.value.id + data.plugins = res.value.plugins + data.upstream = res.value.upstream + ngx.say(json.encode(data)) + end + return + } + } +--- response_body +{"id":"5","plugins":{"limit-count":{"allow_degradation":false,"count":2,"key":"remote_addr","key_type":"var","policy":"local","rejected_code":503,"show_limit_quota_header":true,"time_window":60}},"upstream":{"hash_on":"vars","nodes":[{"host":"127.0.0.1","port":1980,"weight":1}],"pass_host":"pass","scheme":"http","type":"roundrobin"}} + + + +=== TEST 4: services with invalid id +--- apisix_yaml +services: + - + id: 1 + upstream: + nodes: + 
"127.0.0.1:1980": 1 + type: roundrobin +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/service/2', + ngx.HTTP_GET) + local data = {} + data.status = code + ngx.say(json.encode(data)) + return + } + } +--- response_body +{"status":404} diff --git a/CloudronPackages/APISIX/apisix-source/t/control/upstreams.t b/CloudronPackages/APISIX/apisix-source/t/control/upstreams.t new file mode 100644 index 0000000..09e9104 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/control/upstreams.t @@ -0,0 +1,146 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: dump all upstreams +--- apisix_yaml +upstreams: + - + id: 1 + nodes: + "127.0.0.1:8001": 1 + type: roundrobin + - + id: 2 + nodes: + "127.0.0.1:8002": 1 + type: roundrobin +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/upstreams', + ngx.HTTP_GET) + res = json.decode(res) + if res[2] and table.getn(res) == 2 then + local data = {} + data.nodes = res[2].value.nodes + ngx.say(json.encode(data)) + end + } + } +--- response_body +{"nodes":[{"host":"127.0.0.1","port":8002,"weight":1}]} + + + +=== TEST 2: dump specific upstream with id 1 +--- apisix_yaml +upstreams: + - + id: 1 + nodes: + "127.0.0.1:8001": 1 + type: roundrobin + - + id: 2 + nodes: + "127.0.0.1:8002": 1 + type: roundrobin +#END +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/upstream/1', + ngx.HTTP_GET) + res = json.decode(res) + if res then + local data = {} + data.nodes = res.value.nodes + ngx.say(json.encode(data)) + end + } + } +--- response_body +{"nodes":[{"host":"127.0.0.1","port":8001,"weight":1}]} + + + +=== TEST 3: upstreams with invalid id +--- apisix_yaml +upstreams: + - + id: 1 + nodes: + "127.0.0.1:8001": 1 + type: roundrobin + - + id: 2 + nodes: + "127.0.0.1:8002": 1 + type: roundrobin +#END +--- config + location /t { + 
content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local code, body, res = t.test('/v1/upstream/3', + ngx.HTTP_GET) + local data = {} + data.status = code + ngx.say(json.encode(data)) + return + } + } +--- response_body +{"status":404} diff --git a/CloudronPackages/APISIX/apisix-source/t/core/config-default.t b/CloudronPackages/APISIX/apisix-source/t/core/config-default.t new file mode 100644 index 0000000..f19392d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/config-default.t @@ -0,0 +1,140 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local encode_json = require("toolkit.json").encode + local config = require("apisix.core").config.local_conf() + + ngx.say("node_listen: ", config.apisix.node_listen) + ngx.say("stream_proxy: ", encode_json(config.apisix.stream_proxy)) + ngx.say("admin_key: ", encode_json(config.deployment.admin.admin_key)) + } + } +--- request +GET /t +--- response_body +node_listen: 1984 +stream_proxy: {"tcp":[9100]} +admin_key: null + + + +=== TEST 2: wrong type: expect: table, but got: string +--- yaml_config +apisix: + node_listen: xxxx +--- must_die +--- error_log +failed to parse yaml config: failed to merge, path[apisix->node_listen] expect: table, but got: string + + + +=== TEST 3: use `null` means delete +--- yaml_config +deployment: + admin: + admin_key: null +--- config + location /t { + content_by_lua_block { + local encode_json = require("toolkit.json").encode + local config = require("apisix.core").config.local_conf() + + ngx.say("admin_key: ", encode_json(config.deployment.admin.admin_key)) + } +} +--- request +GET /t +--- response_body +admin_key: null + + + +=== TEST 4: use `~` means delete +--- yaml_config +deployment: + admin: + admin_key: null +--- config + location /t { + content_by_lua_block { + local encode_json = require("toolkit.json").encode + local config = require("apisix.core").config.local_conf() + + ngx.say("admin_key: ", encode_json(config.deployment.admin.admin_key)) + } +} +--- request +GET /t +--- response_body +admin_key: null + + + +=== TEST 5: support listen multiple ports with array +--- yaml_config +apisix: + node_listen: + - 1985 + - 1986 +--- config + location /t { + content_by_lua_block { + local encode_json = require("toolkit.json").encode + local config = require("apisix.core").config.local_conf() + + ngx.say("node_listen: ", encode_json(config.apisix.node_listen)) + 
} +} +--- request +GET /t +--- response_body +node_listen: [1985,1986] + + + +=== TEST 6: support listen multiple ports with array table +--- yaml_config +apisix: + node_listen: + - port: 1985 + - port: 1986 + enable_http2: true +--- config + location /t { + content_by_lua_block { + local encode_json = require("toolkit.json").encode + local config = require("apisix.core").config.local_conf() + + ngx.say("node_listen: ", encode_json(config.apisix.node_listen)) + } +} +--- request +GET /t +--- response_body +node_listen: [{"port":1985},{"port":1986}] diff --git a/CloudronPackages/APISIX/apisix-source/t/core/config.t b/CloudronPackages/APISIX/apisix-source/t/core/config.t new file mode 100644 index 0000000..29d1cc5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/config.t @@ -0,0 +1,347 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local encode_json = require("toolkit.json").encode + local config = require("apisix.core").config.local_conf() + + ngx.say("etcd host: ", config.etcd.host) + ngx.say("first plugin: ", encode_json(config.plugins[1])) + } + } +--- request +GET /t +--- response_body +etcd host: http://127.0.0.1:2379 +first plugin: "real-ip" + + + +=== TEST 2: different elements in yaml +--- config + location /t { + content_by_lua_block { + local encode_json = require("toolkit.json").encode + local config = require("apisix.core").config.local_conf() + + ngx.say("etcd host: ", config.etcd.host) + ngx.say("first plugin: ", encode_json(config.plugins[1])) + ngx.say("seq: ", encode_json(config.seq)) + } + } +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" # etcd address + prefix: "/apisix" # apisix configurations prefix + timeout: 1 +plugins: + - example-plugin + +# Collection Types ############################################################# +################################################################################ + +# http://yaml.org/type/map.html -----------------------------------------------# + +map: + # Unordered set of key: value pairs. + Block style: !!map + Clark : Evans + Ingy : döt Net + Oren : Ben-Kiki + Flow style: !!map { Clark: Evans, Ingy: döt Net, Oren: Ben-Kiki } + +# http://yaml.org/type/omap.html ----------------------------------------------# + +omap: + # Explicitly typed ordered map (dictionary). + Bestiary: !!omap + - aardvark: African pig-like ant eater. Ugly. + - anteater: South-American ant eater. Two species. + - anaconda: South-American constrictor snake. Scaly. + # Etc. 
+ # Flow style + Numbers: !!omap [ one: 1, two: 2, three : 3 ] + +# http://yaml.org/type/pairs.html ---------------------------------------------# + +pairs: + # Explicitly typed pairs. + Block tasks: !!pairs + - meeting: with team. + - meeting: with boss. + - break: lunch. + - meeting: with client. + Flow tasks: !!pairs [ meeting: with team, meeting: with boss ] + +# http://yaml.org/type/set.html -----------------------------------------------# + +set: + # Explicitly typed set. + baseball players: !!set + ? Mark McGwire + ? Sammy Sosa + ? Ken Griffey + # Flow style + baseball teams: !!set { Boston Red Sox, Detroit Tigers, New York Yankees } + +# http://yaml.org/type/seq.html -----------------------------------------------# + +seq: + # Ordered sequence of nodes + Block style: !!seq + - Mercury # Rotates - no light/dark sides. + - Venus # Deadliest. Aptly named. + - Earth # Mostly dirt. + - Mars # Seems empty. + - Jupiter # The king. + - Saturn # Pretty. + - Uranus # Where the sun hardly shines. + - Neptune # Boring. No rings. + - Pluto # You call this a planet? 
+ Flow style: !!seq [ Mercury, Venus, Earth, Mars, # Rocks + Jupiter, Saturn, Uranus, Neptune, # Gas + Pluto ] # Overrated + + +# Scalar Types ################################################################# +################################################################################ + +# http://yaml.org/type/bool.html ----------------------------------------------# + +bool: + - true + - True + - TRUE + - false + - False + - FALSE + +# http://yaml.org/type/float.html ---------------------------------------------# + +float: + canonical: 6.8523015e+5 + exponential: 685.230_15e+03 + fixed: 685_230.15 + sexagesimal: 190:20:30.15 + negative infinity: -.inf + not a number: .NaN + +# http://yaml.org/type/int.html -----------------------------------------------# + +int: + canonical: 685230 + decimal: +685_230 + octal: 02472256 + hexadecimal: 0x_0A_74_AE + binary: 0b1010_0111_0100_1010_1110 + sexagesimal: 190:20:30 + +# http://yaml.org/type/merge.html ---------------------------------------------# + +merge: + - &CENTER { x: 1, y: 2 } + - &LEFT { x: 0, y: 2 } + - &BIG { r: 10 } + - &SMALL { r: 1 } + + # All the following maps are equal: + + - # Explicit keys + x: 1 + y: 2 + r: 10 + label: nothing + + - # Merge one map + << : *CENTER + r: 10 + label: center + + - # Merge multiple maps + << : [ *CENTER, *BIG ] + label: center/big + + - # Override + << : [ *BIG, *LEFT, *SMALL ] + x: 1 + label: big/left/small + +# http://yaml.org/type/null.html ----------------------------------------------# + +null: + # This mapping has four keys, + # one has a value. + empty: + canonical: ~ + english: null + ~: null key + # This sequence has five + # entries, two have values. 
+ sparse: + - ~ + - 2nd entry + - + - 4th entry + - Null + +# http://yaml.org/type/str.html -----------------------------------------------# + +string: + inline1: abcd + inline2: "abcd" + inline3: 'abcd' + block1: | + aaa + bbb + ccc + block2: |+ + aaa + bbb + ccc + block3: |- + aaa + bbb + ccc + block4: > + aaa + bbb + ccc + text5: >+ + aaa + bbb + ccc + text6: >- + aaa + bbb + ccc +# http://yaml.org/type/timestamp.html -----------------------------------------# + +timestamp: + canonical: 2001-12-15T02:59:43.1Z + valid iso8601: 2001-12-14t21:59:43.10-05:00 + space separated: 2001-12-14 21:59:43.10 -5 + no time zone (Z): 2001-12-15 2:59:43.10 + date (00:00:00Z): 2002-12-14 + + +# JavaScript Specific Types #################################################### +################################################################################ + +# https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/RegExp + +regexp: + simple: !!js/regexp foobar + modifiers: !!js/regexp /foobar/mi + +# https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/undefined + +undefined: !!js/undefined ~ + +# https://developer.mozilla.org/en/JavaScript/Reference/Global_Objects/Function + +function: !!js/function > + function foobar() { + return 'Wow! JS-YAML Rocks!'; + } + + +# Custom types ################################################################# +################################################################################ + + +# JS-YAML allows you to specify a custom YAML types for your structures. 
+# This is a simple example of custom constructor defined in `js/demo.js` for +# custom `!sexy` type: +# +# var SexyYamlType = new jsyaml.Type('!sexy', { +# kind: 'sequence', +# construct: function (data) { +# return data.map(function (string) { return 'sexy ' + string; }); +# } +# }); +# +# var SEXY_SCHEMA = jsyaml.Schema.create([ SexyYamlType ]); +# +# result = jsyaml.load(yourData, { schema: SEXY_SCHEMA }); + +foobar: !sexy + - bunny + - chocolate +--- request +GET /t +--- response_body +etcd host: http://127.0.0.1:2379 +first plugin: "example-plugin" +seq: {"Block style":["Mercury","Venus","Earth","Mars","Jupiter","Saturn","Uranus","Neptune","Pluto"],"Flow style":["Mercury","Venus","Earth","Mars","Jupiter","Saturn","Uranus","Neptune","Pluto"]} + + + +=== TEST 3: allow environment variable +--- config + location /t { + content_by_lua_block { + local config = require("apisix.core").config.local_conf() + + ngx.say(config.apisix.id) + } + } +--- main_config +env AID=3; +--- yaml_config +#nginx_config: + #env: AID=3 +apisix: + id: ${{ AID }} +--- request +GET /t +--- response_body +3 + + + +=== TEST 4: allow integer worker processes +--- config + location /t { + content_by_lua_block { + local config = require("apisix.core").config.local_conf() + + ngx.say(config.nginx_config.worker_processes) + } + } +--- extra_yaml_config +nginx_config: + worker_processes: 1 +--- request +GET /t +--- response_body +1 diff --git a/CloudronPackages/APISIX/apisix-source/t/core/config_etcd.t b/CloudronPackages/APISIX/apisix-source/t/core/config_etcd.t new file mode 100644 index 0000000..9425156 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/config_etcd.t @@ -0,0 +1,516 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: wrong etcd port +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - "http://127.0.0.1:7777" # wrong etcd port + timeout: 1 +--- config + location /t { + content_by_lua_block { + ngx.sleep(8) + ngx.say(body) + } + } +--- timeout: 12 +--- request +GET /t +--- grep_error_log eval +qr{connection refused} +--- grep_error_log_out eval +qr/(connection refused){1,}/ + + + +=== TEST 2: originate TLS connection to etcd cluster without TLS configuration +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:2379" +--- extra_init_by_lua +local health_check = require("resty.etcd.health_check") +health_check.get_target_status = function() + return true +end +--- config + location /t { + content_by_lua_block { + ngx.sleep(4) + ngx.say("ok") + } + } +--- timeout: 5 +--- request +GET /t +--- grep_error_log chop +peer closed connection in SSL handshake +--- grep_error_log_out eval +qr/(peer closed connection in SSL handshake){1,}/ + + + +=== TEST 3: originate plain connection to etcd cluster which enables TLS +--- yaml_config +apisix: + node_listen: 1984 
+deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:12379" +--- config + location /t { + content_by_lua_block { + ngx.sleep(4) + ngx.say("ok") + } + } +--- timeout: 5 +--- request +GET /t +--- grep_error_log chop +closed +--- grep_error_log_out eval +qr/(closed){1,}/ + + + +=== TEST 4: set route(id: 1) to etcd cluster with TLS +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null + etcd: + host: + - "https://127.0.0.1:12379" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: get route(id: 1) from etcd cluster with TLS +--- yaml_config +apisix: + node_listen: 1984 + admin_key: null +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: ~ + etcd: + host: + - "https://127.0.0.1:12379" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_GET, + nil + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: ensure only one auth request per subsystem for all the etcd sync +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:1980" # fake server port + timeout: 1 + user: root # root username for etcd + password: 5tHkHhYkjr6cQY # root password for etcd +--- extra_init_by_lua +local health_check = 
require("resty.etcd.health_check") +health_check.get_target_status = function() + return true +end +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.5) + } + } +--- request +GET /t +--- grep_error_log eval +qr/etcd auth failed/ +--- grep_error_log_out +etcd auth failed +etcd auth failed +etcd auth failed + + + +=== TEST 7: ensure add prefix automatically for _M.getkey +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + local config = core.config.new() + local res = config:getkey("/routes/") + if res and res.status == 200 and res.body + and res.body.count and tonumber(res.body.count) >= 1 then + ngx.say("passed") + else + ngx.say("failed") + end + + local res = config:getkey("/phantomkey") + if res and res.status == 404 then + ngx.say("passed") + else + ngx.say("failed") + end + } + } +--- request +GET /t +--- response_body +passed +passed + + + +=== TEST 8: Test ETCD health check mode switch during APISIX startup +--- config + location /t { + content_by_lua_block { + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/healthy check use \S+ \w+/ +--- grep_error_log_out eval +qr/healthy check use round robin +(healthy check use ngx.shared dict){1,}/ + + + +=== TEST 9: last_err can be nil when the reconnection is successful +--- config + location /t { + content_by_lua_block { + local config_etcd = require("apisix.core.config_etcd") + local count = 0 + config_etcd.inject_sync_data(function() + if count % 2 == 0 then + count = count + 1 + return nil, "has no healthy etcd endpoint available" + else + return true + end + end) + config_etcd.test_automatic_fetch(false, { + running = true, + resync_delay = 1, + }) + ngx.say("passed") + } + } +--- request +GET /t +--- error_log +reconnected to etcd +--- response_body +passed + + + +=== TEST 10: reloaded data may be in res.body.node (special kvs structure) +--- yaml_config +deployment: + role: traditional + 
role_traditional: + config_provider: etcd + admin: + admin_key: null +--- config + location /t { + content_by_lua_block { + local config_etcd = require("apisix.core.config_etcd") + local etcd_cli = {} + function etcd_cli.readdir() + return { + status = 200, + headers = {}, + body = { + header = {revision = 1}, + kvs = {{key = "foo", value = "bar"}}, + } + } + end + config_etcd.test_sync_data({ + etcd_cli = etcd_cli, + key = "fake", + single_item = true, + -- need_reload because something wrong happened before + need_reload = true, + upgrade_version = function() end, + conf_version = 1, + }) + } + } +--- request +GET /t +--- log_level: debug +--- grep_error_log eval +qr/readdir key: fake res: .+/ +--- grep_error_log_out eval +qr/readdir key: fake res: \[\{("value":"bar","key":"foo"|"key":"foo","value":"bar")\}\]/ +--- wait: 1 +--- no_error_log +[error] + + + +=== TEST 11: reloaded data may be in res.body.node (admin_api_version is v2) +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null + admin_api_version: v2 +--- config + location /t { + content_by_lua_block { + local config_etcd = require("apisix.core.config_etcd") + local etcd_cli = {} + function etcd_cli.readdir() + return { + status = 200, + headers = {}, + body = { + header = {revision = 1}, + kvs = { + {key = "/foo"}, + {key = "/foo/bar", value = {"bar"}} + }, + } + } + end + config_etcd.test_sync_data({ + etcd_cli = etcd_cli, + key = "fake", + -- need_reload because something wrong happened before + need_reload = true, + upgrade_version = function() end, + conf_version = 1, + }) + } + } +--- request +GET /t +--- log_level: debug +--- grep_error_log eval +qr/readdir key: fake res: .+/ +--- grep_error_log_out eval +qr/readdir key: fake res: \{.*"nodes":\[\{.*"value":\["bar"\].*\}\].*\}/ +--- wait: 1 +--- no_error_log +[error] + + + +=== TEST 12: test route with special character "-" +--- yaml_config +deployment: + role: traditional + 
role_traditional: + config_provider: etcd + admin: + admin_key: null + etcd: + prefix: "/apisix-test" +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.5) + + local http = require "resty.http" + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + + -- hit + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET" + }) + + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.print(res.body) + + -- delete route + code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + -- hit + res, err = httpc:request_uri(uri, { + method = "GET" + }) + + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +passed +hello world +passed +{"error_msg":"404 Route Not Found"} + + + +=== TEST 13: the main watcher should be initialised once +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null + etcd: + host: + - "http://127.0.0.1:2379" + watch_timeout: 1 +--- config + location /t { + content_by_lua_block { + ngx.sleep(1) + } + } +--- request +GET /t +--- grep_error_log eval +qr/main etcd watcher initialised, revision=/ +--- grep_error_log_out +main etcd watcher initialised, revision= +main etcd watcher initialised, revision= diff --git a/CloudronPackages/APISIX/apisix-source/t/core/config_util.t b/CloudronPackages/APISIX/apisix-source/t/core/config_util.t new file mode 100644 index 0000000..6d9e1e2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/config_util.t @@ -0,0 +1,119 
@@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: parse_time_unit +--- config + location /t { + content_by_lua_block { + local parse_time_unit = require("apisix.core.config_util").parse_time_unit + for _, case in ipairs({ + {exp = 1, input = "1"}, + {exp = 1, input = "1s"}, + {exp = 60, input = "60s"}, + {exp = 1.1, input = "1s100ms"}, + {exp = 10.001, input = "10s1ms"}, + {exp = 3600, input = "60m"}, + {exp = 3600.11, input = "60m110ms"}, + {exp = 3710, input = "1h110"}, + {exp = 5400, input = "1h 30m"}, + {exp = 34822861.001, input = "1y1M1w1d1h1m1s1ms"}, + }) do + assert(case.exp == parse_time_unit(case.input), + string.format("input %s, got %s", case.input, + parse_time_unit(case.input))) + end + + for _, case in ipairs({ + {exp = "invalid data: -", input = "-1"}, + {exp = "unexpected unit: h", input = "1m1h"}, + {exp = "invalid data: ", input = ""}, + {exp = 
"specific unit conflicts with the default unit second", input = "1s1"}, + }) do + local _, err = parse_time_unit(case.input) + assert(case.exp == err, + string.format("input %s, got %s", case.input, err)) + end + } + } + + + +=== TEST 2: add_clean_handler / cancel_clean_handler / fire_all_clean_handlers +--- config + location /t { + content_by_lua_block { + local util = require("apisix.core.config_util") + local function setup() + local item = {clean_handlers = {}} + local idx1 = util.add_clean_handler(item, function() + ngx.log(ngx.WARN, "fire one") + end) + local idx2 = util.add_clean_handler(item, function() + ngx.log(ngx.WARN, "fire two") + end) + return item, idx1, idx2 + end + + local function setup_to_false() + local item = false + return item + end + + local item, idx1, idx2 = setup() + util.cancel_clean_handler(item, idx1, true) + util.cancel_clean_handler(item, idx2, true) + + local item, idx1, idx2 = setup() + util.fire_all_clean_handlers(item) + + local item, idx1, idx2 = setup() + util.cancel_clean_handler(item, idx2) + util.fire_all_clean_handlers(item) + + local item, idx1, idx2 = setup() + util.cancel_clean_handler(item, idx1) + util.fire_all_clean_handlers(item) + + local item = setup_to_false() + util.fire_all_clean_handlers(item) + } + } +--- grep_error_log eval +qr/fire \w+/ +--- grep_error_log_out eval +"fire one\nfire two\n" x 3 diff --git a/CloudronPackages/APISIX/apisix-source/t/core/ctx.t b/CloudronPackages/APISIX/apisix-source/t/core/ctx.t new file mode 100644 index 0000000..d49d876 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/ctx.t @@ -0,0 +1,917 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local ctx = {} + core.ctx.set_vars_meta(ctx) + + ngx.say("remote_addr: ", ctx.var["remote_addr"]) + ngx.say("server_port: ", ctx.var["server_port"]) + } + } +--- request +GET /t +--- response_body +remote_addr: 127.0.0.1 +server_port: 1984 + + + +=== TEST 2: http header +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local ctx = {} + core.ctx.set_vars_meta(ctx) + + ngx.say("http_host: ", ctx.var["http_host"]) + } + } +--- request +GET /t +--- response_body +http_host: localhost + + + +=== TEST 3: cookie + no cookie +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local ctx = {} + core.ctx.set_vars_meta(ctx) + + ngx.say("cookie_host: ", ctx.var["cookie_host"]) + } + } +--- request +GET /t?a=aaa +--- response_body +cookie_host: nil +--- no_error_log +failed to fetch cookie value by key + + + +=== TEST 4: cookie +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local ctx = {} + core.ctx.set_vars_meta(ctx) + + ngx.say("cookie_a: ", ctx.var["cookie_a"]) + ngx.say("cookie_b: ", ctx.var["cookie_b"]) + ngx.say("cookie_c: ", ctx.var["cookie_c"]) + 
ngx.say("cookie_d: ", ctx.var["cookie_d"]) + ngx.say("cookie with dash and uppercase: ", ngx.var["cookie_X-user-id"]) + ngx.say("cookie with []: ", ngx.var["cookie_user[id]"]) + ngx.say("cookie with .: ", ngx.var["cookie_user.id"]) + } + } +--- more_headers +Cookie: a=a; b=bb; c=ccc; X-user-id=2; user[id]=3; user.id=4 +--- request +GET /t?a=aaa +--- response_body +cookie_a: a +cookie_b: bb +cookie_c: ccc +cookie_d: nil +cookie with dash and uppercase: 2 +cookie with []: 3 +cookie with .: 4 + + + +=== TEST 5: key is nil +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local ctx = {} + core.ctx.set_vars_meta(ctx) + + ngx.say("cookie_a: ", ctx.var[nil]) + } + } +--- more_headers +Cookie: a=a; b=bb; c=ccc +--- request +GET /t?a=aaa +--- error_code: 500 +--- error_log +invalid argument, expect string value + + + +=== TEST 6: key is number +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local ctx = {} + core.ctx.set_vars_meta(ctx) + + ngx.say("cookie_a: ", ctx.var[2222]) + } + } +--- more_headers +Cookie: a=a; b=bb; c=ccc +--- request +GET /t?a=aaa +--- error_code: 500 +--- error_log +invalid argument, expect string value + + + +=== TEST 7: add route and get `route_id` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "serverless-pre-function": { + "phase": "access", + "functions" : ["return function() ngx.log(ngx.INFO, \"route_id: \", ngx.ctx.api_ctx.var.route_id) end"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: `url` exist and `route_id` is 1 +--- request +GET /hello +--- response_body +hello world +--- error_log 
+route_id: 1 + + + +=== TEST 9: create a service and `service_id` is 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "desc": "new_service" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: the route object not bind any service object +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "serverless-pre-function": { + "phase": "access", + "functions" : ["return function() ngx.log(ngx.INFO, \"service_id: \", ngx.ctx.api_ctx.var.service_id or 'empty route_id') end"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: service_id is empty +--- request +GET /hello +--- response_body +hello world +--- error_log +service_id: empty route_id + + + +=== TEST 12: update route and binding service_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "service_id": 1, + "plugins": { + "serverless-pre-function": { + "phase": "access", + "functions" : ["return function() ngx.log(ngx.INFO, \"service_id: \", ngx.ctx.api_ctx.var.service_id) end"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: service_id is 1 +--- request +GET /hello +--- response_body +hello world +--- error_log +service_id: 1 + + + +=== TEST 
14: create consumer and bind key-auth plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "consumer_jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: create route and consumer_name is consumer_jack +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "plugins": { + "key-auth": {}, + "serverless-pre-function": { + "phase": "access", + "functions" : ["return function() ngx.log(ngx.INFO, \"consumer_name: \", ngx.ctx.api_ctx.var.consumer_name) end"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: consumer_name is `consumer_jack` +--- request +GET /hello +--- more_headers +apikey: auth-jack +--- response_body +hello world +--- error_log +consumer_name: consumer_jack + + + +=== TEST 17: update the route, and the consumer_name is nil +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "plugins": { + "serverless-pre-function": { + "phase": "access", + "functions" : ["return function() ngx.log(ngx.INFO, \"consumer_name: \", ngx.ctx.api_ctx.var.consumer_name or 'consumer_name is nil') end"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- 
request +GET /t +--- response_body +passed + + + +=== TEST 18: consumer_name is empty +--- request +GET /hello +--- response_body +hello world +--- error_log +consumer_name: consumer_name is nil + + + +=== TEST 19: create route and consumer_name is consumer_jack +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "plugins": { + "key-auth": {}, + "serverless-pre-function": { + "phase": "access", + "functions" : ["return function() ngx.log(ngx.INFO, \"consumer_name: \", ngx.ctx.api_ctx.var.consumer_name) end"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: consumer_name is `consumer_jack` +--- request +GET /hello +--- more_headers +apikey: auth-jack +--- response_body +hello world +--- error_log +consumer_name: consumer_jack + + + +=== TEST 21: update the route, and the consumer_name is nil +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "plugins": { + "serverless-pre-function": { + "phase": "access", + "functions" : ["return function() ngx.log(ngx.INFO, \"consumer_name: \", ngx.ctx.api_ctx.var.consumer_name or 'consumer_name is nil') end"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: consumer_name is nil +--- request +GET /hello +--- response_body +hello world +--- error_log +consumer_name: consumer_name is nil + + + +=== TEST 23: add plugin metadata 
`service_name` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/http-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "service_name": "$service_name" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: add `http-logger` plugin on service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "name": "ctx_var-support-service_name", + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "concat_method": "json" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 25: route binding service and concat_method is json +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: hit route and report http logger +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/request log: \{"route_id":"1","service_id":"1","service_name":"ctx_var-support-service_name"\}/ + + + +=== TEST 27: log_format is configured with `service_name`, but there is no matching service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body 
= t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "concat_method": "json" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 28: hit route but there is no matching service +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/request log: \{"route_id":"1"\}/ + + + +=== TEST 29: add plugin metadata `route_name` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/http-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "route_name": "$route_name" + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 30: sanity, batch_max_size=1 and concat_method is json +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "ctx_var-support-route_name", + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "concat_method": "json" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 31: hit route and report http logger +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/request log: \{"route_id":"1","route_name":"ctx_var-support-route_name"\}/ + + + +=== TEST 32: 
missing `name` field, batch_max_size=1 and concat_method is json +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "concat_method": "json" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 33: hit route and report http logger +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/request log: \{"route_id":"1"\}/ + + + +=== TEST 34: add metadata, service and route, and the service is bound to the route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/http-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "route_name": "$route_name", + "service_name": "$service_name" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "name": "my_service", + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "concat_method": "json" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "my_route", + "uri": "/hello", + "service_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== 
TEST 35: hit route and route_name and service_name are different +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/request log: \{"route_id":"1","route_name":"my_route","service_id":"1","service_name":"my_service"\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/core/ctx2.t b/CloudronPackages/APISIX/apisix-source/t/core/ctx2.t new file mode 100644 index 0000000..7782ac9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/ctx2.t @@ -0,0 +1,449 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->response_body) { + $block->set_value("response_body", "passed\n"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: should update cached ctx.var +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "functions" : ["return function(conf, ctx) + ngx.log(ngx.WARN, 'pre uri: ', ctx.var.upstream_uri); + ctx.var.upstream_uri = '/server_port'; + ngx.log(ngx.WARN, 'post uri: ', ctx.var.upstream_uri); + end"] + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/xxx" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 2: check +--- request +GET /xxx +--- response_body chomp +1980 +--- error_log +pre uri: /hello +post uri: /server_port + + + +=== TEST 3: get balancer_ip and balancer_port through ctx.var +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "serverless-post-function": { + "phase": "log", + "functions" : ["return function(conf, ctx) + ngx.log(ngx.WARN, 'balancer_ip: ', ctx.var.balancer_ip) + ngx.log(ngx.WARN, 'balancer_port: ', ctx.var.balancer_port) + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 4: check(balancer_ip is 
127.0.0.1 and balancer_port is 1980) +--- request +GET /hello +--- response_body +hello world +--- grep_error_log eval +qr/balancer_ip: 127.0.0.1|balancer_port: 1980/ +--- grep_error_log_out +balancer_ip: 127.0.0.1 +balancer_port: 1980 + + + +=== TEST 5: parsed graphql is cached under ctx +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["POST"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "serverless-pre-function": { + "phase": "header_filter", + "functions" : ["return function(conf, ctx) ngx.log(ngx.WARN, 'find ctx._graphql: ', ctx._graphql ~= nil) end"] + } + }, + "uri": "/hello", + "vars": [["graphql_name", "==", "repo"]] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit +--- request +POST /hello +query repo { + owner { + name + } +} +--- response_body +hello world +--- error_log +find ctx._graphql: true + + + +=== TEST 7: support dash in the args +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [["arg_a-b", "==", "ab"]] + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 8: check (support dash in the args) +--- request +GET /hello?a-b=ab +--- response_body +hello world + + + +=== TEST 9: support dash in the args(Multi args with the same name, only fetch the first one) +--- request +GET /hello?a-b=ab&a-b=ccc +--- response_body +hello world + + + +=== TEST 10: support dash in the args(arg is missing) +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 11: 
parsed post args is cached under ctx +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) ngx.log(ngx.WARN, 'find ctx.req_post_args.test: ', ctx.req_post_args.test ~= nil) end"] + } + }, + "uri": "/hello", + "vars": [["post_arg_test", "==", "test"]] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: hit +--- request +POST /hello +test=test +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- response_body +hello world +--- error_log +find ctx.req_post_args.test: true + + + +=== TEST 13: hit with charset +--- request +POST /hello +test=test +--- more_headers +Content-Type: application/x-www-form-urlencoded;charset=utf-8 +--- response_body +hello world +--- error_log +find ctx.req_post_args.test: true + + + +=== TEST 14: missed (post_arg_test is missing) +--- request +POST /hello +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 15: missed (post_arg_test is mismatch) +--- request +POST /hello +test=tesy +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 16: register custom variable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) 
ngx.say('find ctx.var.a6_labels_zone: ', ctx.var.a6_labels_zone) end"] + } + }, + "uri": "/hello", + "labels": { + "zone": "Singapore" + } + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 17: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local core = require "apisix.core" + core.ctx.register_var("a6_labels_zone", function(ctx) + local route = ctx.matched_route and ctx.matched_route.value + if route and route.labels then + return route.labels.zone + end + return nil + end) + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res = assert(httpc:request_uri(uri)) + ngx.print(res.body) + } + } +--- response_body +find ctx.var.a6_labels_zone: Singapore + + + +=== TEST 18: register custom variable with no cacheable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) ngx.say('find ctx.var.a6_count: ', ctx.var.a6_count) end"] + }, + "serverless-post-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) ngx.say('find ctx.var.a6_count: ', ctx.var.a6_count) end"] + } + }, + "uri": "/hello" + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 19: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local core = require "apisix.core" + core.ctx.register_var("a6_count", function(ctx) + if not ctx.a6_count then + ctx.a6_count = 0 + end + ctx.a6_count = ctx.a6_count + 1 + return ctx.a6_count + end, {no_cacheable = true}) + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res = assert(httpc:request_uri(uri)) + ngx.print(res.body) + } + } +--- response_body +find ctx.var.a6_count: 1 +find ctx.var.a6_count: 2 diff --git a/CloudronPackages/APISIX/apisix-source/t/core/ctx3.t b/CloudronPackages/APISIX/apisix-source/t/core/ctx3.t new file mode 100644 index 0000000..69f3057 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/ctx3.t @@ -0,0 +1,101 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('debug'); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->response_body) { + $block->set_value("response_body", "passed\n"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: parse graphql only once and use subsequent from cache +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["POST"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "serverless-pre-function": { + "phase": "header_filter", + "functions" : ["return function(conf, ctx) + ngx.log(ngx.WARN, 'find ctx._graphql: ', ctx.var.graphql_name == \"repo\"); + ngx.log(ngx.WARN, 'find ctx._graphql: ', ctx.var.graphql_name == \"repo\"); + ngx.log(ngx.WARN, 'find ctx._graphql: ', ctx.var.graphql_name == \"repo\"); + end"] + } + }, + "uri": "/hello", + "vars": [["graphql_name", "==", "repo"]] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- request +POST /hello +query repo { + owner { + name + } +} +--- response_body +hello world +--- error_log +--- grep_error_log eval +qr/serving ctx value from cache for key: graphql_name/ +--- grep_error_log_out +serving ctx value from cache for key: graphql_name +serving ctx value from cache for key: graphql_name +serving ctx value from cache for key: graphql_name diff --git a/CloudronPackages/APISIX/apisix-source/t/core/ctx_with_params.t b/CloudronPackages/APISIX/apisix-source/t/core/ctx_with_params.t new file mode 100644 index 0000000..6bff7dc --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/core/ctx_with_params.t @@ -0,0 +1,153 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + router: + http: 'radixtree_uri_with_parameter' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: add route and get `uri_param_` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "serverless-pre-function": { + "phase": "access", + "functions" : ["return function() ngx.log(ngx.INFO, \"uri_param_id: \", ngx.ctx.api_ctx.var.uri_param_id) end"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/:id" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: `uri_param_id` exist (hello) +--- request +GET /hello +--- response_body +hello 
world +--- error_log +uri_param_id: hello + + + +=== TEST 3: `uri_param_id` exist (hello1) +--- request +GET /hello1 +--- response_body +hello1 world +--- error_log +uri_param_id: hello1 + + + +=== TEST 4: `uri_param_id` nonexisting route +--- request +GET /not_a_route +--- error_code: 404 +--- error_log +uri_param_id: not_a_route + + + +=== TEST 5: add route and get unknown `uri_param_id` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "serverless-pre-function": { + "phase": "access", + "functions" : ["return function() ngx.log(ngx.INFO, \"uri_param_id: \", ngx.ctx.api_ctx.var.uri_param_id) end"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: `uri_param_id` not in uri +--- request +GET /hello +--- response_body +hello world +--- error_log +uri_param_id: diff --git a/CloudronPackages/APISIX/apisix-source/t/core/env.t b/CloudronPackages/APISIX/apisix-source/t/core/env.t new file mode 100644 index 0000000..2e14a43 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/env.t @@ -0,0 +1,181 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{TEST_ENV_VAR} = "test-value"; + $ENV{TEST_ENV_SUB_VAR} = '{"main":"main_value","sub":"sub_value"}'; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity: start with $env:// +--- config + location /t { + content_by_lua_block { + local env = require("apisix.core.env") + local value = env.fetch_by_uri("$env://TEST_ENV_VAR") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +test-value + + + +=== TEST 2: sanity: start with $ENV:// +--- config + location /t { + content_by_lua_block { + local env = require("apisix.core.env") + local value = env.fetch_by_uri("$ENV://TEST_ENV_VAR") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +test-value + + + +=== TEST 3: env var case sensitive +--- config + location /t { + content_by_lua_block { + local env = require("apisix.core.env") + local value = env.fetch_by_uri("$ENV://test_env_var") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +nil + + + +=== TEST 4: wrong format: wrong type +--- config + location /t { + content_by_lua_block { + local env = require("apisix.core.env") + local _, err = env.fetch_by_uri(1) + ngx.say(err) + + local _, err = env.fetch_by_uri(true) + ngx.say(err) + } + } +--- request +GET /t +--- response_body +error env_uri type: number +error env_uri type: boolean + + + +=== TEST 5: wrong format: wrong prefix +--- config + location /t { + content_by_lua_block { + local env = require("apisix.core.env") + local _, err = env.fetch_by_uri("env://") + 
ngx.say(err) + } + } +--- request +GET /t +--- response_body +error env_uri prefix: env:// + + + +=== TEST 6: sub value +--- config + location /t { + content_by_lua_block { + local env = require("apisix.core.env") + local value = env.fetch_by_uri("$ENV://TEST_ENV_SUB_VAR/main") + ngx.say(value) + local value = env.fetch_by_uri("$ENV://TEST_ENV_SUB_VAR/sub") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +main_value +sub_value + + + +=== TEST 7: wrong sub value: error json +--- config + location /t { + content_by_lua_block { + local env = require("apisix.core.env") + local _, err = env.fetch_by_uri("$ENV://TEST_ENV_VAR/main") + ngx.say(err) + } + } +--- request +GET /t +--- response_body +decode failed, err: Expected value but found invalid token at character 1, value: test-value + + + +=== TEST 8: wrong sub value: not exits +--- config + location /t { + content_by_lua_block { + local env = require("apisix.core.env") + local value = env.fetch_by_uri("$ENV://TEST_ENV_VAR/no") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +nil + + + +=== TEST 9: use nginx env +--- main_config +env ngx_env=apisix-nice; +--- config + location /t { + content_by_lua_block { + local env = require("apisix.core.env") + local value = env.fetch_by_uri("$ENV://ngx_env") + ngx.say(value) + } + } +--- request +GET /t +--- response_body +apisix-nice diff --git a/CloudronPackages/APISIX/apisix-source/t/core/etcd-auth-fail.t b/CloudronPackages/APISIX/apisix-source/t/core/etcd-auth-fail.t new file mode 100644 index 0000000..b7f9374 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/etcd-auth-fail.t @@ -0,0 +1,95 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{"ETCD_ENABLE_AUTH"} = "false"; + delete $ENV{"FLUSH_ETCD"}; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +# Authentication is enabled at etcd and credentials are set +system('etcdctl --endpoints="http://127.0.0.1:2379" user add root:5tHkHhYkjr6cQY'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role add root'); +system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role root root'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role list'); +system('etcdctl --endpoints="http://127.0.0.1:2379" user list'); +# Grant the user access to the specified directory +system('etcdctl --endpoints="http://127.0.0.1:2379" user add apisix:abc123'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role add apisix'); +system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role apisix apisix'); +system('etcdctl --endpoints=http://127.0.0.1:2379 role grant-permission apisix --prefix=true readwrite /apisix/'); +system('etcdctl --endpoints="http://127.0.0.1:2379" auth enable'); + +run_tests; + +# Authentication is disabled at etcd +system('etcdctl --endpoints="http://127.0.0.1:2379" --user root:5tHkHhYkjr6cQY auth disable'); +system('etcdctl --endpoints="http://127.0.0.1:2379" user delete root'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role delete root'); +system('etcdctl 
--endpoints="http://127.0.0.1:2379" user delete apisix'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role delete apisix'); +__DATA__ + +=== TEST 1: Set and Get a value pass +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local key = "/test_key" + local val = "test_value" + local res, err = core.etcd.set(key, val) + ngx.say(err) + } + } +--- request +GET /t +--- error_log eval +qr /(insufficient credentials code: 401|etcdserver: user name is empty)/ + + + +=== TEST 2: etcd grants permissions with a different prefix than the one used by apisix, etcd will forbidden +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local key = "/test_key" + local val = "test_value" + local res, err = core.etcd.set(key, val) + ngx.say(err) + } + } +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + user: apisix + password: abc123 +--- request +GET /t +--- error_log eval +qr /etcd forbidden code: 403/ diff --git a/CloudronPackages/APISIX/apisix-source/t/core/etcd-auth.t b/CloudronPackages/APISIX/apisix-source/t/core/etcd-auth.t new file mode 100644 index 0000000..f571ef3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/etcd-auth.t @@ -0,0 +1,97 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{"ETCD_ENABLE_AUTH"} = "true"; + delete $ENV{"FLUSH_ETCD"}; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +# Authentication is enabled at etcd and credentials are set +system('etcdctl --endpoints="http://127.0.0.1:2379" user add root:5tHkHhYkjr6cQY'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role add root'); +system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role root root'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role list'); +system('etcdctl --endpoints="http://127.0.0.1:2379" user list'); +# Grant the user access to the specified directory +system('etcdctl --endpoints="http://127.0.0.1:2379" user add apisix:abc123'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role add apisix'); +system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role apisix apisix'); +system('etcdctl --endpoints=http://127.0.0.1:2379 role grant-permission apisix --prefix=true readwrite /apisix'); +system('etcdctl --endpoints="http://127.0.0.1:2379" auth enable'); + +run_tests; + +# Authentication is disabled at etcd +system('etcdctl --endpoints="http://127.0.0.1:2379" --user root:5tHkHhYkjr6cQY auth disable'); +system('etcdctl --endpoints="http://127.0.0.1:2379" user delete root'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role delete root'); +system('etcdctl --endpoints="http://127.0.0.1:2379" user delete apisix'); +system('etcdctl --endpoints="http://127.0.0.1:2379" role delete apisix'); + + +__DATA__ + +=== TEST 1: Set and Get a 
value pass with authentication +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local key = "/test_key" + local val = "test_value" + core.etcd.set(key, val) + local res, err = core.etcd.get(key) + ngx.say(res.body.node.value) + core.etcd.delete(key) + } + } +--- request +GET /t +--- response_body +test_value + + + +=== TEST 2: etcd grants permissions with the same prefix as apisix uses, etcd is normal +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local key = "/test_key" + local val = "test_value" + local res, err = core.etcd.set(key, val) + ngx.say(err) + } + } +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + user: apisix + password: abc123 +--- request +GET /t diff --git a/CloudronPackages/APISIX/apisix-source/t/core/etcd-mtls.t b/CloudronPackages/APISIX/apisix-source/t/core/etcd-mtls.t new file mode 100644 index 0000000..3ff2571 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/etcd-mtls.t @@ -0,0 +1,282 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements.  See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License.  You may obtain a copy of the License at +# +#     http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $out = eval { `resty -e "local s=ngx.socket.tcp();print(s.tlshandshake)"` }; + +if ($out !~ m/function:/) { + plan(skip_all => "tlshandshake not patched"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: run etcd in init phase +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false +--- init_by_lua_block + local apisix = require("apisix") + apisix.http_init() + local etcd = require("apisix.core.etcd") + assert(etcd.set("/a", "ab")) + + local res, err = etcd.get("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.body.node.value) + + local res, err = etcd.delete("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.status) + + local res, err = etcd.get("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.status) +--- config + location /t { + return 200; + } +--- request +GET /t +--- error_log eval +qr/init_by_lua:\d+: ab/ and qr/init_by_lua:\d+: 200/ and qr/init_by_lua:\d+: 404/ + + + +=== TEST 2: run etcd in init phase (stream) +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false +--- stream_init_by_lua_block + apisix = require("apisix") + apisix.stream_init() + local etcd = require("apisix.core.etcd") + assert(etcd.set("/a", "ab")) + + local res, err = etcd.get("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.body.node.value) + + 
local res, err = etcd.delete("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.status) + + local res, err = etcd.get("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.status) +--- stream_server_config + content_by_lua_block { + ngx.say("ok") + } +--- stream_enable +--- error_log eval +qr/init_by_lua:\d+: ab/ and qr/init_by_lua:\d+: 200/ and qr/init_by_lua:\d+: 404/ + + + +=== TEST 3: sync +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false + admin: + admin_key_required: false +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local consumers, _ = core.config.new("/consumers", { + automatic = true, + item_schema = core.schema.consumer, + }) + + ngx.sleep(0.6) + local idx = consumers.prev_index + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jobs", + "plugins": { + "basic-auth": { + "username": "jobs", + "password": "678901" + } + } + }]]) + + ngx.sleep(2) + local new_idx = consumers.prev_index + if new_idx > idx then + ngx.say("prev_index updated") + else + ngx.say("prev_index not update") + end + } + } +--- request +GET /t +--- response_body +prev_index updated +--- error_log +waitdir key + + + +=== TEST 4: sync (stream) +--- extra_yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key + verify: false +--- stream_server_config + content_by_lua_block { + local core = require("apisix.core") + + local sr, _ = core.config.new("/stream_routes", { + automatic = true, + item_schema = core.schema.stream_routes, 
+ }) + + ngx.sleep(0.6) + local idx = sr.prev_index + + assert(core.etcd.set("/stream_routes/1", + { + plugins = { + } + })) + + ngx.sleep(2) + local new_idx = sr.prev_index + if new_idx > idx then + ngx.say("prev_index updated") + else + ngx.say("prev_index not update") + end + } +--- stream_enable +--- stream_response +prev_index updated +--- error_log +waitdir key + + + +=== TEST 5: ssl_trusted_certificate +--- yaml_config +apisix: + ssl: + ssl_trusted_certificate: t/certs/mtls_ca.crt +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "https://127.0.0.1:22379" + prefix: "/apisix" + tls: + cert: t/certs/mtls_client.crt + key: t/certs/mtls_client.key +--- init_by_lua_block + local apisix = require("apisix") + apisix.http_init() + local etcd = require("apisix.core.etcd") + assert(etcd.set("/a", "ab")) + local res, err = etcd.get("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.body.node.value) +--- config + location /t { + return 200; + } +--- request +GET /t +--- error_log eval +qr/init_by_lua:\d+: ab/ diff --git a/CloudronPackages/APISIX/apisix-source/t/core/etcd-sync.t b/CloudronPackages/APISIX/apisix-source/t/core/etcd-sync.t new file mode 100644 index 0000000..aef5e23 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/etcd-sync.t @@ -0,0 +1,159 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: using default timeout +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local consumers, _ = core.config.new("/consumers", { + automatic = true, + item_schema = core.schema.consumer, + }) + + ngx.sleep(0.6) + local idx = consumers.prev_index + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jobs", + "plugins": { + "basic-auth": { + "username": "jobs", + "password": "678901" + } + } + }]]) + + ngx.sleep(2) + local new_idx = consumers.prev_index + core.log.info("idx:", idx, " new_idx: ", new_idx) + if new_idx > idx then + ngx.say("prev_index updated") + else + ngx.say("prev_index not update") + end + } + } +--- request +GET /t +--- response_body +prev_index updated +--- error_log +waitdir key + + + +=== TEST 2: no update +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + local consumers, _ = core.config.new("/consumers", { + automatic = true, + item_schema = core.schema.consumer + }) + + ngx.sleep(0.6) + local idx = consumers.prev_index + + local key = "/test_key" + local val = "test_value" + core.etcd.set(key, val) + + ngx.sleep(2) + + local new_idx = consumers.prev_index + core.log.info("idx:", idx, " new_idx: ", new_idx) + if new_idx > idx then + ngx.say("prev_index updated") + else + ngx.say("prev_index not update") + end + } + } +--- request +GET /t +--- response_body +prev_index not update + + + 
+=== TEST 3: bad plugin configuration (validated via incremental sync) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + assert(core.etcd.set("/global_rules/etcdsync", + {id = 1, plugins = { ["proxy-rewrite"] = { uri = 1 }}} + )) + -- wait for sync + ngx.sleep(0.6) + } + } +--- request +GET /t +--- error_log +property "uri" validation failed + + + +=== TEST 4: bad plugin configuration (validated via full sync) +--- config + location /t { + content_by_lua_block { + } + } +--- request +GET /t +--- error_log +use loaded configuration /global_rules +property "uri" validation failed + + + +=== TEST 5: bad plugin configuration (validated without sync during start) +--- extra_yaml_config + disable_sync_configuration_during_start: true +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + -- wait for full sync finish + ngx.sleep(0.6) + + assert(core.etcd.delete("/global_rules/etcdsync")) + } + } +--- request +GET /t +--- error_log +property "uri" validation failed +--- no_error_log +use loaded configuration /global_rules diff --git a/CloudronPackages/APISIX/apisix-source/t/core/etcd-write.t b/CloudronPackages/APISIX/apisix-source/t/core/etcd-write.t new file mode 100644 index 0000000..457cb76 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/etcd-write.t @@ -0,0 +1,1107 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: add serverless-pre-function with etcd delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local etcd_cli = require(\"apisix.core.etcd\").new() etcd_cli:delete(\"/test-key\") end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 2: should show warn when serverless-pre-function try to write to etcd with cli delete +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 3: add serverless-pre-function with etcd cli grant +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local etcd_cli = require(\"apisix.core.etcd\").new() etcd_cli:grant(10) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 4: should show warn when serverless-pre-function try to write to etcd with cli grant +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 5: add serverless-pre-function with etcd cli setnx +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local etcd_cli = require(\"apisix.core.etcd\").new() etcd_cli:setnx(\"/test-key\", {value = \"hello from serverless\"}) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 6: should show warn when serverless-pre-function try to write to etcd with cli setnx +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 7: add serverless-pre-function with etcd cli set +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local etcd_cli = require(\"apisix.core.etcd\").new() etcd_cli:set(\"/test-key\", {value = \"hello from serverless\"}) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 8: should show warn when serverless-pre-function try to write to etcd with cli set +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 9: add serverless-pre-function with etcd cli setx +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local etcd_cli = require(\"apisix.core.etcd\").new() etcd_cli:setx(\"/test-key\", {value = \"hello from serverless\"}) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 10: should show warn when serverless-pre-function try to write to etcd with cli setx +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 11: add serverless-pre-function with etcd cli rmdir +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local etcd_cli = require(\"apisix.core.etcd\").new() etcd_cli:rmdir(\"/test-key\") end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 12: should show warn when serverless-pre-function try to write to etcd with cli rmdir +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 13: add serverless-pre-function with etcd cli revoke +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local etcd_cli = require(\"apisix.core.etcd\").new() etcd_cli:revoke(123) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 14: should show warn when serverless-pre-function try to write to etcd with cli revoke +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 15: add serverless-pre-function with etcd cli keepalive +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local etcd_cli = require(\"apisix.core.etcd\").new() etcd_cli:keepalive(123) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 16: should show warn when serverless-pre-function try to write to etcd with cli keepalive +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 17: add serverless-pre-function with etcd cli get +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local etcd_cli = require(\"apisix.core.etcd\").new() etcd_cli:get(\"/my-test-key\") end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: should not show warn when serverless-pre-function try to read from etcd with cli get +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/hello") + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- no_error_log +Data plane role should not write to etcd. This operation will be deprecated in future releases. 
+ + + +=== TEST 19: add serverless-pre-function with etcd function set +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local core = require(\"apisix.core\") core.etcd.set(\"/my-test-key\", {value = \"hello from serverless\"}) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: should show warn when serverless-pre-function try to write to etcd with function set +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 21: add serverless-pre-function with etcd function atomic_set +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local core = require(\"apisix.core\") core.etcd.atomic_set(\"/my-test-key\", {value = \"hello from serverless\"}) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: should show warn when serverless-pre-function try to write to etcd with function atomic_set +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 23: add serverless-pre-function with etcd function push +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local core = require(\"apisix.core\") core.etcd.push(\"/my-test-key\", {value = \"hello from serverless\"}) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: should show warn when serverless-pre-function try to write to etcd with function push +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 25: add serverless-pre-function with etcd function delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local core = require(\"apisix.core\") core.etcd.delete(\"/my-test-key\") end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: should show warn when serverless-pre-function try to write to etcd with function delete +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 27: add serverless-pre-function with etcd function rmdir +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local core = require(\"apisix.core\") core.etcd.rmdir(\"/my-test-key\") end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 28: should show warn when serverless-pre-function try to write to etcd with function rmdir +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 29: add serverless-pre-function with etcd function keepalive +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local core = require(\"apisix.core\") core.etcd.keepalive(123) end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 30: should show warn when serverless-pre-function try to write to etcd with function keepalive +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/hello') + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_log eval +qr/Data plane role should not write to etcd. 
This operation will be deprecated in future releases./ + + + +=== TEST 31: add serverless-pre-function with etcd function get +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : [ + "return function() local core = require(\"apisix.core\") core.etcd.get(\"/my-test-key\") end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 32: should not show warn when serverless-pre-function try to read from etcd with function get +--- yaml_config +deployment: + role: data_plane + role_data_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/hello") + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- no_error_log +Data plane role should not write to etcd. This operation will be deprecated in future releases. + + + +=== TEST 33: should not warn when not data_plane +--- yaml_config +deployment: + role: control_plane + role_control_plane: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" + prefix: "/apisix" + tls: + verify: false +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + etcd.set("foo", "bar") + etcd.delete("foo") + } + } +--- request +GET /t +--- no_error_log +Data plane role should not write to etcd. This operation will be deprecated in future releases. 
diff --git a/CloudronPackages/APISIX/apisix-source/t/core/etcd.t b/CloudronPackages/APISIX/apisix-source/t/core/etcd.t new file mode 100644 index 0000000..d6f5d70 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/etcd.t @@ -0,0 +1,429 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: delete test data if exists +--- config + location /delete { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /delete +--- ignore_response + + + +=== TEST 2: (add + update + delete) *2 (same uri) +--- config + location /add { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "uri": "/hello" + }]], + nil + ) + ngx.status = code + ngx.say(body) + } + } + location /update { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 2 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "uri": "/hello" + }]], + nil + ) + ngx.status = code + ngx.say(body) + } + } + location /delete { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + ngx.status = code + ngx.say(body) + } + } +--- pipelined_requests eval +["GET /add", "GET /hello", "GET /update", "GET /hello", "GET /delete", "GET /hello", +"GET /add", "GET /hello", "GET /update", "GET /hello", "GET /delete", "GET /hello"] +--- more_headers +Host: foo.com +--- error_code eval +[201, 200, 200, 200, 200, 404, 201, 200, 200, 200, 200, 404] +--- response_body eval +["passed\n", "hello world\n", "passed\n", "hello world\n", "passed\n", "{\"error_msg\":\"404 Route Not Found\"}\n", +"passed\n", "hello world\n", "passed\n", "hello world\n", "passed\n", "{\"error_msg\":\"404 Route Not Found\"}\n"] +--- timeout: 5 + + + +=== TEST 3: 
add + update + delete + add + update + delete (different uris) +--- config + location /add { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "uri": "/hello" + }]], + nil + ) + ngx.status = code + ngx.say(body) + } + } + location /update { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + + "upstream": { + "nodes": { + "127.0.0.1:1980": 2 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "uri": "/status" + }]], + nil + ) + ngx.status = code + ngx.say(body) + } + } + location /delete { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + ngx.status = code + ngx.say(body) + } + } + location /add2 { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "uri": "/hello_chunked" + }]], + nil + ) + ngx.sleep(1) + ngx.status = code + ngx.say(body) + } + } + location /update2 { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + + "upstream": { + "nodes": { + "127.0.0.1:1980": 2 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "uri": "/hello1" + }]], + nil + ) + ngx.status = code + ngx.say(body) + } + } + location /delete2 { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + ngx.status = code + ngx.say(body) + } + } +--- pipelined_requests eval +["GET /add", "GET /hello", "GET /update", "GET /hello", "GET /status", "GET /delete", "GET 
/status", +"GET /add2", "GET /hello_chunked", "GET /update2", "GET /hello_chunked", "GET /hello1", "GET /delete", "GET /hello1"] +--- more_headers +Host: foo.com +--- error_code eval +[201, 200, 200, 404, 200, 200, 404, 201, 200, 200, 404, 200, 200, 404] +--- response_body eval +["passed\n", "hello world\n", "passed\n", "{\"error_msg\":\"404 Route Not Found\"}\n", "ok\n", "passed\n", "{\"error_msg\":\"404 Route Not Found\"}\n", +"passed\n", "hello world\n", "passed\n", "{\"error_msg\":\"404 Route Not Found\"}\n", "hello1 world\n", "passed\n", "{\"error_msg\":\"404 Route Not Found\"}\n"] +--- timeout: 5 + + + +=== TEST 4: add*50 + update*50 + delete*50 +--- config + location /add { + content_by_lua_block { + local t = require("lib.test_admin").test + local path = "" + local code, body + for i = 1, 25 do + path = '/apisix/admin/routes/' .. tostring(i) + code, body = t(path, + ngx.HTTP_PUT, + string.format('{"upstream": {"nodes": {"127.0.0.1:1980": 1},"type": "roundrobin"},"host": "foo.com","uri": "/print_uri_%s"}', tostring(i)), + nil + ) + end + ngx.sleep(2) + ngx.status = code + ngx.say(body) + } + } + location /add2 { + content_by_lua_block { + local t = require("lib.test_admin").test + local path = "" + local code, body + for i = 26, 50 do + path = '/apisix/admin/routes/' .. tostring(i) + code, body = t(path, + ngx.HTTP_PUT, + string.format('{"upstream": {"nodes": {"127.0.0.1:1980": 1},"type": "roundrobin"},"host": "foo.com","uri": "/print_uri_%s"}', tostring(i)), + nil + ) + end + ngx.sleep(2) + ngx.status = code + ngx.say(body) + } + } + location /update { + content_by_lua_block { + local t = require("lib.test_admin").test + local path = "" + local code, body + for i = 1, 25 do + path = '/apisix/admin/routes/' .. 
tostring(i) + code, body = t(path, + ngx.HTTP_PUT, + string.format('{"upstream": {"nodes": {"127.0.0.1:1980": 1},"type": "roundrobin"},"host": "foo.com","uri": "/print_uri_%s"}', tostring(i)), + nil + ) + end + ngx.sleep(2) + ngx.status = code + ngx.say(body) + } + } + location /update2 { + content_by_lua_block { + local t = require("lib.test_admin").test + local path = "" + local code, body + for i = 26, 50 do + path = '/apisix/admin/routes/' .. tostring(i) + code, body = t(path, + ngx.HTTP_PUT, + string.format('{"upstream": {"nodes": {"127.0.0.1:1980": 1},"type": "roundrobin"},"host": "foo.com","uri": "/print_uri_%s"}', tostring(i)), + nil + ) + end + ngx.sleep(2) + ngx.status = code + ngx.say(body) + } + } + location /delete { + content_by_lua_block { + local t = require("lib.test_admin").test + local path = "" + local code, body + for i = 1, 50 do + path = '/apisix/admin/routes/' .. tostring(i) + code, body = t(path, ngx.HTTP_DELETE) + end + ngx.status = code + ngx.say(body) + } + } +--- pipelined_requests eval +["GET /add", "GET /print_uri_20", "GET /add2", "GET /print_uri_36", "GET /update", "GET /print_uri_12", "GET /delete", "GET /print_uri_12"] +--- more_headers +Host: foo.com +--- error_code eval +[201, 200, 201, 200, 200, 200, 200, 404] +--- response_body eval +["passed\n", "/print_uri_20\n", "passed\n", "/print_uri_36\n", "passed\n", "/print_uri_12\n", "passed\n", "{\"error_msg\":\"404 Route Not Found\"}\n"] +--- timeout: 20 + + + +=== TEST 5: get single +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/ab", "ab")) + local res, err = etcd.get("/a") + ngx.status = res.status + } + } +--- request +GET /t +--- error_code: 404 + + + +=== TEST 6: get prefix +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/ab", "ab")) + local res, err = etcd.get("/a", true) + assert(err == nil) + assert(#res.body.list == 1) + ngx.status = 
res.status + ngx.say(res.body.list[1].value) + } + } +--- request +GET /t +--- response_body +ab + + + +=== TEST 7: run etcd in init phase +--- init_by_lua_block + local apisix = require("apisix") + apisix.http_init() + local etcd = require("apisix.core.etcd") + assert(etcd.set("/a", "ab")) + + local res, err = etcd.get("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.body.node.value) + + local res, err = etcd.delete("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.status) + + local res, err = etcd.get("/a") + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, res.status) +--- config + location /t { + return 200; + } +--- request +GET /t +--- grep_error_log eval +qr/init_by_lua.*: \S+/ +--- grep_error_log_out eval +qr{init_by_lua.* ab +init_by_lua.* 200 +init_by_lua.* 404} + + + +=== TEST 8: list multiple kv, get prefix +--- config + location /t { + content_by_lua_block { + local etcd = require("apisix.core.etcd") + assert(etcd.set("/ab", "ab")) + assert(etcd.set("/abc", "abc")) + -- get prefix + local res, err = etcd.get("/a", true) + assert(err == nil) + assert(#res.body.list == 2) + ngx.status = res.status + ngx.say(res.body.list[1].value) + ngx.say(res.body.list[2].value) + } + } +--- request +GET /t +--- response_body +ab +abc diff --git a/CloudronPackages/APISIX/apisix-source/t/core/json.t b/CloudronPackages/APISIX/apisix-source/t/core/json.t new file mode 100644 index 0000000..e946bc6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/json.t @@ -0,0 +1,153 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local json_data = core.json.encode({test="test"}) + + ngx.say("encode: ", json_data) + + local data = core.json.decode(json_data) + ngx.say("data: ", data.test) + } + } +--- response_body +encode: {"test":"test"} +data: test + + + +=== TEST 2: delay_encode +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + ngx.log(ngx.ERR, "val: ", core.json.delay_encode({test="test1"}),core.json.delay_encode({test="test2"})) + } + } +--- error_log +val: {"test":"test1"}{"test":"test2"} + + + +=== TEST 3: encode with force argument +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local data = core.json.encode({test="test", fun = function() end}, true) + + ngx.say("encode: ", data) + } + } +--- response_body_like eval +qr/\{("test":"test","fun":"function: 0x[0-9a-f]+"|"fun":"function: 0x[0-9a-f]+","test":"test")}/ + + + +=== TEST 4: encode, include `cdata` type +--- config + location /t { + content_by_lua_block { + local ffi = require "ffi" + local charpp = ffi.new("char *[1]") + + local core = require("apisix.core") + local json_data = core.json.encode({test=charpp}, true) + ngx.say("encode: ", json_data) + } + } +--- response_body_like 
eval +qr/encode: \{"test":"cdata\: 0x[0-9a-f]+"\}/ + + + +=== TEST 5: excessive nesting +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local a = {} + local b = {} + a.b = b + b.a = a + + local json_data = core.json.encode(a, true) + ngx.say("encode: ", json_data) + } + } +--- response_body eval +qr/\{"b":\{"a":\{"b":"table: 0x[\w]+"\}\}\}/ + + + +=== TEST 6: decode/encode empty array +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local data = core.json.decode('{"arr":[]}') + ngx.say(core.json.encode(data)) + local data = { arr = setmetatable({}, core.json.array_mt)} + ngx.say(core.json.encode(data)) + local data = core.json.decode('{"obj":{}}') + ngx.say(core.json.encode(data)) + } + } +--- response_body +{"arr":[]} +{"arr":[]} +{"obj":{}} + + + +=== TEST 7: encode slash without escape +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local json_data = core.json.encode({test="/test"}) + + ngx.say("encode: ", json_data) + + local data = core.json.decode(json_data) + ngx.say("data: ", data.test) + } + } +--- response_body +encode: {"test":"/test"} +data: /test diff --git a/CloudronPackages/APISIX/apisix-source/t/core/log.t b/CloudronPackages/APISIX/apisix-source/t/core/log.t new file mode 100644 index 0000000..6e847d0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/log.t @@ -0,0 +1,200 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: error log +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.error("error log") + core.log.warn("warn log") + core.log.notice("notice log") + core.log.info("info log") + ngx.say("done") + } + } +--- log_level: error +--- request +GET /t +--- error_log +error log +--- no_error_log +warn log +notice log +info log + + + +=== TEST 2: warn log +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.error("error log") + core.log.warn("warn log") + core.log.notice("notice log") + core.log.info("info log") + core.log.debug("debug log") + ngx.say("done") + } + } +--- log_level: warn +--- request +GET /t +--- error_log +error log +warn log +--- no_error_log +notice log +info log +debug log + + + +=== TEST 3: notice log +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.error("error log") + core.log.warn("warn log") + core.log.notice("notice log") + core.log.info("info log") + core.log.debug("debug log") + ngx.say("done") + } + } +--- log_level: notice +--- request +GET /t +--- error_log +error log +warn log +notice log +--- no_error_log +info log +debug log + + + +=== TEST 4: info log +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.error("error log") + core.log.warn("warn log") + core.log.notice("notice log") + core.log.info("info log") + 
core.log.debug("debug log") + ngx.say("done") + } + } +--- log_level: info +--- request +GET /t +--- error_log +error log +warn log +notice log +info log +--- no_error_log +debug log + + + +=== TEST 5: debug log +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.error("error log") + core.log.warn("warn log") + core.log.notice("notice log") + core.log.info("info log") + core.log.debug("debug log") + ngx.say("done") + } + } +--- log_level: debug +--- request +GET /t +--- error_log +error log +warn log +notice log +info log +debug log + + + +=== TEST 6: print error log with prefix +--- config + location /t { + content_by_lua_block { + local log_prefix = require("apisix.core").log.new("prefix: ") + log_prefix.error("error log") + log_prefix.warn("warn log") + log_prefix.notice("notice log") + log_prefix.info("info log") + ngx.say("done") + } + } +--- log_level: error +--- request +GET /t +--- error_log eval +qr/[error].+prefix: error log/ +--- no_error_log +[qr/[warn].+warn log/, qr/[notice].+notice log/, qr/[info].+info log/] + + + +=== TEST 7: print both prefixed error logs and normal logs +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local log_prefix = core.log.new("prefix: ") + core.log.error("raw error log") + core.log.warn("raw warn log") + core.log.notice("raw notice log") + core.log.info("raw info log") + + log_prefix.error("error log") + log_prefix.warn("warn log") + log_prefix.notice("notice log") + log_prefix.info("info log") + ngx.say("done") + } + } +--- log_level: error +--- request +GET /t +--- error_log eval +[qr/[error].+raw error log/, qr/[error].+prefix: error log/] +--- no_error_log +[qr/[warn].+warn log/, qr/[notice].+notice log/, qr/[info].+info log/] diff --git a/CloudronPackages/APISIX/apisix-source/t/core/lrucache.t b/CloudronPackages/APISIX/apisix-source/t/core/lrucache.t new file mode 100644 index 0000000..0b75e8e --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/core/lrucache.t @@ -0,0 +1,271 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + local idx = 0 + local function create_obj() + idx = idx + 1 + return {idx = idx} + end + + local obj = core.lrucache.global("key", nil, create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + + obj = core.lrucache.global("key", nil, create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + + obj = core.lrucache.global("key", "1", create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + } + } +--- request +GET /t +--- response_body +obj: {"idx":1} +obj: {"idx":1} +obj: {"idx":2} + + + +=== TEST 2: new +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + local idx = 0 + local function create_obj() + idx = idx + 1 + return {idx = idx} + end + + local lru_get = core.lrucache.new() + + local obj = lru_get("key", nil, create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + + obj = 
lru_get("key", nil, create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + + obj = lru_get("key", "1", create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + + obj = lru_get("key", "1", create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + + obj = lru_get("key-different", "1", create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + } + } +--- request +GET /t +--- response_body +obj: {"idx":1} +obj: {"idx":1} +obj: {"idx":2} +obj: {"idx":2} +obj: {"idx":3} + + + +=== TEST 3: cache the non-table object, eg: number or string +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + local idx = 0 + local function create_num() + idx = idx + 1 + return idx + end + + local obj = core.lrucache.global("key", nil, create_num) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + + obj = core.lrucache.global("key", nil, create_num) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + + obj = core.lrucache.global("key", "1", create_num) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + } + } +--- request +GET /t +--- response_body +obj: 1 +obj: 1 +obj: 2 + + + +=== TEST 4: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + local function server_release(self) + ngx.say("release: ", require("toolkit.json").encode(self)) + end + + local lrucache_server_picker = core.lrucache.new({ + ttl = 300, count = 256, release = server_release, + }) + + local t1 = lrucache_server_picker("nnn", "t1", + function () return {name = "aaa"} end) + + ngx.say("obj: ", require("toolkit.json").encode(t1)) + + local t2 = lrucache_server_picker("nnn", "t2", + function () return {name = "bbb"} end) + + ngx.say("obj: ", require("toolkit.json").encode(t2)) + } + } +--- request +GET /t +--- response_body +obj: {"name":"aaa"} +release: {"name":"aaa"} +obj: {"name":"bbb"} + + + +=== TEST 5: invalid_stale = true +--- config + location /t { + 
content_by_lua_block { + local core = require("apisix.core") + + local idx = 0 + local function create_obj() + idx = idx + 1 + return {idx = idx} + end + + local lru_get = core.lrucache.new({ + ttl = 0.1, count = 256, invalid_stale = true, + }) + + local obj = lru_get("key", "ver", create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + local obj = lru_get("key", "ver", create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + + ngx.sleep(0.15) + local obj = lru_get("key", "ver", create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + } + } +--- request +GET /t +--- response_body +obj: {"idx":1} +obj: {"idx":1} +obj: {"idx":2} + + + +=== TEST 6: when creating cached objects, use resty-lock to avoid repeated creation. +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + local idx = 0 + local function create_obj() + idx = idx + 1 + ngx.sleep(0.1) + return {idx = idx} + end + + local lru_get = core.lrucache.new({ + ttl = 1, count = 256, invalid_stale = true, serial_creating = true, + }) + + local function f() + local obj = lru_get("key", "ver", create_obj) + ngx.say("obj: ", require("toolkit.json").encode(obj)) + end + + ngx.thread.spawn(f) + ngx.thread.spawn(f) + + ngx.sleep(0.3) + } + } +--- request +GET /t +--- response_body +obj: {"idx":1} +obj: {"idx":1} + + + +=== TEST 7: different `key` and `ver`, cached same one table +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + local item = {} + local idx = 0 + local function create_obj() + idx = idx + 1 + ngx.say("create obj ", idx, " time") + return item + end + + local lru_get = core.lrucache.new({ + ttl = 10, count = 256 + }) + + local obj = lru_get("key", "ver", create_obj) + ngx.say("fetch obj: ", obj == item) + + obj = lru_get("key2", "ver2", create_obj) + ngx.say("fetch obj: ", obj == item) + + obj = lru_get("key", "ver", create_obj) + ngx.say("fetch obj: ", obj == item) + } + } +--- 
request +GET /t +--- response_body +create obj 1 time +fetch obj: true +create obj 2 time +fetch obj: true +fetch obj: true diff --git a/CloudronPackages/APISIX/apisix-source/t/core/os.t b/CloudronPackages/APISIX/apisix-source/t/core/os.t new file mode 100644 index 0000000..4c99b31 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/os.t @@ -0,0 +1,91 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: setenv +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + core.os.setenv("TEST", "A") + ngx.say(os.getenv("TEST")) + core.os.setenv("TEST", 1) + ngx.say(os.getenv("TEST")) + } + } +--- response_body +A +1 + + + +=== TEST 2: setenv, bad arguments +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + for _, c in ipairs({ + {name = "A"}, + {value = "A"}, + {name = 1, value = "A"}, + }) do + local ok = core.os.setenv(c.name, c.value) + ngx.say(ok) + end + } + } +--- response_body +false +false +false + + + +=== TEST 3: usleep, bad arguments +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + for _, c in ipairs({ + {us = 0.1}, + }) do + local ok = pcall(core.os.usleep, c.us) + ngx.say(ok) + end + } + } +--- response_body +false diff --git a/CloudronPackages/APISIX/apisix-source/t/core/profile.t b/CloudronPackages/APISIX/apisix-source/t/core/profile.t new file mode 100644 index 0000000..3e28f97 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/profile.t @@ -0,0 +1,52 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{APISIX_PROFILE} = "dev"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: set env "APISIX_PROFILE" +--- request +GET /t +--- error_code: 404 + + + +=== TEST 2: set env "APISIX_PROFILE" to Empty String +--- config + location /t { + content_by_lua_block { + local profile = require("apisix.core.profile") + profile.apisix_home = "./test/" + profile.profile = "" + local local_conf_path = profile:yaml_path("config") + ngx.say(local_conf_path) + } + } +--- request +GET /t +--- response_body +./test/conf/config.yaml diff --git a/CloudronPackages/APISIX/apisix-source/t/core/random.t b/CloudronPackages/APISIX/apisix-source/t/core/random.t new file mode 100644 index 0000000..b758472 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/random.t @@ -0,0 +1,73 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +workers(4); +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: generate different random number in different worker process +--- config + location /t { + content_by_lua_block { + local log_file = ngx.config.prefix() .. "logs/error.log" + local file = io.open(log_file, "r") + local log = file:read("*a") + + local it, err = ngx.re.gmatch(log, [[random test in \[1, 10000\]: (\d+)]], "jom") + if not it then + ngx.log(ngx.ERR, "failed to gmatch: ", err) + return + end + + local random_nums = {} + while true do + local m, err = it() + if err then + ngx.log(ngx.ERR, "error: ", err) + return + end + + if not m then + break + end + + -- found a match + table.insert(random_nums, m[1]) + end + + for i = 2, #random_nums do + local pre = random_nums[i - 1] + local cur = random_nums[i] + ngx.say("random[", i - 1, "] == random[", i, "]: ", pre == cur) + end + } + } +--- request +GET /t +--- response_body +random[1] == random[2]: false +random[2] == random[3]: false +random[3] == random[4]: false +random[4] == random[5]: false diff --git a/CloudronPackages/APISIX/apisix-source/t/core/request.t b/CloudronPackages/APISIX/apisix-source/t/core/request.t new file mode 100644 index 0000000..3220508 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/request.t @@ -0,0 +1,492 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: get_ip +--- config + location /t { + real_ip_header X-Real-IP; + + set_real_ip_from 0.0.0.0/0; + set_real_ip_from ::/0; + set_real_ip_from unix:; + + access_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + } + content_by_lua_block { + local core = require("apisix.core") + local ip = core.request.get_ip(ngx.ctx.api_ctx) + ngx.say(ip) + } + } +--- more_headers +X-Real-IP: 10.0.0.1 +--- response_body +127.0.0.1 + + + +=== TEST 2: get_ip +--- config + location /t { + real_ip_header X-Real-IP; + + set_real_ip_from 0.0.0.0/0; + set_real_ip_from ::/0; + set_real_ip_from unix:; + + access_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + } + content_by_lua_block { + local core = require("apisix.core") + local ip = core.request.get_ip(ngx.ctx.api_ctx) + ngx.say(ip) + } + } +--- more_headers +X-Real-IP: 10.0.0.1 +--- response_body +127.0.0.1 + + + +=== TEST 3: get_ip and 
X-Forwarded-For +--- config + location /t { + real_ip_header X-Forwarded-For; + + set_real_ip_from 0.0.0.0/0; + set_real_ip_from ::/0; + set_real_ip_from unix:; + + access_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + } + content_by_lua_block { + local core = require("apisix.core") + local ip = core.request.get_ip(ngx.ctx.api_ctx) + ngx.say(ip) + } + } +--- more_headers +X-Forwarded-For: 10.0.0.1 +--- response_body +127.0.0.1 + + + +=== TEST 4: get_remote_client_ip +--- config + location /t { + real_ip_header X-Real-IP; + + set_real_ip_from 0.0.0.0/0; + set_real_ip_from ::/0; + set_real_ip_from unix:; + + access_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + } + content_by_lua_block { + local core = require("apisix.core") + local ip = core.request.get_remote_client_ip(ngx.ctx.api_ctx) + ngx.say(ip) + } + } +--- more_headers +X-Real-IP: 10.0.0.1 +--- response_body +10.0.0.1 + + + +=== TEST 5: get_remote_client_ip and X-Forwarded-For +--- config + location /t { + real_ip_header X-Forwarded-For; + set_real_ip_from 0.0.0.0/0; + set_real_ip_from ::/0; + set_real_ip_from unix:; + + access_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + } + content_by_lua_block { + local core = require("apisix.core") + local ip = core.request.get_remote_client_ip(ngx.ctx.api_ctx) + ngx.say(ip) + } + } +--- more_headers +X-Forwarded-For: 10.0.0.1 +--- 
response_body +10.0.0.1 + + + +=== TEST 6: get_host +--- config + location /t { + real_ip_header X-Real-IP; + + set_real_ip_from 0.0.0.0/0; + set_real_ip_from ::/0; + set_real_ip_from unix:; + + access_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + } + content_by_lua_block { + local core = require("apisix.core") + local host = core.request.get_host(ngx.ctx.api_ctx) + ngx.say(host) + } + } +--- more_headers +X-Real-IP: 10.0.0.1 +--- response_body +localhost + + + +=== TEST 7: get_scheme +--- config + location /t { + real_ip_header X-Real-IP; + + set_real_ip_from 0.0.0.0/0; + set_real_ip_from ::/0; + set_real_ip_from unix:; + + access_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + } + content_by_lua_block { + local core = require("apisix.core") + local scheme = core.request.get_scheme(ngx.ctx.api_ctx) + ngx.say(scheme) + } + } +--- more_headers +X-Real-IP: 10.0.0.1 +--- response_body +http + + + +=== TEST 8: get_port +--- config + location /t { + real_ip_header X-Real-IP; + + set_real_ip_from 0.0.0.0/0; + set_real_ip_from ::/0; + set_real_ip_from unix:; + + access_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + } + content_by_lua_block { + local core = require("apisix.core") + local port = core.request.get_port(ngx.ctx.api_ctx) + ngx.say(port) + } + } +--- more_headers +X-Real-IP: 10.0.0.1 +--- response_body +1984 + + + +=== TEST 9: 
get_http_version +--- config + location /t { + real_ip_header X-Real-IP; + + set_real_ip_from 0.0.0.0/0; + set_real_ip_from ::/0; + set_real_ip_from unix:; + + access_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + } + content_by_lua_block { + local core = require("apisix.core") + local http_version = core.request.get_http_version() + ngx.say(http_version) + } + } +--- more_headers +X-Real-IP: 10.0.0.1 +--- response_body +1.1 + + + +=== TEST 10: set header +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + ngx.ctx.api_ctx = {} + local h = core.request.header(nil, "Test") + local ctx = ngx.ctx.api_ctx + core.request.set_header(ctx, "Test", "t") + local h2 = core.request.header(ctx, "Test") + ngx.say(h) + ngx.say(h2) + } + } +--- response_body +nil +t + + + +=== TEST 11: get_post_args +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + + local args = core.request.get_post_args(ngx.ctx.api_ctx) + ngx.say(args["c"]) + ngx.say(args["v"]) + } + } +--- request +POST /t +c=z_z&v=x%20x +--- response_body +z_z +x x + + + +=== TEST 12: get_post_args when the body is stored in temp file +--- config + location /t { + client_body_in_file_only clean; + content_by_lua_block { + local core = require("apisix.core") + local ngx_ctx = ngx.ctx + local api_ctx = ngx_ctx.api_ctx + if api_ctx == nil then + api_ctx = core.tablepool.fetch("api_ctx", 0, 32) + ngx_ctx.api_ctx = api_ctx + end + + core.ctx.set_vars_meta(api_ctx) + + local args = core.request.get_post_args(ngx.ctx.api_ctx) + 
ngx.say(args["c"]) + } + } +--- request +POST /t +c=z_z&v=x%20x +--- response_body +nil +--- error_log +the post form is too large: request body in temp file not supported + + + +=== TEST 13: get_method +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + ngx.say(core.request.get_method()) + } + } +--- request +POST /t +--- response_body +POST + + + +=== TEST 14: add header +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + ngx.ctx.api_ctx = {} + local ctx = ngx.ctx.api_ctx + local json = require("toolkit.json") + core.request.add_header(ctx, "test_header", "test") + local h = core.request.header(ctx, "test_header") + ngx.say(h) + core.request.add_header(ctx, "test_header", "t2") + local h2 = core.request.headers(ctx)["test_header"] + ngx.say(json.encode(h2)) + core.request.add_header(ctx, "test_header", "t3") + local h3 = core.request.headers(ctx)["test_header"] + ngx.say(json.encode(h3)) + } + } +--- response_body +test +["test","t2"] +["test","t2","t3"] + + + +=== TEST 15: call add_header with deprecated way +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + ngx.ctx.api_ctx = {} + local ctx = ngx.ctx.api_ctx + core.request.add_header("test_header", "test") + local h = core.request.header(ctx, "test_header") + ngx.say(h) + } + } +--- response_body +test +--- error_log +DEPRECATED: use add_header(ctx, header_name, header_value) instead + + + +=== TEST 16: after setting the header, ctx.var can still access the correct value +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + ngx.ctx.api_ctx = {} + local ctx = ngx.ctx.api_ctx + core.ctx.set_vars_meta(ctx) + + ctx.var.http_server = "ngx" + ngx.say(ctx.var.http_server) + + core.request.set_header(ctx, "server", "test") + ngx.say(ctx.var.http_server) + + -- case-insensitive + core.request.set_header(ctx, "Server", "apisix") + ngx.say(ctx.var.http_server) + 
} + } +--- response_body +ngx +test +apisix diff --git a/CloudronPackages/APISIX/apisix-source/t/core/resolver.t b/CloudronPackages/APISIX/apisix-source/t/core/resolver.t new file mode 100644 index 0000000..06d2470 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/resolver.t @@ -0,0 +1,151 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: resolve host from /etc/hosts +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local resolver = require("apisix.core.resolver") + local domain = "localhost" + local ip_info, err = resolver.parse_domain(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + return + end + ngx.say("ip_info: ", require("toolkit.json").encode(ip_info)) + } + } +--- response_body +ip_info: "127.0.0.1" + + + +=== TEST 2: resolve host from dns +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local resolver = require("apisix.core.resolver") + local domain = "apisix.apache.org" + resolver.parse_domain = function(domain) -- mock: resolver parser + + if domain == "apisix.apache.org" then + return {address = "127.0.0.2" } + end + error("unknown domain: " .. 
domain) + end + local ip_info, err = resolver.parse_domain(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + return + end + ngx.say("ip_info: ", require("toolkit.json").encode(ip_info)) + } + } +--- response_body +ip_info: {"address":"127.0.0.2"} + + + +=== TEST 3: there is no mapping in /etc/hosts and dns +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local resolver = require("apisix.core.resolver") + local domain = "abc1.test" + resolver.parse_domain(domain) + } + } +--- error_log +failed to parse domain + + + +=== TEST 4: test dns config with ipv6 enable +--- yaml_config +apisix: + enable_ipv6: true +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local resolver = require("apisix.core.resolver") + local domain = "localhost6" + resolver.parse_domain = function(domain) -- mock: resolver parse_domain + if domain == "localhost6" then + return {address = "::1" } + end + error("unknown domain: " .. 
domain) + + end + local ip_info, err = resolver.parse_domain(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + return + end + ngx.say("ip_info: ", require("toolkit.json").encode(ip_info)) + } + } +--- response_body +ip_info: {"address":"::1"} + + + +=== TEST 5: test dns config with ipv6 disable +--- yaml_config +apisix: + enable_ipv6: false +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local resolver = require("apisix.core.resolver") + local domain = "localhost6" + local ip_info, err = resolver.parse_domain(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + return + end + ngx.say("ip_info: ", require("toolkit.json").encode(ip_info)) + } + } +--- error_log +failed to parse domain diff --git a/CloudronPackages/APISIX/apisix-source/t/core/response.t b/CloudronPackages/APISIX/apisix-source/t/core/response.t new file mode 100644 index 0000000..dc748a7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/response.t @@ -0,0 +1,202 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: exit with string +--- config + location = /t { + access_by_lua_block { + local core = require("apisix.core") + core.response.exit(201, "done\n") + } + } +--- request +GET /t +--- error_code: 201 +--- response_body +done + + + +=== TEST 2: exit with table +--- config + location = /t { + access_by_lua_block { + local core = require("apisix.core") + core.response.exit(201, {a = "a"}) + } + } +--- request +GET /t +--- error_code: 201 +--- response_body +{"a":"a"} + + + +=== TEST 3: multiple response headers +--- config + location = /t { + access_by_lua_block { + local core = require("apisix.core") + core.response.set_header("aaa", "bbb", "ccc", "ddd") + core.response.exit(200, "done\n") + } + } +--- request +GET /t +--- response_body +done +--- response_headers +aaa: bbb +ccc: ddd + + + +=== TEST 4: multiple response headers by table +--- config + location = /t { + access_by_lua_block { + local core = require("apisix.core") + core.response.set_header({aaa = "bbb", ccc = "ddd"}) + core.response.exit(200, "done\n") + } + } +--- request +GET /t +--- response_body +done +--- response_headers +aaa: bbb +ccc: ddd + + + +=== TEST 5: multiple response headers (add) +--- config + location = /t { + access_by_lua_block { + local core = require("apisix.core") + core.response.add_header("aaa", "bbb", "aaa", "bbb") + core.response.exit(200, "done\n") + } + } +--- request +GET /t +--- response_body +done +--- response_headers +aaa: bbb, bbb + + + +=== TEST 6: multiple response headers by table (add) +--- config + location = /t { + access_by_lua_block { + local core = require("apisix.core") + core.response.set_header({aaa = "bbb"}) + core.response.add_header({aaa = "bbb", ccc = "ddd"}) + core.response.exit(200, "done\n") + } + } +--- request +GET /t +--- response_body +done +--- response_headers +aaa: bbb, bbb +ccc: ddd + + + +=== TEST 7: 
delete header +--- config + location = /t { + access_by_lua_block { + local core = require("apisix.core") + core.response.set_header("aaa", "bbb") + core.response.set_header("aaa", nil) + core.response.exit(200, "done\n") + } + } +--- request +GET /t +--- response_body +done +--- response_headers +aaa: + + + +=== TEST 8: hold_body_chunk (ngx.arg[2] == true and ngx.arg[1] ~= "") +--- config + location = /t { + content_by_lua_block { + -- Nginx uses a separate buf to mark the end of the stream, + -- hence when ngx.arg[2] == true, ngx.arg[1] will be equal to "". + -- To avoid something unexpected, here we add a test to verify + -- this situation via mock. + local t = ngx.arg + local metatable = getmetatable(t) + local count = 0 + setmetatable(t, {__index = function(t, idx) + if count == 0 then + if idx == 1 then + return "hello " + end + count = count + 1 + return false + end + if count == 1 then + if idx == 1 then + return "world\n" + end + count = count + 1 + return true + end + + return metatable.__index(t, idx) + end, + __newindex = metatable.__newindex}) + + -- trigger body_filter_by_lua_block + ngx.print("A") + } + body_filter_by_lua_block { + local core = require("apisix.core") + ngx.ctx._plugin_name = "test" + local final_body = core.response.hold_body_chunk(ngx.ctx) + if not final_body then + return + end + ngx.arg[1] = final_body + } + } +--- request +GET /t +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/core/schema.t b/CloudronPackages/APISIX/apisix-source/t/core/schema.t new file mode 100644 index 0000000..2cf2793 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/schema.t @@ -0,0 +1,148 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local schema = { + type = "object", + properties = { + i = {type = "number", minimum = 0}, + s = {type = "string"}, + t = {type = "array", minItems = 1}, + } + } + + for i = 1, 10 do + local ok, err = core.schema.check(schema, + {i = i, s = "s" .. i, t = {i}}) + assert(ok) + assert(err == nil) + end + + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: same schema in different timer +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local schema = { + type = "object", + properties = { + i = {type = "number", minimum = 0}, + s = {type = "string"}, + t = {type = "array", minItems = 1}, + } + } + + local count = 0 + local function test() + for i = 1, 10 do + local ok, err = core.schema.check(schema, + {i = i, s = "s" .. 
i, t = {i}}) + assert(ok) + assert(err == nil) + count = count + 1 + end + end + + ngx.timer.at(0, test) + ngx.timer.at(0, test) + ngx.timer.at(0, test) + + ngx.sleep(1) + ngx.say("passed: ", count) + } + } +--- request +GET /t +--- response_body +passed: 30 + + + +=== TEST 3: collectgarbage +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local schema = { + type = "object", + properties = { + i = {type = "number", minimum = 0}, + s = {type = "string"}, + t = {type = "array", minItems = 1}, + } + } + + for i = 1, 1000 do + collectgarbage() + local ok, err = core.schema.check(schema, + {i = i, s = "s" .. i, t = {i}}) + assert(ok) + assert(err == nil) + end + + ngx.say("passed") + } + } +--- timeout: 15 +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: invalid schema +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local schema = { + type = "invalid type" + } + + local ok, err = core.schema.check(schema, 11) + ngx.say("ok: ", ok, " err: ", err) + } + } +--- request +GET /t +--- response_body eval +qr/ok: false err: .* invalid JSON type: invalid type/ diff --git a/CloudronPackages/APISIX/apisix-source/t/core/schema_def.t b/CloudronPackages/APISIX/apisix-source/t/core/schema_def.t new file mode 100644 index 0000000..da3bb51 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/schema_def.t @@ -0,0 +1,239 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: ip_def +--- config + location /t { + content_by_lua_block { + local schema_def = require("apisix.schema_def") + local core = require("apisix.core") + local schema = { + type = "object", + properties = { + ip = { + type = "string", + anyOf = schema_def.ip_def, + } + }, + } + + local cases = { + "127.0.0.1/1", + "127.0.0.1/10", + "127.0.0.1/11", + "127.0.0.1/20", + "127.0.0.1/21", + "127.0.0.1/30", + "127.0.0.1/32", + } + for _, c in ipairs(cases) do + local ok, err = core.schema.check(schema, {ip = c}) + assert(ok, c) + assert(err == nil, c) + end + + local cases = { + "127.0.0.1/33", + } + for _, c in ipairs(cases) do + local ok, err = core.schema.check(schema, {ip = c}) + assert(not ok, c) + assert(err ~= nil, c) + end + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 2: Missing required fields of global_rule. 
+--- config + location /t { + content_by_lua_block { + local schema_def = require("apisix.schema_def") + local core = require("apisix.core") + + local cases = { + {}, + { id = "ADfwefq12D9s" }, + { id = 1 }, + { + plugins = { + foo = "bar", + }, + }, + } + for _, c in ipairs(cases) do + local ok, err = core.schema.check(schema_def.global_rule, c) + assert(not ok) + assert(err ~= nil) + ngx.say("ok: ", ok, " err: ", err) + end + } + } +--- request +GET /t +--- response_body eval +qr/ok: false err: property "(id|plugins)" is required/ + + + +=== TEST 3: Sanity check with minimal valid configuration. +--- config + location /t { + content_by_lua_block { + local schema_def = require("apisix.schema_def") + local core = require("apisix.core") + + local case = { + id = 1, + plugins = {}, + } + + local ok, err = core.schema.check(schema_def.global_rule, case) + assert(ok) + assert(err == nil) + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: sanity check upstream_schema +--- config + location /t { + content_by_lua_block { + local schema_def = require("apisix.schema_def") + local core = require("apisix.core") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert_id = 1, + client_cert = ssl_cert, + client_key = ssl_key + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(not ok) + assert(err ~= nil) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert_id = 1 + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(ok) + assert(err == nil, err) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert = ssl_cert, + client_key = ssl_key + } + } + local ok, err = 
core.schema.check(schema_def.upstream, upstream) + assert(ok) + assert(err == nil, err) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(ok) + assert(err == nil, err) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert = ssl_cert + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(not ok) + assert(err ~= nil) + + upstream = { + nodes = { + ["127.0.0.1:8080"] = 1 + }, + type = "roundrobin", + tls = { + client_cert_id = 1, + client_key = ssl_key + } + } + local ok, err = core.schema.check(schema_def.upstream, upstream) + assert(not ok) + assert(err ~= nil) + + ngx.say("passed") + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/core/string.t b/CloudronPackages/APISIX/apisix-source/t/core/string.t new file mode 100644 index 0000000..1c01e45 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/string.t @@ -0,0 +1,139 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: find +--- config + location /t { + content_by_lua_block { + local encode = require "toolkit.json".encode + local str = require("apisix.core.string") + local cases = { + {"xx", "", true}, + {"xx", "x", true}, + {"", "x", false}, + {"", "", true}, + {"", 0, false}, + {0, "x", false}, + {"a[", "[", true}, + + {"[a", "[", false, 2}, + {"[a", "[", false, 3}, + {"[a", "[", true, 1}, + } + for _, case in ipairs(cases) do + local ok, idx = pcall(str.find, case[1], case[2], case[4]) + if not ok then + if case[3] == true then + ngx.log(ngx.ERR, "unexpected error: ", idx, + " ", encode(case)) + end + else + if case[3] ~= (idx ~= nil) then + ngx.log(ngx.ERR, "unexpected res: ", idx, + " ", encode(case)) + end + end + end + } + } +--- request +GET /t + + + +=== TEST 2: prefix +--- config + location /t { + content_by_lua_block { + local encode = require "toolkit.json".encode + local str = require("apisix.core.string") + local cases = { + {"xx", "", true}, + {"xx", "x", true}, + {"", "x", false}, + {"", "", true}, + {"", 0, false}, + {0, "x", false}, + {"a[", "[", false}, + {"[a", "[", true}, + {"[a", "[b", false}, + } + for _, case in ipairs(cases) do + local ok, res = pcall(str.has_prefix, case[1], case[2]) + if not ok then + if case[3] == true then + ngx.log(ngx.ERR, "unexpected error: ", res, + " ", encode(case)) + end + else + if case[3] ~= res then + ngx.log(ngx.ERR, "unexpected res: ", res, + " ", encode(case)) + end + end + end + } + } +--- request +GET /t + + + +=== TEST 3: suffix +--- config + location /t { + content_by_lua_block { + local encode = require "toolkit.json".encode + local str = require("apisix.core.string") + local cases = { + {"xx", "", true}, + {"xx", "x", true}, + {"", "x", false}, + {"", "", true}, + {"", 0, false}, + {0, "x", false}, + {"a[", "[", true}, + {"[a", "[", false}, + {"[a", "[b", false}, + } + for _, case in 
ipairs(cases) do + local ok, res = pcall(str.has_suffix, case[1], case[2]) + if not ok then + if case[3] == true then + ngx.log(ngx.ERR, "unexpected error: ", res, + " ", encode(case)) + end + else + if case[3] ~= res then + ngx.log(ngx.ERR, "unexpected res: ", res, + " ", encode(case)) + end + end + end + } + } +--- request +GET /t diff --git a/CloudronPackages/APISIX/apisix-source/t/core/table.t b/CloudronPackages/APISIX/apisix-source/t/core/table.t new file mode 100644 index 0000000..38616ae --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/table.t @@ -0,0 +1,361 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = {"first"} + core.table.insert_tail(t, 'a', 1, true) + + ngx.say("encode: ", require("toolkit.json").encode(t)) + + core.table.set(t, 'a', 1, true) + ngx.say("encode: ", require("toolkit.json").encode(t)) + } + } +--- request +GET /t +--- response_body +encode: ["first","a",1,true] +encode: ["a",1,true,true] + + + +=== TEST 2: deepcopy +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local deepcopy = core.table.deepcopy + local cases = { + {t = {1, 2, a = {2, 3}}}, + {t = {{a = b}, 2, true}}, + {t = {{a = b}, {{a = c}, {}, 1}, true}}, + } + for _, case in ipairs(cases) do + local t = case.t + local actual = core.json.encode(deepcopy(t)) + local expect = core.json.encode(t) + if actual ~= expect then + ngx.say("expect ", expect, ", actual ", actual) + return + end + end + ngx.say("ok") + } + } +--- request +GET /t +--- response_body +ok + + + +=== TEST 3: try_read_attr +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local try_read_attr = core.table.try_read_attr + + local t = {level1 = {level2 = "value"}} + + local v = try_read_attr(t, "level1", "level2") + ngx.say(v) + + local v2 = try_read_attr(t, "level1", "level3") + ngx.say(v2) + } + } +--- request +GET /t +--- response_body +value +nil + + + +=== TEST 4: set_eq +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local cases = { + {expect = true, a = {}, b = {}}, + {expect = true, a = {a = 1}, b = {a = 1}}, + {expect = true, a = {a = 1}, b = {a = 2}}, + {expect = false, a = {b = 1}, b = {a = 1}}, + {expect = false, a = {a = 1, b = 1}, b = {a = 1}}, + {expect = false, a = {a = 1}, b = {a = 1, b = 2}}, + } + for _, t in ipairs(cases) do + local actual 
= core.table.set_eq(t.a, t.b) + local expect = t.expect + if actual ~= expect then + ngx.say("expect ", expect, ", actual ", actual) + return + end + end + ngx.say("ok") + } + } +--- response_body +ok +--- request +GET /t + + + +=== TEST 5: deep_eq +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local cases = { + {expect = true, a = {}, b = {}}, + {expect = true, a = nil, b = nil}, + {expect = false, a = nil, b = {}}, + {expect = false, a = {}, b = nil}, + {expect = true, a = {a = {b = 1}}, b = {a = {b = 1}}}, + {expect = false, a = {a = {b = 1}}, b = {a = {b = 1, c = 2}}}, + {expect = false, a = {a = {b = 1}}, b = {a = {b = 2}}}, + {expect = true, a = {{a = {b = 1}}}, b = {{a = {b = 1}}}}, + } + for _, t in ipairs(cases) do + local actual = core.table.deep_eq(t.a, t.b) + local expect = t.expect + if actual ~= expect then + ngx.say("expect ", expect, ", actual ", actual) + return + end + end + ngx.say("ok") + } + } +--- response_body +ok +--- request +GET /t + + + +=== TEST 6: pick +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local core = require("apisix.core") + local cases = { + {expect = {}, a = {}, b = {priority = true}}, + {expect = {priority = 1}, a = {priority = 1}, b = {priority = true}}, + {expect = {}, a = {priorities = 1}, b = {priority = true}}, + {expect = {priority = 1}, a = {priority = 1, ver = "2"}, b = {priority = true}}, + {expect = {priority = 1, ver = "2"}, a = {priority = 1, ver = "2"}, b = {priority = true, ver = true}}, + } + for _, t in ipairs(cases) do + local actual = core.table.pick(t.a, t.b) + local expect = t.expect + if not core.table.deep_eq(actual, expect) then + ngx.say("expect ", json.encode(expect), ", actual ", json.encode(actual)) + return + end + end + ngx.say("ok") + } + } +--- response_body +ok +--- request +GET /t + + + +=== TEST 7: deepcopy should keep metatable +--- config + location /t { + content_by_lua_block { + local core = 
require("apisix.core") + local deepcopy = core.table.deepcopy + local t = setmetatable({}, core.json.array_mt) + local actual = core.json.encode(deepcopy(t)) + local expect = "[]" + if actual ~= expect then + ngx.say("expect ", expect, ", actual ", actual) + return + end + ngx.say("ok") + } + } +--- request +GET /t +--- response_body +ok + + + +=== TEST 8: deepcopy copy same table only once +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local tmp = { name = "tmp", priority = 1, enabled = true } + local origin = { a = { b = tmp }, c = tmp} + local copy = core.table.deepcopy(origin) + if not core.table.deep_eq(copy, origin) then + ngx.say("copy: ", core.json.encode(copy), ", origin: ", core.json.encode(origin)) + return + end + if copy.a.b ~= copy.c then + ngx.say("copy.a.b should be the same as copy.c") + return + end + ngx.say("ok") + } + } +--- request +GET /t +--- response_body +ok + + + +=== TEST 9: reference same table +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local deepcopy = core.table.deepcopy + local tab1 = {name = "tab1"} + local tab2 = { + a = tab1, + b = tab1 + } + local tab_copied = deepcopy(tab2) + + ngx.say("table copied: ", require("toolkit.json").encode(tab_copied)) + + ngx.say("tab1 == tab2.a: ", tab1 == tab2.a) + ngx.say("tab2.a == tab2.b: ", tab2.a == tab2.b) + + ngx.say("tab_copied.a == tab1: ", tab_copied.a == tab1) + ngx.say("tab_copied.a == tab_copied.b: ", tab_copied.a == tab_copied.b) + } + } +--- request +GET /t +--- response_body +table copied: {"a":{"name":"tab1"},"b":{"name":"tab1"}} +tab1 == tab2.a: true +tab2.a == tab2.b: true +tab_copied.a == tab1: false +tab_copied.a == tab_copied.b: true + + + +=== TEST 10: reference table self(root node) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local deepcopy = core.table.deepcopy + local tab1 = {name = "tab1"} + local tab2 = { + a = tab1, + } + tab2.c = tab2
+ + local tab_copied = deepcopy(tab2) + + ngx.say("tab_copied.a == tab1: ", tab_copied.a == tab1) + ngx.say("tab_copied == tab_copied.c: ", tab_copied == tab_copied.c) + } + } +--- request +GET /t +--- response_body +tab_copied.a == tab1: false +tab_copied == tab_copied.c: true + + + +=== TEST 11: reference table self(sub node) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local deepcopy = core.table.deepcopy + local tab_org = { + a = { + a2 = "a2" + }, + } + tab_org.b = tab_org.a + + local tab_copied = deepcopy(tab_org) + ngx.say("table copied: ", require("toolkit.json").encode(tab_copied)) + ngx.say("tab_copied.a == tab_copied.b: ", tab_copied.a == tab_copied.b) + } + } +--- request +GET /t +--- response_body +table copied: {"a":{"a2":"a2"},"b":{"a2":"a2"}} +tab_copied.a == tab_copied.b: true + + + +=== TEST 12: shallow copy +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local deepcopy = core.table.deepcopy + local t1 = {name = "tab1"} + local t2 = {name = "tab2"} + local tab = { + a = {b = {c = t1}}, + x = {y = t2}, + } + local tab_copied = deepcopy(tab, { shallows = { "self.a.b.c" }}) + + ngx.say("table copied: ", require("toolkit.json").encode(tab_copied)) + + ngx.say("tab_copied.a.b.c == tab.a.b.c: ", tab_copied.a.b.c == tab.a.b.c) + ngx.say("tab_copied.a.b.c == t1: ", tab_copied.a.b.c == t1) + ngx.say("tab_copied.x.y == tab.x.y: ", tab_copied.x.y == tab.x.y) + ngx.say("tab_copied.x.y == t2: ", tab_copied.x.y == t2) + } + } +--- request +GET /t +--- response_body +table copied: {"a":{"b":{"c":{"name":"tab1"}}},"x":{"y":{"name":"tab2"}}} +tab_copied.a.b.c == tab.a.b.c: true +tab_copied.a.b.c == t1: true +tab_copied.x.y == tab.x.y: false +tab_copied.x.y == t2: false diff --git a/CloudronPackages/APISIX/apisix-source/t/core/timer.t b/CloudronPackages/APISIX/apisix-source/t/core/timer.t new file mode 100644 index 0000000..40cecb8 --- /dev/null +++
b/CloudronPackages/APISIX/apisix-source/t/core/timer.t @@ -0,0 +1,53 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local function job() + core.log.warn("job enter") + ngx.sleep(0.5) + core.log.warn("job exit") + end + + local ok = core.timer.new("test job", job, + {each_ttl = 2, check_interval = 0.1}) + ngx.say("create timer: ", type(ok)) + ngx.sleep(3) + } + } +--- request +GET /t +--- response_body +create timer: table +--- grep_error_log eval +qr/job (enter|exit)/ +--- grep_error_log_out eval +qr/(job enter\njob exit)+/ +--- timeout: 5 diff --git a/CloudronPackages/APISIX/apisix-source/t/core/uid.t b/CloudronPackages/APISIX/apisix-source/t/core/uid.t new file mode 100644 index 0000000..078eff1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/uid.t @@ -0,0 +1,42 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + + ngx.say("uid: ", core.id.get()) + } + } +--- request +GET /t +--- response_body_like eval +qr/uid: [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ +--- error_log +not found apisix uid, generate a new one diff --git a/CloudronPackages/APISIX/apisix-source/t/core/utils.t b/CloudronPackages/APISIX/apisix-source/t/core/utils.t new file mode 100644 index 0000000..9faa545 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/core/utils.t @@ -0,0 +1,395 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local get_seed = require("apisix.core.utils").get_seed_from_urandom + + ngx.say("random seed ", get_seed()) + ngx.say("twice: ", get_seed() == get_seed()) + } + } +--- request +GET /t +--- response_body_like eval +qr/random seed \d+(\.\d+)?(e\+\d+)?\ntwice: false/ + + + +=== TEST 2: parse_addr +--- config + location /t { + content_by_lua_block { + local parse_addr = require("apisix.core.utils").parse_addr + local cases = { + {addr = "127.0.0.1", host = "127.0.0.1"}, + {addr = "127.0.0.1:90", host = "127.0.0.1", port = 90}, + {addr = "www.test.com", host = "www.test.com"}, + {addr = "www.test.com:90", host = "www.test.com", port = 90}, + {addr = "localhost", host = "localhost"}, + {addr = "localhost:90", host = "localhost", port = 90}, + {addr = "[127.0.0.1:90", host = "[127.0.0.1:90"}, + {addr = "[::1]", host = "[::1]"}, + {addr = "[::1]:1234", host = "[::1]", port = 1234}, + {addr = "[::1234:1234]:12345", host = "[::1234:1234]", port = 12345}, + {addr = "::1", host = "::1"}, + } + for _, case in ipairs(cases) do + local host, port = parse_addr(case.addr) + assert(host == case.host, string.format("host %s mismatch %s", host, case.host)) + assert(port == case.port, string.format("port %s mismatch %s", port, case.port)) + end + } + } +--- request +GET /t + + + +=== TEST 3: specify resolvers +--- config + location /t { + content_by_lua_block { + local core 
= require("apisix.core") + local resolvers = {"8.8.8.8"} + core.utils.set_resolver(resolvers) + local domain = "github.com" + local ip_info, err = core.utils.dns_parse(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + end + ngx.say(require("toolkit.json").encode(ip_info)) + } + } +--- request +GET /t +--- response_body eval +qr/"address":.+,"name":"github.com"/ + + + +=== TEST 4: default resolvers +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local domain = "github.com" + local ip_info, err = core.utils.dns_parse(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + end + core.log.info("ip_info: ", require("toolkit.json").encode(ip_info)) + ngx.say("resolvers: ", require("toolkit.json").encode(core.utils.get_resolver())) + } + } +--- request +GET /t +--- response_body +resolvers: ["8.8.8.8","114.114.114.114"] +--- error_log eval +qr/"address":.+,"name":"github.com"/ + + + +=== TEST 5: enable_server_tokens false +--- yaml_config +apisix: + node_listen: 1984 + enable_server_tokens: false +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("failed") + return + end + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("127.0.0.1", 1984) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local req = "GET /hello HTTP/1.0\r\nHost: www.test.com\r\nConnection: close\r\n\r\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + ngx.say("sent http request: ", bytes, " bytes.") + + while true 
do + local line, err = sock:receive() + if not line then + -- ngx.say("failed to receive response status line: ", err) + break + end + + ngx.say("received: ", line) + end + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + } +} +--- request +GET /t +--- response_body eval +qr{connected: 1 +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX +received: \nreceived: hello world +close: 1 nil} + + + +=== TEST 6: resolve_var +--- config + location /t { + content_by_lua_block { + local resolve_var = require("apisix.core.utils").resolve_var + local cases = { + "", + "xx", + "$me", + "$me run", + "talk with $me", + "tell $me to", + "$you and $me", + "$eva and $me", + "$you and \\$me", + "${you}_${me}", + "${you}${me}", + "${you}$me", + } + local ctx = { + you = "John", + me = "David", + } + for _, case in ipairs(cases) do + local res = resolve_var(case, ctx) + ngx.say("res:", res) + end + } + } +--- request +GET /t +--- response_body +res: +res:xx +res:David +res:David run +res:talk with David +res:tell David to +res:John and David +res: and David +res:John and \$me +res:John_David +res:JohnDavid +res:JohnDavid + + + +=== TEST 7: resolve host from /etc/hosts +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local domain = "test.com" + local ip_info, err = core.utils.dns_parse(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + return + end + ngx.say("ip_info: ", require("toolkit.json").encode(ip_info)) + } + } +--- request +GET /t +--- response_body +ip_info: {"address":"127.0.0.1","class":1,"name":"test.com","ttl":315360000,"type":1} + + + +=== TEST 8: search host with '.org' suffix +--- yaml_config +apisix: + node_listen: 1984 + enable_resolv_search_opt: true +--- config + location /t { + content_by_lua_block { + local core = 
require("apisix.core") + local domain = "apisix" + local ip_info, err = core.utils.dns_parse(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + return + end + ngx.say("ip_info: ", require("toolkit.json").encode(ip_info)) + } + } +--- request +GET /t +--- response_body_like +.+"name":"apisix\.apache\.org".+ + + + +=== TEST 9: disable search option +--- yaml_config +apisix: + node_listen: 1984 + enable_resolv_search_opt: false +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local domain = "apisix" + local ip_info, err = core.utils.dns_parse(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + return + end + ngx.say("ip_info: ", require("toolkit.json").encode(ip_info)) + } + } +--- request +GET /t +--- error_log +error: failed to query the DNS server +--- timeout: 10 + + + +=== TEST 10: test dns config with ipv6 enable +--- yaml_config +apisix: + enable_ipv6: true +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local domain = "ipv6.local" + local ip_info, err = core.utils.dns_parse(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + return + end + ngx.say("ip_info: ", require("toolkit.json").encode(ip_info)) + } + } +--- request +GET /t +--- response_body +ip_info: {"address":"[::1]","class":1,"name":"ipv6.local","ttl":315360000,"type":28} + + + +=== TEST 11: test dns config with ipv6 disable +--- yaml_config +apisix: + enable_ipv6: false +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local domain = "ipv6.local" + local ip_info, err = core.utils.dns_parse(domain) + if not ip_info then + core.log.error("failed to parse domain: ", domain, ", error: ",err) + return + end + ngx.say("ip_info: ", require("toolkit.json").encode(ip_info)) + } + } +--- request +GET /t +--- error_log +failed to 
parse domain: ipv6.local + + + +=== TEST 12: get_last_index +--- config + location /t { + content_by_lua_block { + local string_rfind = require("pl.stringx").rfind + local cases = { + {"you are welcome", "co"}, + {"nice to meet you", "meet"}, + {"chicken run", "cc"}, + {"day day up", "day"}, + {"happy new year", "e"}, + {"apisix__1928", "__"} + } + + for _, case in ipairs(cases) do + local res = string_rfind(case[1], case[2]) + ngx.say("res:", res) + end + } + } +--- request +GET /t +--- response_body +res:12 +res:9 +res:nil +res:5 +res:12 +res:7 diff --git a/CloudronPackages/APISIX/apisix-source/t/coredns/Corefile b/CloudronPackages/APISIX/apisix-source/t/coredns/Corefile new file mode 100644 index 0000000..53dad79 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/coredns/Corefile @@ -0,0 +1,4 @@ +test.local { + file db.test.local + log +} diff --git a/CloudronPackages/APISIX/apisix-source/t/coredns/db.test.local b/CloudronPackages/APISIX/apisix-source/t/coredns/db.test.local new file mode 100644 index 0000000..8570835 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/coredns/db.test.local @@ -0,0 +1,53 @@ +$ORIGIN test.local. +@ 3600 IN SOA sns.dns.icann.org. noc.dns.icann.org. ( + 2017042745 ; serial + 7200 ; refresh (2 hours) + 3600 ; retry (1 hour) + 1209600 ; expire (2 weeks) + 3600 ; minimum (1 hour) + ) + + 3600 IN NS a.iana-servers.net. + 3600 IN NS b.iana-servers.net. 
+ + +sd IN A 127.0.0.1 +sd IN A 127.0.0.2 +ipv6.sd IN AAAA ::1 +mix.sd IN A 127.0.0.1 +mix.sd IN AAAA ::1 + +ipv6 IN AAAA ::1 + +ttl 300 IN A 127.0.0.1 +ttl.1s 1 IN A 127.0.0.1 + +; SRV +A IN A 127.0.0.1 +B IN A 127.0.0.2 +C IN A 127.0.0.3 +C IN A 127.0.0.4 +; RFC 2782 style +_sip._tcp.srv 86400 IN SRV 10 60 1980 A +_sip._tcp.srv 86400 IN SRV 10 20 1980 B +; standard style +srv 86400 IN SRV 10 60 1980 A +srv 86400 IN SRV 10 20 1980 B + +port.srv 86400 IN SRV 10 60 1980 A +port.srv 86400 IN SRV 10 20 1981 B + +zero-weight.srv 86400 IN SRV 10 60 1980 A +zero-weight.srv 86400 IN SRV 10 0 1980 B + +split-weight.srv 86400 IN SRV 10 100 1980 A +split-weight.srv 86400 IN SRV 10 0 1980 C + +priority.srv 86400 IN SRV 10 60 1979 A +priority.srv 86400 IN SRV 20 60 1980 B + +zero.srv 86400 IN SRV 10 60 0 A + +; a domain has both SRV & A records +srv-a 86400 IN SRV 10 60 1980 A +srv-a IN A 127.0.0.1 diff --git a/CloudronPackages/APISIX/apisix-source/t/debug/debug-mode.t b/CloudronPackages/APISIX/apisix-source/t/debug/debug-mode.t new file mode 100644 index 0000000..a41dacb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/debug/debug-mode.t @@ -0,0 +1,347 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +our $debug_config = t::APISIX::read_file("conf/debug.yaml"); +$debug_config =~ s/basic:\n enable: false/basic:\n enable: true/; + +run_tests; + +__DATA__ + +=== TEST 1: loaded plugin +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +loaded plugin and sort by priority: 23000 name: real-ip +loaded plugin and sort by priority: 22000 name: client-control +loaded plugin and sort by priority: 12015 name: request-id +loaded plugin and sort by priority: 12011 name: zipkin +loaded plugin and sort by priority: 12000 name: ext-plugin-pre-req +loaded plugin and sort by priority: 11000 name: fault-injection +loaded plugin and sort by priority: 10000 name: serverless-pre-function +loaded plugin and sort by priority: 4000 name: cors +loaded plugin and sort by priority: 3000 name: ip-restriction +loaded plugin and sort by priority: 2990 name: referer-restriction +loaded plugin and sort by priority: 2900 name: uri-blocker +loaded plugin and sort by priority: 2800 name: request-validation +loaded plugin and sort by priority: 2600 name: multi-auth +loaded plugin and sort by priority: 2599 name: openid-connect +loaded plugin and sort by priority: 2555 name: wolf-rbac +loaded plugin and sort by priority: 2530 name: hmac-auth +loaded plugin and sort by priority: 2520 name: basic-auth +loaded plugin and sort by priority: 2510 name: jwt-auth +loaded plugin and sort by priority: 2500 name: key-auth +loaded plugin and sort by priority: 2400 name: consumer-restriction +loaded plugin and sort by priority: 2000 name: authz-keycloak +loaded plugin and sort by priority: 1085 name: proxy-cache +loaded plugin and sort by priority: 1010 name: proxy-mirror +loaded plugin and sort by priority: 1008 name: proxy-rewrite +loaded plugin and sort by priority: 1005 name: 
api-breaker +loaded plugin and sort by priority: 1003 name: limit-conn +loaded plugin and sort by priority: 1002 name: limit-count +loaded plugin and sort by priority: 1001 name: limit-req +loaded plugin and sort by priority: 995 name: gzip +loaded plugin and sort by priority: 966 name: traffic-split +loaded plugin and sort by priority: 900 name: redirect +loaded plugin and sort by priority: 899 name: response-rewrite +loaded plugin and sort by priority: 506 name: grpc-transcode +loaded plugin and sort by priority: 500 name: prometheus +loaded plugin and sort by priority: 412 name: echo +loaded plugin and sort by priority: 410 name: http-logger +loaded plugin and sort by priority: 406 name: sls-logger +loaded plugin and sort by priority: 405 name: tcp-logger +loaded plugin and sort by priority: 403 name: kafka-logger +loaded plugin and sort by priority: 402 name: rocketmq-logger +loaded plugin and sort by priority: 401 name: syslog +loaded plugin and sort by priority: 400 name: udp-logger +loaded plugin and sort by priority: 398 name: clickhouse-logger +loaded plugin and sort by priority: 0 name: example-plugin +loaded plugin and sort by priority: -2000 name: serverless-post-function +loaded plugin and sort by priority: -3000 name: ext-plugin-post-req + + + +=== TEST 2: set route(no plugin) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: hit routes +--- debug_config eval: $::debug_config +--- request +GET /hello +--- response_body +hello world +--- response_headers +Apisix-Plugins: no plugin + + + +=== TEST 4: set route(one plugin) +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + }, + "limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit routes +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local ngx_re = require("ngx.re") + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + }) + local debug_header = res.headers["Apisix-Plugins"] + local arr = ngx_re.split(debug_header, ", ") + local hash = {} + for i, v in ipairs(arr) do + hash[v] = true + end + ngx.status = res.status + ngx.say(json.encode(hash)) + } + } +--- request +GET /t +--- response_body +{"limit-conn":true,"limit-count":true} + + + +=== TEST 6: global rule, header sent +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "status_code": 200, + "body": "yes\n" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit routes +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local ngx_re = require("ngx.re") + local 
http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + }) + local debug_header = res.headers["Apisix-Plugins"] + local arr = ngx_re.split(debug_header, ", ") + local hash = {} + for i, v in ipairs(arr) do + hash[v] = true + end + ngx.status = res.status + ngx.say(json.encode(hash)) + } + } +--- request +GET /t +--- response_body +{"limit-conn":true,"limit-count":true,"response-rewrite":true} +--- error_log +Apisix-Plugins: response-rewrite + + + +=== TEST 8: clear global routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: set stream route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: hit route +--- debug_config eval: $::debug_config +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- stream_response +hello world +--- error_log +mqtt client id: foo while prereading client data diff --git a/CloudronPackages/APISIX/apisix-source/t/debug/dynamic-hook.t b/CloudronPackages/APISIX/apisix-source/t/debug/dynamic-hook.t new file mode 100644 index 0000000..91c5104 --- 
/dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/debug/dynamic-hook.t @@ -0,0 +1,454 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +our $debug_config = t::APISIX::read_file("conf/debug.yaml"); +$debug_config =~ s/http_filter:\n enable: false/http_filter:\n enable: true/; +$debug_config =~ s/hook_conf:\n enable: false/hook_conf:\n enable: true/; + +run_tests(); + +__DATA__ + +=== TEST 1: dynamic enable +# ai module would conflict with the debug module +--- extra_yaml_config +plugins: + #- ai + - example-plugin +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.sleep(0.6) -- wait for sync + + local headers = {} + headers["X-APISIX-Dynamic-Debug"] = "" + local code, body = t('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t 
+--- wait: 2 +--- response_body +passed +--- error_log +call require("apisix").http_header_filter_phase() args:{} +call require("apisix").http_header_filter_phase() return:{} +call require("apisix").http_body_filter_phase() args:{} +call require("apisix").http_body_filter_phase() return:{} +call require("apisix").http_log_phase() args:{} +call require("apisix").http_log_phase() return:{} +--- no_error_log +call require("apisix").http_access_phase() return:{} +call require("apisix").http_access_phase() args:{} + + + +=== TEST 2: dynamic enable by per request and disable after handle request +# ai module would conflict with the debug module +--- extra_yaml_config +plugins: + #- ai + - example-plugin +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uris": ["/hello","/hello1"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.sleep(0.6) -- wait for sync + local http = require "resty.http" + local httpc = http.new() + local headers = {} + headers["X-APISIX-Dynamic-Debug"] = "" + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello1" + local res, err = httpc:request_uri(uri1, {method = "GET", headers = headers}) + if not res then + ngx.say(err) + return + end + + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + res, err = httpc:request_uri(uri2) + if not res then + ngx.say(err) + return + end + + ngx.print(res.body) + } + } +--- request +GET /t +--- wait: 2 +--- response_body +hello world +--- error_log eval +[qr/call\srequire\(\"apisix\"\).http_header_filter_phase\(\)\sargs\:\{\}.*GET\s\/hello1\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_header_filter_phase\(\)\sreturn\:\{\}.*GET\s\/hello1\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_body_filter_phase\(\)\sargs\:\{\}.*GET\s\/hello1\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_body_filter_phase\(\)\sreturn\:\{\}.*GET\s\/hello1\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_log_phase\(\)\sargs\:\{\}.*GET\s\/hello1\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_log_phase\(\)\sreturn\:\{\}.*GET\s\/hello1\sHTTP\/1.1/] +--- no_error_log eval +[qr/call\srequire\(\"apisix\"\).http_access_phase\(\)\sreturn\:\{\}.*GET\s\/hello1\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_access_phase\(\)\sargs\:\{\}.*GET\s\/hello\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_access_phase\(\)\sreturn\:\{\}.*GET\s\/hello\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_header_filter_phase\(\)\sargs\:\{\}.*GET\s\/hello\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_header_filter_phase\(\)\sreturn\:\{\}.*GET\s\/hello\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_body_filter_phase\(\)\sargs\:\{\}.*GET\s\/hello\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_body_filter_phase\(\)\sreturn\:\{\}.*GET\s\/hello\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_log_phase\(\)\sargs\:\{\}.*GET\s\/hello\sHTTP\/1.1/, +qr/call\srequire\(\"apisix\"\).http_log_phase\(\)\sreturn\:\{\}.*GET\s\/hello\sHTTP\/1.1/] + + + +=== TEST 3: error dynamic enable header +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.sleep(0.6) -- wait for sync + + local headers = {} + headers["X-APISIX-Dynamic-Error"] = "" + local code, body = t('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- wait: 2 +--- response_body +passed +--- no_error_log +call require("apisix").http_header_filter_phase() args:{} +call require("apisix").http_header_filter_phase() return:{} +call require("apisix").http_body_filter_phase() args:{} +call require("apisix").http_body_filter_phase() return:{} +call require("apisix").http_log_phase() args:{} +call require("apisix").http_log_phase() return:{} + + + +=== TEST 4: plugin filter log +--- debug_config +basic: + enable: true +http_filter: + enable: true # enable or disable this feature + enable_header_name: X-APISIX-Dynamic-Debug # the header name of dynamic enable +hook_conf: + enable: true # enable or disable this feature + name: hook_test # the name of module and function list + log_level: warn # log level + is_print_input_args: true # print the input arguments + is_print_return_value: true # print the return value + +hook_test: # module and function list, name: hook_test + apisix.plugin: # required module name + - filter # function name + +#END +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.sleep(0.6) -- wait for sync + + local headers = {} + 
headers["X-APISIX-Dynamic-Debug"] = "" -- has the header name of dynamic debug + local code, body = t('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.sleep(1.1) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- wait: 2 +--- response_body +passed +--- error_log +filter(): call require("apisix.plugin").filter() args:{ +filter(): call require("apisix.plugin").filter() return:{ + + + +=== TEST 5: multiple requests, only output logs of the request with enable_header_name +--- debug_config +basic: + enable: true +http_filter: + enable: true + enable_header_name: X-APISIX-Dynamic-Debug +hook_conf: + enable: true + name: hook_test + log_level: warn + is_print_input_args: true + is_print_return_value: true +hook_test: + apisix.plugin: + - filter +#END +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/mysleep*", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.sleep(0.6) -- wait for sync + + local res, err + local http = require "resty.http" + local httpc = http.new() + for i = 1, 3 do + if i == 1 then + local headers = {} + headers["X-APISIX-Dynamic-Debug"] = "" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/mysleep?seconds=1" + local res, err = httpc:request_uri(uri, {method = "GET", headers = headers}) + if not res then + ngx.say(err) + return + end + else + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/mysleep?seconds=0.1" + res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + end + end + + ngx.say("passed") + } + } +--- request +GET /t +--- wait: 2 +--- response_body +passed +--- error_log eval +qr/call\srequire\(\"apisix.plugin\"\).filter\(\)\sreturn.*GET\s\/mysleep\?seconds\=1\sHTTP\/1.1/ +--- no_error_log eval +qr/call\srequire\(\"apisix.plugin\"\).filter\(\)\sreturn.*GET\s\/mysleep\?seconds\=0.1\sHTTP\/1.1/ + + + +=== TEST 6: hook function with ctx as param +# ai module would conflict with the debug module +--- extra_yaml_config +plugins: + #ai + - example-plugin +--- debug_config +basic: + enable: true +http_filter: + enable: true # enable or disable this feature + enable_header_name: X-APISIX-Dynamic-Debug # the header name of dynamic enable +hook_conf: + enable: true # enable or disable this feature + name: hook_test # the name of module and function list + log_level: warn # log level + is_print_input_args: true # print the input arguments + is_print_return_value: true # print the return value + +hook_test: # module and function list, name: hook_test + apisix.balancer: # required module name + - pick_server # function name + +#END +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.sleep(0.6) -- wait for sync + + local headers = {} + headers["X-APISIX-Dynamic-Debug"] = "" + local code, body = t('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- wait: 2 +--- response_body +passed +--- error_log +call require("apisix.balancer").pick_server() args:{ +call require("apisix.balancer").pick_server() return:{ diff --git 
a/CloudronPackages/APISIX/apisix-source/t/debug/hook.t b/CloudronPackages/APISIX/apisix-source/t/debug/hook.t new file mode 100644 index 0000000..891e56c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/debug/hook.t @@ -0,0 +1,153 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +our $debug_config = t::APISIX::read_file("conf/debug.yaml"); +$debug_config =~ s/hook_conf:\n enable: false/hook_conf:\n enable: true/; + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "hosts": ["foo.com", "*.bar.com"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: phases log +--- debug_config eval: $::debug_config +--- request +GET /hello +--- more_headers +Host: foo.com +--- response_body +hello world +--- error_log +call require("apisix").http_header_filter_phase() args:{} +call require("apisix").http_header_filter_phase() return:{} +call require("apisix").http_body_filter_phase() args:{} +call require("apisix").http_body_filter_phase() return:{} +call require("apisix").http_log_phase() args:{} +call require("apisix").http_log_phase() return:{} + + + +=== TEST 4: plugin filter log +--- debug_config +basic: + enable: true +http_filter: + enable: true # enable or disable this feature + enable_header_name: X-APISIX-Dynamic-Debug # the header name of dynamic enable +hook_conf: + enable: true # enable or disable this feature + name: hook_test # the name of module and function list + log_level: warn # log level + is_print_input_args: true # print the input arguments + 
is_print_return_value: true # print the return value + +hook_test: # module and function list, name: hook_test + apisix.plugin: # required module name + - filter # function name + +#END +--- request +GET /hello +--- more_headers +Host: foo.com +X-APISIX-Dynamic-Debug: true +--- response_body +hello world +--- error_log +filter(): call require("apisix.plugin").filter() args:{ +filter(): call require("apisix.plugin").filter() return:{ + + + +=== TEST 5: missing hook_conf +--- debug_config +basic: + enable: true +http_filter: + enable: true # enable or disable this feature + enable_header_name: X-APISIX-Dynamic-Debug # the header name of dynamic enable + +hook_test: # module and function list, name: hook_test + apisix.plugin: # required module name + - filter # function name + +#END +--- request +GET /hello +--- more_headers +Host: foo.com +X-APISIX-Dynamic-Debug: true +--- response_body +hello world +--- error_log +read_debug_yaml(): failed to validate debug config property "hook_conf" is required +--- wait: 3 diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/consul.t b/CloudronPackages/APISIX/apisix-source/t/discovery/consul.t new file mode 100644 index 0000000..9ec8720 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/consul.t @@ -0,0 +1,783 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 20999; + + location / { + content_by_lua_block { + ngx.say("missing consul services") + } + } + } + + server { + listen 30511; + + location /hello { + content_by_lua_block { + ngx.say("server 1") + } + } + } + server { + listen 30512; + + location /hello { + content_by_lua_block { + ngx.say("server 2") + } + } + } + server { + listen 30513; + + location /hello { + content_by_lua_block { + ngx.say("server 3") + } + } + } + server { + listen 30514; + + location /hello { + content_by_lua_block { + ngx.say("server 4") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_control: true + control: + ip: 127.0.0.1 + port: 9090 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:8600" + skip_services: + - "service_c" + timeout: + connect: 1000 + read: 1000 + wait: 60 + weight: 1 + fetch_interval: 1 + keepalive: true + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +_EOC_ + +our $yaml_config_with_acl = <<_EOC_; +apisix: + node_listen: 1984 + enable_control: true + control: + ip: 127.0.0.1 + port: 9090 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8502" + token: "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a" + skip_services: + - "service_c" + timeout: + connect: 1000 + read: 1000 + wait: 60 + weight: 1 + fetch_interval: 1 + keepalive: true + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + 
fail_timeout: 1 + weight: 1 + max_fails: 1 +_EOC_ + + +run_tests(); + +__DATA__ + +=== TEST 1: prepare consul catalog register nodes +--- config +location /consul1 { + rewrite ^/consul1/(.*) /v1/agent/service/$1 break; + proxy_pass http://127.0.0.1:8500; +} + +location /consul2 { + rewrite ^/consul2/(.*) /v1/agent/service/$1 break; + proxy_pass http://127.0.0.1:8600; +} +--- pipelined_requests eval +[ + "PUT /consul1/deregister/service_a1", + "PUT /consul1/deregister/service_b1", + "PUT /consul1/deregister/service_a2", + "PUT /consul1/deregister/service_b2", + "PUT /consul2/deregister/service_a1", + "PUT /consul2/deregister/service_b1", + "PUT /consul2/deregister/service_a2", + "PUT /consul2/deregister/service_b2", + "PUT /consul1/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . "{\"ID\":\"service_a2\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30512,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . "{\"ID\":\"service_b1\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30513,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . 
"{\"ID\":\"service_b2\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30514,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", +] +--- error_code eval +[200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200] + + + +=== TEST 2: test consul server 1 +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]\n/, + qr/server [1-2]\n/, +] +--- no_error_log +[error, error] + + + +=== TEST 3: test consul server 2 +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_b + discovery_type: consul + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello" +] +--- response_body_like eval +[ + qr/server [3-4]\n/, + qr/server [3-4]\n/, +] +--- no_error_log +[error, error] + + + +=== TEST 4: test mini consul config +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:6500" +#END +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- request +GET /hello +--- response_body_like eval +qr/server [1-2]/ +--- ignore_error_log + + + +=== TEST 5: test invalid service name sometimes the consul key maybe deleted by mistake + +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_c + discovery_type: consul + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello_api", + "GET /hello_api" +] +--- response_body eval +[ + "missing consul services\n", + "missing consul services\n" 
+] +--- ignore_error_log + + + +=== TEST 6: test skip keys +skip some services, return default nodes, get response: missing consul services +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8600" + prefix: "upstreams" + skip_services: + - "service_a" + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- request +GET /hello +--- response_body eval +"missing consul services\n" +--- ignore_error_log + + + +=== TEST 7: test register and unregister nodes +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- timeout: 6 +--- request eval +[ + "PUT /v1/agent/service/deregister/service_a1", + "PUT /v1/agent/service/deregister/service_a2", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30513,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . 
"{\"ID\":\"service_a2\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30514,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + + "PUT /v1/agent/service/deregister/service_a1", + "PUT /v1/agent/service/deregister/service_a2", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_a2\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30512,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep?sec=5", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + +] +--- response_body_like eval +[ + qr//, + qr//, + qr//, + qr//, + qr/ok\n/, + + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + + qr//, + qr//, + qr//, + qr//, + qr/ok\n/, + + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/ +] +--- ignore_error_log + + + +=== TEST 8: clean nodes +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} +--- request eval +[ + "PUT /v1/agent/service/deregister/service_a1", + "PUT /v1/agent/service/deregister/service_a2", +] +--- error_code eval +[200, 200] + + + +=== TEST 9: test consul short connect type +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + keepalive: false + fetch_interval: 3 + default_service: + host: 
"127.0.0.1" + port: 20999 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- timeout: 6 +--- request eval +[ + "GET /hello", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep?sec=5", + "GET /hello", +] +--- response_body_like eval +[ + qr/missing consul services\n/, + qr//, + qr/ok\n/, + qr/server 1\n/ +] +--- ignore_error_log + + + +=== TEST 10: retry when Consul can't be reached (long connect type) +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8501" + keepalive: true + fetch_interval: 3 + default_service: + host: "127.0.0.1" + port: 20999 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- timeout: 4 +--- config +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- request +GET /sleep?sec=3 +--- response_body +ok +--- grep_error_log eval +qr/retry connecting consul after \d seconds/ +--- grep_error_log_out +retry connecting consul after 1 seconds +retry connecting consul after 4 seconds + + + +=== TEST 11: prepare healthy and unhealthy nodes +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} +--- request eval +[ + "PUT /v1/agent/service/deregister/service_a1", + "PUT 
/v1/agent/service/deregister/service_a2", + "PUT /v1/agent/service/deregister/service_b1", + "PUT /v1/agent/service/deregister/service_b2", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_b1\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30513,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_b2\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30514,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", +] +--- error_code eval +[200, 200, 200, 200, 200, 200] + + + +=== TEST 12: test health checker +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +upstreams: + - + service_name: service_b + discovery_type: consul + type: roundrobin + id: 1 + checks: + active: + http_path: "/hello" + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 1 +#END +--- config + location /thc { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + httpc:request_uri(uri, {method = "GET"}) + ngx.sleep(3) + + local code, body, res = t.test('/v1/healthcheck', + ngx.HTTP_GET) + res = json.decode(res) + local nodes = res[1].nodes + table.sort(nodes, function(a, b) + return a.port < b.port + end) + for _, node in ipairs(nodes) do + node.counter = nil + end + ngx.say(json.encode(nodes)) + + local code, body, res = t.test('/v1/healthcheck/upstreams/1', + ngx.HTTP_GET) + res = json.decode(res) + nodes = res.nodes + table.sort(nodes, function(a, b) + return a.port < b.port + end) + for _, node in ipairs(nodes) do + node.counter = nil + end + ngx.say(json.encode(nodes)) + } + } +--- request +GET /thc +--- response_body +[{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30513,"status":"healthy"},{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30514,"status":"healthy"}] +[{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30513,"status":"healthy"},{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30514,"status":"healthy"}] +--- ignore_error_log + + + +=== TEST 13: test consul catalog service change +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + keepalive: false + fetch_interval: 3 + default_service: + host: "127.0.0.1" + port: 20999 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} + +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- timeout: 6 +--- request eval +[ + "PUT /v1/agent/service/deregister/service_a1", + "GET /sleep?sec=3", + "GET /hello", + "PUT /v1/agent/service/register\n" . 
"{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep?sec=5", + "GET /hello", + "PUT /v1/agent/service/deregister/service_a1", + "GET /sleep?sec=5", + "GET /hello", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep?sec=5", + "GET /hello", +] +--- response_body_like eval +[ + qr//, + qr/ok\n/, + qr/missing consul services\n/, + qr//, + qr/ok\n/, + qr/server 1\n/, + qr//, + qr/ok\n/, + qr/missing consul services\n/, + qr//, + qr/ok\n/, + qr/server 1\n/, +] +--- ignore_error_log + + + +=== TEST 14: bootstrap acl +--- config +location /v1/acl { + proxy_pass http://127.0.0.1:8502; +} +--- request eval +"PUT /v1/acl/bootstrap\n" . "{\"BootstrapSecret\": \"2b778dd9-f5f1-6f29-b4b4-9a5fa948757a\"}" +--- error_code_like: ^(?:200|403)$ + + + +=== TEST 15: test register and unregister nodes with acl +--- yaml_config eval: $::yaml_config_with_acl +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service-a + discovery_type: consul + type: roundrobin +#END +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8502; + proxy_set_header X-Consul-Token "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a"; +} +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- timeout: 6 +--- pipelined_requests eval +[ + "PUT /v1/agent/service/register\n" . 
"{\"ID\":\"service-a1\",\"Name\":\"service-a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30513,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service-a2\",\"Name\":\"service-a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30514,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + + "PUT /v1/agent/service/deregister/service-a1", + "PUT /v1/agent/service/deregister/service-a2", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service-a1\",\"Name\":\"service-a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . 
"{\"ID\":\"service-a2\",\"Name\":\"service-a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30512,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep?sec=5", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + + "PUT /v1/agent/service/deregister/service-a1", + "PUT /v1/agent/service/deregister/service-a2", +] +--- response_body_like eval +[ + qr//, + qr//, + qr/ok\n/, + + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + + qr//, + qr//, + qr//, + qr//, + qr/ok\n/, + + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/, + + qr//, + qr// +] +--- ignore_error_log diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/consul2.t b/CloudronPackages/APISIX/apisix-source/t/discovery/consul2.t new file mode 100644 index 0000000..16f2f63 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/consul2.t @@ -0,0 +1,334 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 20999; + + location / { + content_by_lua_block { + ngx.say("missing consul services") + } + } + } + + server { + listen 30511; + + location /hello { + content_by_lua_block { + ngx.say("server 1") + } + } + } + server { + listen 30512; + + location /hello { + content_by_lua_block { + ngx.say("server 2") + } + } + } + server { + listen 30513; + + location /hello { + content_by_lua_block { + ngx.say("server 3") + } + } + } + server { + listen 30514; + + location /hello { + content_by_lua_block { + ngx.say("server 4") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_control: true + control: + ip: 127.0.0.1 + port: 9090 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:8600" + skip_services: + - "service_c" + timeout: + connect: 1000 + read: 1000 + wait: 60 + weight: 1 + fetch_interval: 1 + keepalive: true + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +_EOC_ + + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config +location /consul1 { + rewrite ^/consul1/(.*) /v1/agent/service/$1 break; + proxy_pass http://127.0.0.1:9500; +} + +location /consul2 { + rewrite ^/consul2/(.*) /v1/agent/service/$1 break; + proxy_pass http://127.0.0.1:9501; +} +location /consul3 { + rewrite ^/consul3/(.*) /v1/agent/service/$1 break; + proxy_pass http://127.0.0.1:9502; +} +--- pipelined_requests eval +[ + "PUT /consul1/deregister/service_a1", + "PUT /consul1/deregister/service_b1", + "PUT /consul1/deregister/service_a2", + "PUT /consul1/deregister/service_b2", + "PUT 
/consul1/deregister/service_a3", + "PUT /consul1/deregister/service_a4", + "PUT /consul1/deregister/service_no_port", + "PUT /consul2/deregister/service_a1", + "PUT /consul2/deregister/service_a2", + "PUT /consul3/deregister/service_a1", + "PUT /consul3/deregister/service_a2", + "PUT /consul1/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . "{\"ID\":\"service_a2\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30512,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . "{\"ID\":\"service_a3\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"localhost\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . "{\"ID\":\"service_a4\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"localhost\",\"Port\":30512,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . "{\"ID\":\"service_no_port\",\"Name\":\"service_no_port\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Meta\":{\"service_version\":\"1.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul2/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul2/register\n" . 
"{\"ID\":\"service_a2\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30512,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul3/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul3/register\n" . "{\"ID\":\"service_a2\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30512,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", +] +--- error_code eval +[200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200] + + + +=== TEST 2: show dump services without duplicates +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - "http://127.0.0.1:9500" + dump: + path: "consul.dump" + load_on_init: false +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + ngx.sleep(2) + + local code, body, res = t.test('/v1/discovery/consul/show_dump_file', + ngx.HTTP_GET) + local entity = json.decode(res) + ngx.say(json.encode(entity.services)) + } + } +--- timeout: 3 +--- request +GET /t +--- response_body +{"service_a":[{"host":"127.0.0.1","port":30511,"weight":1},{"host":"127.0.0.1","port":30512,"weight":1},{"host":"localhost","port":30511,"weight":1},{"host":"localhost","port":30512,"weight":1}],"service_no_port":[{"host":"127.0.0.1","port":80,"weight":1}]} + + + +=== TEST 3: show dump services with host_sort +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - "http://127.0.0.1:9500" + sort_type: host_sort + dump: + path: "consul.dump" + 
load_on_init: false +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + ngx.sleep(2) + + local code, body, res = t.test('/v1/discovery/consul/show_dump_file', + ngx.HTTP_GET) + local entity = json.decode(res) + ngx.say(json.encode(entity.services)) + } + } +--- timeout: 3 +--- request +GET /t +--- response_body +{"service_a":[{"host":"127.0.0.1","port":30511,"weight":1},{"host":"127.0.0.1","port":30512,"weight":1},{"host":"localhost","port":30511,"weight":1},{"host":"localhost","port":30512,"weight":1}],"service_no_port":[{"host":"127.0.0.1","port":80,"weight":1}]} + + + +=== TEST 4: show dump services with port sort +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - "http://127.0.0.1:9500" + sort_type: port_sort + dump: + path: "consul.dump" + load_on_init: false +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + ngx.sleep(2) + + local code, body, res = t.test('/v1/discovery/consul/show_dump_file', + ngx.HTTP_GET) + local entity = json.decode(res) + ngx.say(json.encode(entity.services)) + } + } +--- timeout: 3 +--- request +GET /t +--- response_body +{"service_a":[{"host":"127.0.0.1","port":30511,"weight":1},{"host":"localhost","port":30511,"weight":1},{"host":"127.0.0.1","port":30512,"weight":1},{"host":"localhost","port":30512,"weight":1}],"service_no_port":[{"host":"127.0.0.1","port":80,"weight":1}]} + + + +=== TEST 5: show dump services with combine sort +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - "http://127.0.0.1:9500" + sort_type: combine_sort + dump: + path: "consul.dump" + load_on_init: false +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + ngx.sleep(2) + + local code, body, res = 
t.test('/v1/discovery/consul/show_dump_file', + ngx.HTTP_GET) + local entity = json.decode(res) + ngx.say(json.encode(entity.services)) + } + } +--- timeout: 3 +--- request +GET /t +--- response_body +{"service_a":[{"host":"127.0.0.1","port":30511,"weight":1},{"host":"127.0.0.1","port":30512,"weight":1},{"host":"localhost","port":30511,"weight":1},{"host":"localhost","port":30512,"weight":1}],"service_no_port":[{"host":"127.0.0.1","port":80,"weight":1}]} + + + +=== TEST 6: verify service without port defaults to port 80 +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - "http://127.0.0.1:9500" + dump: + path: "consul.dump" + load_on_init: false +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + ngx.sleep(2) + + local code, body, res = t.test('/v1/discovery/consul/show_dump_file', + ngx.HTTP_GET) + local entity = json.decode(res) + + -- Check that service_no_port exists and has default port 80 + local service_no_port = entity.services.service_no_port + if service_no_port and #service_no_port > 0 then + ngx.say("service_no_port found with port: ", service_no_port[1].port) + else + ngx.say("service_no_port not found") + end + } + } +--- timeout: 3 +--- request +GET /t +--- response_body +service_no_port found with port: 80 diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/consul_dump.t b/CloudronPackages/APISIX/apisix-source/t/discovery/consul_dump.t new file mode 100644 index 0000000..9cb24a3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/consul_dump.t @@ -0,0 +1,511 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 30511; + + location /hello { + content_by_lua_block { + ngx.say("server 1") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: prepare nodes +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} +--- request eval +[ + "PUT /v1/agent/service/deregister/service_a1", + "PUT /v1/agent/service/deregister/service_a2", + "PUT /v1/agent/service/deregister/service_b1", + "PUT /v1/agent/service/deregister/service_b2", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . 
"{\"ID\":\"service_b1\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":8002,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", +] +--- response_body eval +--- error_code eval +[200, 200, 200, 200, 200, 200] + + + +=== TEST 2: show dump services +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + dump: + path: "consul.dump" + load_on_init: false +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + ngx.sleep(2) + + local code, body, res = t.test('/v1/discovery/consul/show_dump_file', + ngx.HTTP_GET) + local entity = json.decode(res) + ngx.say(json.encode(entity.services)) + } + } +--- timeout: 3 +--- request +GET /t +--- response_body +{"service_a":[{"host":"127.0.0.1","port":30511,"weight":1}],"service_b":[{"host":"127.0.0.1","port":8002,"weight":1}]} + + + +=== TEST 3: prepare dump file for next test +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + dump: + path: "/tmp/consul.dump" + load_on_init: false +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- request +GET /hello +--- response_body +server 1 + + + +=== TEST 4: clean registered nodes +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} +--- request eval +[ + "PUT /v1/agent/service/deregister/service_a1", + "PUT /v1/agent/service/deregister/service_b1", +] +--- error_code eval +[200, 200] + + + +=== TEST 5: test load dump on init +Configure the invalid consul server addr, and loading the last test 3 generated /tmp/consul.dump file into memory when initializing +--- yaml_config +apisix: + 
node_listen: 1984 + enable_control: true +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:38500" + dump: + path: "/tmp/consul.dump" + load_on_init: true +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- request +GET /hello +--- response_body +server 1 +--- error_log +connect consul + + + +=== TEST 6: delete dump file +--- config + location /t { + content_by_lua_block { + local util = require("apisix.cli.util") + local succ, err = util.execute_cmd("rm -f /tmp/consul.dump") + ngx.say(succ and "success" or err) + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 7: miss load dump on init +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:38500" + dump: + path: "/tmp/consul.dump" + load_on_init: true +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +connect consul +fetch nodes failed +failed to set upstream + + + +=== TEST 8: prepare expired dump file +--- config + location /t { + content_by_lua_block { + local util = require("apisix.cli.util") + local json = require("toolkit.json") + + local applications = json.decode('{"service_a":[{"host":"127.0.0.1","port":30511,"weight":1}]}') + local entity = { + services = applications, + last_update = ngx.time(), + expire = 10, + } + local succ, err = util.write_file("/tmp/consul.dump", json.encode(entity)) + + ngx.sleep(2) + ngx.say(succ and "success" or err) + } + } +--- timeout: 3 +--- request +GET /t +--- response_body +success + + + +=== TEST 9: unexpired dump +test load unexpired /tmp/consul.dump file generated by upper test when initializing + 
when initializing +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:38500" + dump: + path: "/tmp/consul.dump" + load_on_init: true + expire: 5 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- request +GET /hello +--- response_body +server 1 +--- error_log +connect consul + + + +=== TEST 10: expired dump +test load expired ( by check: (dump_file.last_update + dump.expire) < ngx.time ) ) /tmp/consul.dump file generated by upper test when initializing + when initializing +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:38500" + dump: + path: "/tmp/consul.dump" + load_on_init: true + expire: 1 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +dump file: /tmp/consul.dump had expired, ignored it + + + +=== TEST 11: delete dump file +--- config + location /t { + content_by_lua_block { + local util = require("apisix.cli.util") + local succ, err = util.execute_cmd("rm -f /tmp/consul.dump") + ngx.say(succ and "success" or err) + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 12: dump file inexistence +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - "http://127.0.0.1:38500" + dump: + path: "/tmp/consul.dump" +#END +--- request +GET /v1/discovery/consul/show_dump_file +--- error_code: 503 +--- error_log +connect consul + + + +=== TEST 13: no dump config +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - 
"http://127.0.0.1:38500" +#END +--- request +GET /v1/discovery/consul/show_dump_file +--- error_code: 503 +--- error_log +connect consul + + + +=== TEST 14: prepare nodes with different consul clusters +--- config +location /consul1 { + rewrite ^/consul1/(.*) /v1/agent/service/$1 break; + proxy_pass http://127.0.0.1:8500; +} + +location /consul2 { + rewrite ^/consul2/(.*) /v1/agent/service/$1 break; + proxy_pass http://127.0.0.1:8600; +} +--- pipelined_requests eval +[ + "PUT /consul1/deregister/service_a1", + "PUT /consul1/deregister/service_b1", + "PUT /consul1/deregister/service_a2", + "PUT /consul1/deregister/service_b2", + "PUT /consul2/deregister/service_a1", + "PUT /consul2/deregister/service_b1", + "PUT /consul2/deregister/service_a2", + "PUT /consul2/deregister/service_b2", + "PUT /consul1/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul2/register\n" . 
"{\"ID\":\"service_b1\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30517,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", +] +--- error_code eval +[200, 200, 200, 200, 200, 200, 200, 200, 200, 200] + + + +=== TEST 15: show dump services with different consul clusters +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:8600" + dump: + path: "consul.dump" + load_on_init: false +--- config + location /bonjour { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + ngx.sleep(2) + + local code, body, res = t.test('/v1/discovery/consul/show_dump_file', + ngx.HTTP_GET) + local entity = json.decode(res) + ngx.say(json.encode(entity.services)) + } + } +--- timeout: 3 +--- request +GET /bonjour +--- response_body +{"service_a":[{"host":"127.0.0.1","port":30511,"weight":1}],"service_b":[{"host":"127.0.0.1","port":30517,"weight":1}]} + + + +=== TEST 16: prepare nodes with consul health check +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} +--- request eval +[ + "PUT /v1/agent/service/deregister/service_a1", + "PUT /v1/agent/service/deregister/service_a2", + "PUT /v1/agent/service/deregister/service_b1", + "PUT /v1/agent/service/deregister/service_b2", + "PUT /v1/agent/service/register\n" . "{\"Checks\": [{\"http\": \"https://1.1.1.1\",\"interval\": \"1s\"}],\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . 
"{\"Checks\": [{\"http\": \"http://127.0.0.1:8002\",\"interval\": \"1s\"}],\"ID\":\"service_b1\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":8002,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}",
+]
+--- error_code eval
+[200, 200, 200, 200, 200, 200]
+--- wait: 2
+
+
+
+=== TEST 17: show dump services with consul health check
+--- yaml_config
+apisix:
+  node_listen: 1984
+  enable_control: true
+discovery:
+  consul:
+    servers:
+      - "http://127.0.0.1:8500"
+    dump:
+      path: "consul.dump"
+      load_on_init: false
+--- config
+    location /t {
+        content_by_lua_block {
+            local json = require("toolkit.json")
+            local t = require("lib.test_admin")
+            for i = 1, 3 do
+                ngx.sleep(2)
+                local code, body, res = t.test('/v1/discovery/consul/show_dump_file',
+                    ngx.HTTP_GET)
+                local entity = json.decode(res)
+                if entity.services and entity.services.service_a then
+                    ngx.say(json.encode(entity.services))
+                    return
+                end
+            end
+        }
+    }
+--- timeout: 8
+--- request
+GET /t
+--- response_body
+{"service_a":[{"host":"127.0.0.1","port":30511,"weight":1}]}
diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/consul_kv.t b/CloudronPackages/APISIX/apisix-source/t/discovery/consul_kv.t
new file mode 100644
index 0000000..0034997
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/t/discovery/consul_kv.t
@@ -0,0 +1,698 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 20999; + + location / { + content_by_lua_block { + ngx.say("missing consul_kv services") + } + } + } + + server { + listen 30511; + + location /hello { + content_by_lua_block { + ngx.say("server 1") + } + } + } + server { + listen 30512; + + location /hello { + content_by_lua_block { + ngx.say("server 2") + } + } + } + server { + listen 30513; + + location /hello { + content_by_lua_block { + ngx.say("server 3") + } + } + } + server { + listen 30514; + + location /hello { + content_by_lua_block { + ngx.say("server 4") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:8600" + prefix: "upstreams" + skip_keys: + - "upstreams/unused_api/" + timeout: + connect: 1000 + read: 1000 + wait: 60 + weight: 1 + fetch_interval: 1 + keepalive: true + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +_EOC_ + +our $yaml_config_with_acl = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8502" + token: 
"2b778dd9-f5f1-6f29-b4b4-9a5fa948757a" + prefix: "upstreams" + skip_keys: + - "upstreams/unused_api/" + timeout: + connect: 1000 + read: 1000 + wait: 60 + weight: 1 + fetch_interval: 1 + keepalive: true + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +_EOC_ + + +run_tests(); + +__DATA__ + +=== TEST 1: prepare consul kv register nodes +--- config +location /consul1 { + rewrite ^/consul1/(.*) /v1/kv/$1 break; + proxy_pass http://127.0.0.1:8500; +} + +location /consul2 { + rewrite ^/consul2/(.*) /v1/kv/$1 break; + proxy_pass http://127.0.0.1:8600; +} +--- pipelined_requests eval +[ + "DELETE /consul1/upstreams/webpages/?recurse=true", + "DELETE /consul2/upstreams/webpages/?recurse=true", + "PUT /consul1/upstreams/webpages/127.0.0.1:30511\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /consul1/upstreams/webpages/127.0.0.1:30512\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /consul2/upstreams/webpages/127.0.0.1:30513\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /consul2/upstreams/webpages/127.0.0.1:30514\n" . 
"{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", +] +--- response_body eval +["true", "true", "true", "true", "true", "true"] + + + +=== TEST 2: test consul server 1 +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]\n/, + qr/server [1-2]\n/, +] +--- no_error_log +[error, error] + + + +=== TEST 3: test consul server 2 +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8600/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello" +] +--- response_body_like eval +[ + qr/server [3-4]\n/, + qr/server [3-4]\n/, +] +--- no_error_log +[error, error] + + + +=== TEST 4: test mini consul_kv config +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:6500" +#END +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- request +GET /hello +--- response_body_like eval +qr/server [1-2]/ +--- ignore_error_log + + + +=== TEST 5: test invalid service name +sometimes the consul key maybe deleted by mistake + +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8600/v1/kv/upstreams/deleted_keys/ + discovery_type: consul_kv + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello_api", + "GET /hello_api" +] +--- response_body eval +[ + "missing consul_kv services\n", + "missing consul_kv services\n" +] 
+--- ignore_error_log + + + +=== TEST 6: test skip keys +skip some keys, return default nodes, get response: missing consul_kv services +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8600" + prefix: "upstreams" + skip_keys: + - "upstreams/webpages/" + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8600/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- request +GET /hello +--- response_body eval +"missing consul_kv services\n" +--- ignore_error_log + + + +=== TEST 7: test register and unregister nodes +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- config +location /v1/kv { + proxy_pass http://127.0.0.1:8500; +} +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- timeout: 6 +--- request eval +[ + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30511", + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30512", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30513\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30514\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "GET /sleep", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30513", + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30514", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30511\n" . 
"{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30512\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "GET /sleep?sec=5", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + +] +--- response_body_like eval +[ + qr/true/, + qr/true/, + qr/true/, + qr/true/, + qr/ok\n/, + + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + + qr/true/, + qr/true/, + qr/true/, + qr/true/, + qr/ok\n/, + + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/ +] +--- ignore_error_log + + + +=== TEST 8: prepare healthy and unhealthy nodes +--- config +location /v1/kv { + proxy_pass http://127.0.0.1:8500; +} +--- request eval +[ + "DELETE /v1/kv/upstreams/webpages/?recurse=true", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30511\n" . "{\"weight\": 1, \"max_fails\": 1, \"fail_timeout\": 1}", + "PUT /v1/kv/upstreams/webpages/127.0.0.2:1988\n" . "{\"weight\": 1, \"max_fails\": 1, \"fail_timeout\": 1}", +] +--- response_body eval +[ + 'true', + 'true', + 'true', +] + + + +=== TEST 9: test health checker +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +upstreams: + - + service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin + id: 1 + checks: + active: + http_path: "/hello" + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 1 +#END +--- config + location /thc { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + httpc:request_uri(uri, {method = "GET"}) + ngx.sleep(3) + + local code, body, res = t.test('/v1/healthcheck', + ngx.HTTP_GET) + res = json.decode(res) + local nodes = res[1].nodes + table.sort(nodes, function(a, b) + return a.ip < b.ip + end) + for _, node in ipairs(nodes) do + node.counter = nil + end + ngx.say(json.encode(nodes)) + + local code, body, res = t.test('/v1/healthcheck/upstreams/1', + ngx.HTTP_GET) + res = json.decode(res) + local nodes = res.nodes + table.sort(nodes, function(a, b) + return a.ip < b.ip + end) + for _, node in ipairs(nodes) do + node.counter = nil + end + ngx.say(json.encode(nodes)) + } + } +--- request +GET /thc +--- response_body +[{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30511,"status":"healthy"},{"hostname":"127.0.0.2","ip":"127.0.0.2","port":1988,"status":"unhealthy"}] +[{"hostname":"127.0.0.1","ip":"127.0.0.1","port":30511,"status":"healthy"},{"hostname":"127.0.0.2","ip":"127.0.0.2","port":1988,"status":"unhealthy"}] +--- ignore_error_log + + + +=== TEST 10: clean nodes +--- config +location /v1/kv { + proxy_pass http://127.0.0.1:8500; +} +--- request eval +[ + "DELETE /v1/kv/upstreams/webpages/?recurse=true" +] +--- response_body eval +[ + 'true' +] + + + +=== TEST 11: test consul_kv short connect type +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8500" + keepalive: false + fetch_interval: 3 + default_service: + host: "127.0.0.1" + port: 20999 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- config +location /v1/kv { + proxy_pass http://127.0.0.1:8500; +} +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- 
timeout: 6 +--- request eval +[ + "GET /hello", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30511\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "GET /sleep?sec=5", + "GET /hello", +] +--- response_body_like eval +[ + qr/missing consul_kv services\n/, + qr/true/, + qr/ok\n/, + qr/server 1\n/ +] +--- ignore_error_log + + + +=== TEST 12: retry when Consul can't be reached (long connect type) +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8501" + keepalive: true + fetch_interval: 3 + default_service: + host: "127.0.0.1" + port: 20999 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8501/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- timeout: 4 +--- config +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- request +GET /sleep?sec=3 +--- response_body +ok +--- grep_error_log eval +qr/retry connecting consul after \d seconds/ +--- grep_error_log_out +retry connecting consul after 1 seconds +retry connecting consul after 4 seconds + + + +=== TEST 13: bootstrap acl +--- config +location /v1/acl { + proxy_pass http://127.0.0.1:8502; +} +--- request eval +"PUT /v1/acl/bootstrap\n" . 
"{\"BootstrapSecret\": \"2b778dd9-f5f1-6f29-b4b4-9a5fa948757a\"}" +--- error_code_like: ^(?:200|403)$ + + + +=== TEST 14: test register and unregister nodes +--- yaml_config eval: $::yaml_config_with_acl +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8502/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- config +location /v1/kv { + proxy_pass http://127.0.0.1:8502; + proxy_set_header X-Consul-Token "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a"; +} +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- timeout: 6 +--- request eval +[ + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30511", + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30512", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30513\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30514\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "GET /sleep", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30513", + "DELETE /v1/kv/upstreams/webpages/127.0.0.1:30514", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30511\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /v1/kv/upstreams/webpages/127.0.0.1:30512\n" . 
"{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "GET /sleep?sec=5", + + "GET /hello?random1", + "GET /hello?random2", + "GET /hello?random3", + "GET /hello?random4", + +] +--- response_body_like eval +[ + qr/true/, + qr/true/, + qr/true/, + qr/true/, + qr/ok\n/, + + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + qr/server [3-4]\n/, + + qr/true/, + qr/true/, + qr/true/, + qr/true/, + qr/ok\n/, + + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/, + qr/server [1-2]\n/ +] +--- ignore_error_log diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/consul_kv_dump.t b/CloudronPackages/APISIX/apisix-source/t/discovery/consul_kv_dump.t new file mode 100644 index 0000000..96b7195 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/consul_kv_dump.t @@ -0,0 +1,390 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+no_long_string();
+no_root_location();
+no_shuffle();
+log_level("info");
+
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    my $http_config = $block->http_config // <<_EOC_;
+
+    server {
+        listen 30511;
+
+        location /hello {
+            content_by_lua_block {
+                ngx.say("server 1")
+            }
+        }
+    }
+_EOC_
+
+    $block->set_value("http_config", $http_config);
+});
+
+run_tests();
+
+__DATA__
+
+=== TEST 1: prepare nodes
+--- config
+location /v1/kv {
+    proxy_pass http://127.0.0.1:8500;
+}
+--- request eval
+[
+    "DELETE /v1/kv/upstreams/?recurse=true",
+    "PUT /v1/kv/upstreams/webpages/127.0.0.1:30511\n" . "{\"weight\": 1, \"max_fails\": 1, \"fail_timeout\": 1}",
+]
+--- response_body eval
+[
+    'true',
+    'true',
+]
+
+
+
+=== TEST 2: show dump services
+--- yaml_config
+apisix:
+  node_listen: 1984
+  enable_control: true
+discovery:
+  consul_kv:
+    servers:
+      - "http://127.0.0.1:8500"
+    dump:
+      path: "consul_kv.dump"
+      load_on_init: true
+--- config
+    location /t {
+        content_by_lua_block {
+            local json = require("toolkit.json")
+            local t = require("lib.test_admin")
+            ngx.sleep(2)
+
+            local code, body, res = t.test('/v1/discovery/consul_kv/show_dump_file',
+                ngx.HTTP_GET)
+            local entity = json.decode(res)
+            ngx.say(json.encode(entity.services))
+        }
+    }
+--- timeout: 3
+--- request
+GET /t
+--- response_body
+{"http://127.0.0.1:8500/v1/kv/upstreams/webpages/":[{"host":"127.0.0.1","port":30511,"weight":1}]}
+
+
+
+=== TEST 3: prepare dump file for next test
+--- yaml_config
+apisix:
+  node_listen: 1984
+  enable_control: true
+deployment:
+  role: data_plane
+  role_data_plane:
+    config_provider: yaml
+discovery:
+  consul_kv:
+    servers:
+      - "http://127.0.0.1:8500"
+    dump:
+      path: "/tmp/consul_kv.dump"
+      load_on_init: true
+#END
+--- apisix_yaml
+routes:
+  -
+    uri: /*
+    upstream:
+      service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/
+      discovery_type: consul_kv
+      type: roundrobin
+#END
+--- request
+GET /hello
+--- 
response_body +server 1 + + + +=== TEST 4: clean registered nodes +--- config +location /v1/kv { + proxy_pass http://127.0.0.1:8500; +} +--- request eval +[ + "DELETE /v1/kv/upstreams/?recurse=true", +] +--- response_body eval +[ + 'true' +] + + + +=== TEST 5: test load dump on init +Configure the invalid consul server addr, and loading the last test 3 generated /tmp/consul_kv.dump file into memory when initializing +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:38500" + dump: + path: "/tmp/consul_kv.dump" + load_on_init: true +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- request +GET /hello +--- response_body +server 1 +--- error_log +connect consul + + + +=== TEST 6: delete dump file +--- config + location /t { + content_by_lua_block { + local util = require("apisix.cli.util") + local succ, err = util.execute_cmd("rm -f /tmp/consul_kv.dump") + ngx.say(succ and "success" or err) + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 7: miss load dump on init +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:38500" + dump: + path: "/tmp/consul_kv.dump" + load_on_init: true +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +connect consul +fetch nodes failed +failed to set upstream + + + +=== TEST 8: prepare expired dump file +--- config + location /t { + content_by_lua_block { + local util = require("apisix.cli.util") + local json = 
require("toolkit.json")
+
+            local applications = json.decode('{"http://127.0.0.1:8500/v1/kv/upstreams/webpages/":[{"host":"127.0.0.1","port":30511,"weight":1}]}')
+            local entity = {
+                services = applications,
+                last_update = ngx.time(),
+                expire = 10,
+            }
+            local succ, err = util.write_file("/tmp/consul_kv.dump", json.encode(entity))
+
+            ngx.sleep(2)
+            ngx.say(succ and "success" or err)
+        }
+    }
+--- timeout: 3
+--- request
+GET /t
+--- response_body
+success
+
+
+
+=== TEST 9: unexpired dump
+test load unexpired /tmp/consul_kv.dump file generated by upper test when initializing
+--- yaml_config
+apisix:
+  node_listen: 1984
+  enable_control: true
+deployment:
+  role: data_plane
+  role_data_plane:
+    config_provider: yaml
+discovery:
+  consul_kv:
+    servers:
+      - "http://127.0.0.1:38500"
+    dump:
+      path: "/tmp/consul_kv.dump"
+      load_on_init: true
+      expire: 5
+#END
+--- apisix_yaml
+routes:
+  -
+    uri: /*
+    upstream:
+      service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/
+      discovery_type: consul_kv
+      type: roundrobin
+#END
+--- request
+GET /hello
+--- response_body
+server 1
+--- error_log
+connect consul
+
+
+
+=== TEST 10: expired dump
+test load expired ( by check: (dump_file.last_update + dump.expire) < ngx.time ) /tmp/consul_kv.dump file generated by upper test when initializing
+--- yaml_config
+apisix:
+  node_listen: 1984
+  enable_control: true
+deployment:
+  role: data_plane
+  role_data_plane:
+    config_provider: yaml
+discovery:
+  consul_kv:
+    servers:
+      - "http://127.0.0.1:38500"
+    dump:
+      path: "/tmp/consul_kv.dump"
+      load_on_init: true
+      expire: 1
+#END
+--- apisix_yaml
+routes:
+  -
+    uri: /*
+    upstream:
+      service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/
+      discovery_type: consul_kv
+      type: roundrobin
+#END
+--- request
+GET /hello
+--- error_code: 503
+--- error_log
+dump file: /tmp/consul_kv.dump had expired, ignored it
+
+
+
+=== TEST 11: delete dump file
+--- config
+    location /t {
+        
content_by_lua_block { + local util = require("apisix.cli.util") + local succ, err = util.execute_cmd("rm -f /tmp/consul_kv.dump") + ngx.say(succ and "success" or err) + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 12: dump file inexistence +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul_kv: + servers: + - "http://127.0.0.1:38500" + dump: + path: "/tmp/consul_kv.dump" +#END +--- request +GET /v1/discovery/consul_kv/show_dump_file +--- error_code: 503 +--- error_log +connect consul + + + +=== TEST 13: no dump config +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul_kv: + servers: + - "http://127.0.0.1:38500" +#END +--- request +GET /v1/discovery/consul_kv/show_dump_file +--- error_code: 503 +--- error_log +connect consul diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/dns/mix.t b/CloudronPackages/APISIX/apisix-source/t/discovery/dns/mix.t new file mode 100644 index 0000000..e849942 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/dns/mix.t @@ -0,0 +1,131 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + $ENV{CUSTOM_DNS_SERVER} = "127.0.0.1:1053"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: # service discovery center + dns: + servers: + - "127.0.0.1:1053" +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if ($block->apisix_yaml) { + my $upstream = <<_EOC_; +routes: + - + id: 1 + uris: + - /hello + upstream_id: 1 + - + id: 2 + uris: + - /hello_chunked + upstream_id: 2 +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $upstream); + } + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: mix cache between discovery & global resolver +--- log_level: debug +--- apisix_yaml +upstreams: + - + id: 1 + nodes: + ttl.1s.test.local:1980: 1 + type: roundrobin + - + id: 2 + service_name: "ttl.1s.test.local:1980" + discovery_type: dns + type: roundrobin +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello_chunked" + for i = 1, 2 do + for j = 1, 3 do + local httpc = http.new() + local res, err + if j % 2 ~= 0 then + res, err = httpc:request_uri(uri1, {method = "GET"}) + else + res, err = httpc:request_uri(uri2, {method = "GET"}) + end + + if not res or res.body ~= "hello world\n" then + ngx.say(err) + return + end + end + + -- It is expected to have 5 DNS queries + -- the first turn: one for global resolver & two for discovery (SRV, then A) + -- the second turn: each one for both global resolver & discovery + if i < 2 then + ngx.sleep(1.1) + end + end + } + } +--- request +GET /t +--- grep_error_log eval +qr/connect to 127.0.0.1:1053/ +--- grep_error_log_out +connect to 127.0.0.1:1053 +connect to 127.0.0.1:1053 +connect to 127.0.0.1:1053 +connect to 127.0.0.1:1053 +connect to 127.0.0.1:1053 +connect to 127.0.0.1:1053 diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/dns/sanity.t b/CloudronPackages/APISIX/apisix-source/t/discovery/dns/sanity.t new file mode 100644 index 0000000..4e0eaa7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/dns/sanity.t @@ -0,0 +1,463 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: # service discovery center + dns: + servers: + - "127.0.0.1:1053" +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if ($block->apisix_yaml) { + my $upstream = <<_EOC_; +routes: + - + id: 1 + uris: + - /hello + upstream_id: 1 +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $upstream); + } + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: default port to 53 +--- log_level: debug +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: # service discovery center + dns: + servers: + - "127.0.0.1" +--- apisix_yaml +upstreams: + - service_name: sd.test.local + discovery_type: dns + type: roundrobin + id: 1 +--- error_code: 503 +--- error_log +connect to 127.0.0.1:53 + + + +=== TEST 2: A +--- apisix_yaml +upstreams: + - service_name: "sd.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":1,"127.0.0.2:1980":1|"127.0.0.2:1980":1,"127.0.0.1:1980":1)\}/ +--- response_body +hello world + + + +=== TEST 3: AAAA +--- listen_ipv6 +--- apisix_yaml +upstreams: + - service_name: "ipv6.sd.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- response_body +hello world +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to [0:0:0:0:0:0:0:1]:1980 + + + +=== TEST 4: prefer A to AAAA +--- listen_ipv6 +--- apisix_yaml +upstreams: + - service_name: 
"mix.sd.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- response_body +hello world +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1980 + + + +=== TEST 5: no /etc/hosts +--- apisix_yaml +upstreams: + - service_name: test.com + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +failed to query the DNS server +--- error_code: 503 + + + +=== TEST 6: no /etc/resolv.conf +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false + enable_resolv_search_option: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: # service discovery center + dns: + servers: + - "127.0.0.1:1053" +--- apisix_yaml +upstreams: + - service_name: apisix + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +failed to query the DNS server +--- error_code: 503 + + + +=== TEST 7: SRV +--- apisix_yaml +upstreams: + - service_name: "srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":60,"127.0.0.2:1980":20|"127.0.0.2:1980":20,"127.0.0.1:1980":60)\}/ +--- response_body +hello world + + + +=== TEST 8: SRV (RFC 2782 style) +--- apisix_yaml +upstreams: + - service_name: "_sip._tcp.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":60,"127.0.0.2:1980":20|"127.0.0.2:1980":20,"127.0.0.1:1980":60)\}/ +--- response_body +hello world + + + +=== TEST 9: SRV (different port) +--- apisix_yaml +upstreams: + - service_name: "port.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":60,"127.0.0.2:1981":20|"127.0.0.2:1981":20,"127.0.0.1:1980":60)\}/ +--- 
response_body +hello world + + + +=== TEST 10: SRV (zero weight) +--- apisix_yaml +upstreams: + - service_name: "zero-weight.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":60,"127.0.0.2:1980":1|"127.0.0.2:1980":1,"127.0.0.1:1980":60)\}/ +--- response_body +hello world + + + +=== TEST 11: SRV (split weight) +--- apisix_yaml +upstreams: + - service_name: "split-weight.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{(,?"127.0.0.(1:1980":200|3:1980":1|4:1980":1)){3}\}/ +--- response_body +hello world + + + +=== TEST 12: SRV (priority) +--- apisix_yaml +upstreams: + - service_name: "priority.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- response_body +hello world +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1980 + + + +=== TEST 13: prefer SRV than A +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +proxy request to 127.0.0.1:1980 +--- response_body +hello world + + + +=== TEST 14: SRV (port is 0) +--- apisix_yaml +upstreams: + - service_name: "zero.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +connect() failed +--- error_code: 502 +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:80 + + + +=== TEST 15: SRV (override port) +--- apisix_yaml +upstreams: + - service_name: "port.srv.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: 
\{("127.0.0.1:1980":60,"127.0.0.2:1980":20|"127.0.0.2:1980":20,"127.0.0.1:1980":60)\}/ +--- response_body +hello world + + + +=== TEST 16: prefer A than SRV when A is ahead of SRV in config.yaml +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + dns: + servers: + - "127.0.0.1:1053" + order: + - A + - SRV +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- error_code: 502 +--- error_log +proxy request to 127.0.0.1:80 + + + +=== TEST 17: Invalid order type in config.yaml +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + dns: + servers: + - "127.0.0.1:1053" + order: + - B + - SRV +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- must_die +--- error_log +matches none of the enum values + + + +=== TEST 18: Multiple order type in config.yaml +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + dns: + servers: + - "127.0.0.1:1053" + order: + - SRV + - SRV +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- must_die +--- error_log +expected unique items but items 1 and 2 are equal + + + +=== TEST 19: invalid order type in config.yaml +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + dns: + servers: + - "127.0.0.1:1053" + order: + - a + - SRV +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- must_die +--- error_log +matches none of the enum values + + + +=== TEST 20: use resolv.conf +--- 
yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: # service discovery center + dns: + resolv_conf: build-cache/test_resolve.conf +--- apisix_yaml +upstreams: + - service_name: "sd.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":1,"127.0.0.2:1980":1|"127.0.0.2:1980":1,"127.0.0.1:1980":1)\}/ +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/eureka.t b/CloudronPackages/APISIX/apisix-source/t/discovery/eureka.t new file mode 100644 index 0000000..384ba7b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/eureka.t @@ -0,0 +1,117 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + eureka: + host: + - "http://127.0.0.1:8761" + prefix: "/eureka/" + fetch_interval: 10 + weight: 80 + timeout: + connect: 1500 + send: 1500 + read: 1500 +_EOC_ + +run_tests(); + +__DATA__ + +=== TEST 1: get APISIX-EUREKA info from EUREKA +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /eureka/* + upstream: + service_name: APISIX-EUREKA + discovery_type: eureka + type: roundrobin + +#END +--- request +GET /eureka/apps/APISIX-EUREKA +--- response_body_like +.*APISIX-EUREKA.* +--- error_log +use config_provider: yaml +default_weight:80. +fetch_interval:10. +eureka uri:http://127.0.0.1:8761/eureka/. +connect_timeout:1500, send_timeout:1500, read_timeout:1500. + + + +=== TEST 2: error service_name name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /eureka/* + upstream: + service_name: APISIX-EUREKA-DEMO + discovery_type: eureka + type: roundrobin + +#END +--- request +GET /eureka/apps/APISIX-EUREKA +--- error_code: 503 +--- error_log eval +qr/.* no valid upstream node.*/ + + + +=== TEST 3: with proxy-rewrite +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /eureka-test/* + plugins: + proxy-rewrite: + regex_uri: ["^/eureka-test/(.*)", "/${1}"] + upstream: + service_name: APISIX-EUREKA + discovery_type: eureka + type: roundrobin + +#END +--- request +GET /eureka-test/eureka/apps/APISIX-EUREKA +--- response_body_like +.*APISIX-EUREKA.* +--- error_log +use config_provider: yaml +default_weight:80. +fetch_interval:10. +eureka uri:http://127.0.0.1:8761/eureka/. +connect_timeout:1500, send_timeout:1500, read_timeout:1500. 
diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/nacos.t b/CloudronPackages/APISIX/apisix-source/t/discovery/nacos.t new file mode 100644 index 0000000..f2ebee5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/nacos.t @@ -0,0 +1,1068 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); +workers(4); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + prefix: "/nacos/v1/" + fetch_interval: 1 + weight: 1 + timeout: + connect: 2000 + send: 2000 + read: 5000 + +_EOC_ + +our $yaml_auth_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + nacos: + host: + - "http://nacos:nacos\@127.0.0.1:8848" + prefix: "/nacos/v1/" + fetch_interval: 1 + weight: 1 + timeout: + connect: 2000 + send: 2000 + read: 5000 + +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + $block->set_value("timeout", "10"); + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: get APISIX-NACOS info from NACOS - no auth +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] +--- no_error_log +[error, error] + + + +=== TEST 2: error service_name name - no auth +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 3: get APISIX-NACOS info from NACOS - auth +--- yaml_config eval: $::yaml_auth_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + +#END +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port + + -- Wait for 2 seconds for APISIX initialization + ngx.sleep(2) + local httpc = http.new() + local valid_responses = 0 + + for i = 1, 2 do + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.log(ngx.ERR, "Request failed: ", err) + else + -- Clean and validate response + local clean_body = res.body:gsub("%s+$", "") + if clean_body == "server 1" or clean_body == "server 2" then + valid_responses = valid_responses + 1 + else + ngx.log(ngx.ERR, "Invalid response: ", clean_body) + end + end + end + -- Final check + if valid_responses == 2 then + ngx.say("PASS") + else + ngx.say("FAIL: only ", valid_responses, " valid responses") + end + } + } +--- request +GET /t +--- response_body +PASS + + + +=== TEST 4: error service_name name - auth +--- yaml_config eval: $::yaml_auth_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 5: get APISIX-NACOS info from NACOS - configured in services +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 6: get APISIX-NACOS info from NACOS - configured in upstreams + etcd +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + 
ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + -- Wait for 2 seconds for APISIX initialization + ngx.sleep(2) + local httpc = http.new() + local valid_responses = 0 + + for i = 1, 2 do + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.log(ngx.ERR, "Request failed: ", err) + else + -- Clean and validate response + local clean_body = res.body:gsub("%s+$", "") + if clean_body == "server 1" or clean_body == "server 2" then + valid_responses = valid_responses + 1 + else + ngx.log(ngx.ERR, "Invalid response: ", clean_body) + end + end + end + -- Final check + if valid_responses == 2 then + ngx.say("PASS") + else + ngx.say("FAIL: only ", valid_responses, " valid responses") + end + } + } +--- request +GET /t +--- response_body +PASS + + + +=== TEST 8: get APISIX-NACOS info from NACOS - no auth with namespace +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: test_ns +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 9: error namespace_id - no auth +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: err_ns +#END +--- request +GET /hello +--- 
error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 10: get APISIX-NACOS info from NACOS - configured in services with namespace +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: test_ns +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 11: get APISIX-NACOS info from NACOS - configured in upstreams + etcd with namespace +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "test_ns" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit with namespace +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + -- Wait for 2 seconds for APISIX initialization + ngx.sleep(2) + local httpc = http.new() + local valid_responses = 0 + + for i = 1, 2 do + local res, err = httpc:request_uri(uri .. 
"/hello") + if not res then + ngx.log(ngx.ERR, "Request failed: ", err) + else + -- Clean and validate response + local clean_body = res.body:gsub("%s+$", "") + if clean_body == "server 1" or clean_body == "server 2" then + valid_responses = valid_responses + 1 + else + ngx.log(ngx.ERR, "Invalid response: ", clean_body) + end + end + end + -- Final check + if valid_responses == 2 then + ngx.say("PASS") + else + ngx.say("FAIL: only ", valid_responses, " valid responses") + end + } + } +--- request +GET /t +--- response_body +PASS + + + +=== TEST 13: get APISIX-NACOS info from NACOS - no auth with group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + group_name: test_group +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 14: error group_name - no auth +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + group_name: err_group_name +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 15: get APISIX-NACOS info from NACOS - configured in services with group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + group_name: test_group +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 16: get APISIX-NACOS info from NACOS - configured in upstreams + etcd with group_name +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" 
+ fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "group_name": "test_group" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: hit with group_name +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + -- Wait for 2 seconds for APISIX initialization + ngx.sleep(2) + local httpc = http.new() + local valid_responses = 0 + + for i = 1, 2 do + local res, err = httpc:request_uri(uri .. 
"/hello") + if not res then + ngx.log(ngx.ERR, "Request failed: ", err) + else + -- Clean and validate response + local clean_body = res.body:gsub("%s+$", "") + if clean_body == "server 1" or clean_body == "server 2" then + valid_responses = valid_responses + 1 + else + ngx.log(ngx.ERR, "Invalid response: ", clean_body) + end + end + end + -- Final check + if valid_responses == 2 then + ngx.say("PASS") + else + ngx.say("FAIL: only ", valid_responses, " valid responses") + end + } + } +--- request +GET /t +--- response_body +PASS + + + +=== TEST 18: get APISIX-NACOS info from NACOS - no auth with namespace_id and group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: test_ns + group_name: test_group +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 19: error group_name and correct namespace_id - no auth +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: test_ns + group_name: err_group_name +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 20: error namespace_id and correct group_name - no auth +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: err_ns + group_name: test_group +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 21: error namespace_id and error group_name - no auth +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: 
APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: err_ns + group_name: err_group_name +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 22: get APISIX-NACOS info from NACOS - configured in services with namespace_id and group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: test_ns + group_name: test_group +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 23: get APISIX-NACOS info from NACOS - configured in upstreams + etcd with namespace_id and group_name +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: hit with namespace_id and group_name +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port + + -- Wait for 2 seconds for APISIX initialization + ngx.sleep(2) + local httpc = http.new() + local valid_responses = 0 + + for i = 1, 2 do + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.log(ngx.ERR, "Request failed: ", err) + else + -- Clean and validate response + local clean_body = res.body:gsub("%s+$", "") + if clean_body == "server 1" or clean_body == "server 2" then + valid_responses = valid_responses + 1 + else + ngx.log(ngx.ERR, "Invalid response: ", clean_body) + end + end + end + -- Final check + if valid_responses == 2 then + ngx.say("PASS") + else + ngx.say("FAIL: only ", valid_responses, " valid responses") + end + } + } +--- request +GET /t +--- response_body +PASS + + + +=== TEST 25: same namespace_id and service_name, different group_name +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- use nacos-service5 + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + -- use nacos-service6 + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello1", + "upstream": { + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group2" + } + }, + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.sleep(1.5) + + local http = require "resty.http" + local httpc = http.new() + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri1, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + ngx.say(res.body) + + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + res, err = httpc:request_uri(uri2, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + ngx.say(res.body) + } + } +--- request +GET /t +--- response_body +server 1 +server 3 + + + +=== TEST 26: same group_name and service_name, different namespace_id +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- use nacos-service5 + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + -- use nacos-service7 + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello1", + "upstream": { + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "test_ns2", + "group_name": "test_group" + } + }, + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.sleep(1.5) + + local http = require "resty.http" + local httpc = http.new() + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri1, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + ngx.say(res.body) + + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello1" + res, err = httpc:request_uri(uri2, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + ngx.say(res.body) + } + } +--- request +GET /t +--- response_body +server 1 +server 4 diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/nacos2.t b/CloudronPackages/APISIX/apisix-source/t/discovery/nacos2.t new file mode 100644 index 0000000..e45d22c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/nacos2.t @@ -0,0 +1,342 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +workers(3); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: continue to get nacos data after failure in a service +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + nacos: + host: + - "http://127.0.0.1:20999" + prefix: "/nacos/v1/" + fetch_interval: 1 + weight: 1 + timeout: + connect: 2000 + send: 2000 + read: 5000 +--- apisix_yaml +routes: + - + uri: /hello_ + upstream: + service_name: NOT-NACOS + discovery_type: nacos + type: roundrobin + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin +#END +--- http_config + server { + listen 20999; + + location / { + access_by_lua_block { + if not package.loaded.hit then + package.loaded.hit = true + ngx.exit(502) + end + } + proxy_pass http://127.0.0.1:8858; + } + } +--- request +GET /hello +--- response_body_like eval +qr/server [1-2]/ +--- error_log +err:status = 502 + + + +=== TEST 2: change nacos server auth password +--- config + location /t { + content_by_lua_block { + local json = require("cjson") + local http = require("resty.http") + + local httpc = http.new() + local nacos_host = "http://127.0.0.1:8848" + local res, err = httpc:request_uri(nacos_host .. "/nacos/v1/auth/login", { + method = "POST", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + }, + body = ngx.encode_args({username = "nacos", password = "nacos"}), + }) + + if res.status ~= 200 then + ngx.say("nacos auth failed") + ngx.exit(401) + end + + local res_json = json.decode(res.body) + res, err = httpc:request_uri(nacos_host .. "/nacos/v1/auth/users?accessToken=" .. 
res_json["accessToken"], { + method = "PUT", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + }, + body = ngx.encode_args({username = "nacos", newPassword = "nacos!@#$%^&*()[]"}), + }) + if res.status ~= 200 then + ngx.say("nacos token auth failed") + ngx.say(res.body) + ngx.exit(401) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 3: test complex host +--- extra_yaml_config +discovery: + nacos: + host: + - "http://nacos:nacos!@#$%^&*()[]@127.0.0.1:8848" + fetch_interval: 1 +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + +#END +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + -- Wait for 2 seconds for APISIX initialization + ngx.sleep(2) + local httpc = http.new() + local valid_responses = 0 + + for i = 1, 2 do + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.log(ngx.ERR, "Request failed: ", err) + else + -- Clean and validate response + local clean_body = res.body:gsub("%s+$", "") + if clean_body == "server 1" or clean_body == "server 2" then + valid_responses = valid_responses + 1 + else + ngx.log(ngx.ERR, "Invalid response: ", clean_body) + end + end + end + -- Final check + if valid_responses == 2 then + ngx.say("PASS") + else + ngx.say("FAIL: only ", valid_responses, " valid responses") + end + } + } +--- request +GET /t +--- response_body +PASS + + + +=== TEST 4: restore nacos server auth password +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + +#END +--- config + location /t { + content_by_lua_block { + local json = require("cjson") + local http = require("resty.http") + + local httpc = http.new() + local nacos_host = "http://127.0.0.1:8848" + local res, err = httpc:request_uri(nacos_host .. 
"/nacos/v1/auth/login", { + method = "POST", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + }, + body = ngx.encode_args({username = "nacos", password = "nacos!@#$%^&*()[]"}), + }) + + if res.status ~= 200 then + ngx.say("nacos auth failed") + ngx.exit(401) + end + + local res_json = json.decode(res.body) + res, err = httpc:request_uri(nacos_host .. "/nacos/v1/auth/users?accessToken=" .. res_json["accessToken"], { + method = "PUT", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + }, + body = ngx.encode_args({username = "nacos", newPassword = "nacos"}), + }) + if res.status ~= 200 then + ngx.say("nacos token auth failed") + ngx.say(res.body) + ngx.exit(401) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 5: same service is registered in route, service and upstream, de-duplicate +--- yaml_config +apisix: + node_listen: 1984 +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "scheme": "http", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "public", + "group_name": "DEFAULT_GROUP" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "type": "roundrobin", + "scheme": "http", + "discovery_type": "nacos", + "pass_host": "pass", + "service_name": "APISIX-NACOS", + "discovery_args": { + "namespace_id": "public", + "group_name": "DEFAULT_GROUP" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "roundrobin", + "scheme": "http", + "discovery_type": "nacos", + 
"pass_host": "pass", + "service_name": "APISIX-NACOS", + "discovery_args": { + "namespace_id": "public", + "group_name": "DEFAULT_GROUP" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.sleep(1.5) + + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local dump_uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/v1/discovery/nacos/dump" + local res, err = httpc:request_uri(dump_uri, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + + local body = json_decode(res.body) + local services = body.services + local service = services["public.DEFAULT_GROUP.APISIX-NACOS"] + local number = table.getn(service.nodes) + ngx.say(number) + } + } +--- response_body +2 diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/nacos3.t b/CloudronPackages/APISIX/apisix-source/t/discovery/nacos3.t new file mode 100644 index 0000000..c712593 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/nacos3.t @@ -0,0 +1,638 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# we can't use mse nacos to test, access_key and secret_key won't affect the open source nacos +use t::APISIX 'no_plan'; + +workers(4); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + prefix: "/nacos/v1/" + fetch_interval: 1 + weight: 1 + timeout: + connect: 2000 + send: 2000 + read: 5000 + access_key: "my_access_key" + secret_key: "my_secret_key" + +_EOC_ + +run_tests(); + +__DATA__ + +=== TEST 1: error service_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 2: error namespace_id +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: err_ns +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 3: error group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + group_name: err_group_name +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 4: error namespace_id and error group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: err_ns + group_name: err_group_name +#END +--- request +GET /hello +--- 
error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 5: error group_name and correct namespace_id +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: test_ns + group_name: err_group_name +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 6: error namespace_id and correct group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: err_ns + group_name: test_group +#END +--- request +GET /hello +--- error_code: 503 +--- error_log +no valid upstream node + + + +=== TEST 7: get APISIX-NACOS info from NACOS - configured in services +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 8: get APISIX-NACOS info from NACOS - configured in services with group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + group_name: test_group +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 9: get APISIX-NACOS info from NACOS - configured in services with namespace_id +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + 
service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: test_ns +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 10: get APISIX-NACOS info from NACOS - configured in services with group_name and namespace_id +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + service_id: 1 +services: + - + id: 1 + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + group_name: test_group + namespace_id: test_ns +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 11: get APISIX-NACOS info from NACOS - configured in upstreams +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] +--- no_error_log +[error, error] + + + +=== TEST 12: get APISIX-NACOS info from NACOS - configured in upstreams with namespace_id +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: test_ns +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 13: get APISIX-NACOS info from NACOS - configured in upstreams with group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + group_name: test_group +#END +--- 
pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 14: get APISIX-NACOS info from NACOS - configured in upstreams with namespace_id and group_name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +routes: + - + uri: /hello + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin + discovery_args: + namespace_id: test_ns + group_name: test_group +#END +--- pipelined_requests eval +[ + "GET /hello", + "GET /hello", +] +--- response_body_like eval +[ + qr/server [1-2]/, + qr/server [1-2]/, +] + + + +=== TEST 15: get APISIX-NACOS info from NACOS - configured in upstreams + etcd +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 + access_key: "my_access_key" + secret_key: "my_secret_key" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: same namespace_id and service_name, different group_name +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 + access_key: "my_access_key" + secret_key: "my_secret_key" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- use nacos-service5 + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + 
"discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + -- use nacos-service6 + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello1", + "upstream": { + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group2" + } + }, + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.sleep(1.5) + + local http = require "resty.http" + local httpc = http.new() + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri1, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + ngx.say(res.body) + + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + res, err = httpc:request_uri(uri2, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + ngx.say(res.body) + } + } +--- request +GET /t +--- response_body +server 1 +server 3 + + + +=== TEST 17: same group_name and service_name, different namespace_id +--- extra_yaml_config +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + fetch_interval: 1 + access_key: "my_access_key" + secret_key: "my_secret_key" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- use nacos-service5 + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "test_ns", + "group_name": "test_group" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + -- use nacos-service7 + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": 
"/hello1", + "upstream": { + "service_name": "APISIX-NACOS", + "discovery_type": "nacos", + "type": "roundrobin", + "discovery_args": { + "namespace_id": "test_ns2", + "group_name": "test_group" + } + }, + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.sleep(1.5) + + local http = require "resty.http" + local httpc = http.new() + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri1, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + ngx.say(res.body) + + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + res, err = httpc:request_uri(uri2, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + ngx.say(res.body) + } + } +--- request +GET /t +--- response_body +server 1 +server 4 diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/reset-healthchecker.t b/CloudronPackages/APISIX/apisix-source/t/discovery/reset-healthchecker.t new file mode 100644 index 0000000..8612f1f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/reset-healthchecker.t @@ -0,0 +1,169 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +workers(1); + + + + +add_block_preprocessor(sub { + my ($block) = @_; + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + if ($block->apisix_yaml) { + my $upstream = <<_EOC_; +upstreams: + - service_name: mock + discovery_type: mock + type: roundrobin + checks: + active: + http_path: / + timeout: 1 + unhealthy: + tcp_failures: 30 + interval: 1 + healthy: + interval: 1 + id: 1 +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $upstream); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Validate healthchecker recreation on node count reduces to 1 +--- http_config +server { + listen 3000 ; + location / { + return 200 'ok'; + } +} +--- apisix_yaml +routes: + - + uris: + - / + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 3000, weight = 50}, + {host = "127.0.0.1", port = 8000, weight = 50}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.sleep(5) + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 3000, weight = 1} + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.body) + ngx.sleep(5) + } + } +--- request +GET /t +--- response_body +ok +--- timeout: 22 +--- no_error_log +unhealthy TCP increment (10/30) + + + +=== TEST 2: Validate healthchecker deletion on node count reduces to 0 +--- http_config +server { + listen 3000 ; + location / { + return 200 'ok'; + } +} +--- apisix_yaml +routes: + - + uris: + - / + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 3000, weight = 50}, + {host = "127.0.0.1", port = 8000, weight = 50}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.sleep(5) + discovery.mock = { + nodes = function() + return { + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.status = res.status + ngx.sleep(5) + } + } +--- request +GET /t +--- timeout: 22 +--- no_error_log +unhealthy TCP increment (10/30) +--- error_code: 503 diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/stream/consul.t b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/consul.t new file mode 100644 index 0000000..b50fae1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/consul.t @@ -0,0 +1,278 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 20999; + + location / { + content_by_lua_block { + ngx.say("missing consul services") + } + } + } + + server { + listen 30511; + + location /hello { + content_by_lua_block { + ngx.say("server 1") + } + } + } + server { + listen 30512; + + location /hello { + content_by_lua_block { + ngx.say("server 2") + } + } + } + server { + listen 30513; + + location /hello { + content_by_lua_block { + ngx.say("server 3") + } + } + } + server { + listen 30514; + + location /hello { + content_by_lua_block { + ngx.say("server 4") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!$block->stream_request) { + $block->set_value("stream_request", "GET /hello HTTP/1.1\r\nHost: 127.0.0.1:1985\r\nConnection: close\r\n\r\n"); + } +}); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_control: true + control: + ip: 127.0.0.1 + port: 9090 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:8600" + skip_services: + - "service_c" + timeout: + connect: 1000 + read: 1000 + wait: 60 + weight: 1 + fetch_interval: 1 + keepalive: true + default_service: 
+ host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +_EOC_ + + +run_tests(); + +__DATA__ + +=== TEST 1: prepare consul catalog register nodes +--- config +location /consul1 { + rewrite ^/consul1/(.*) /v1/agent/service/$1 break; + proxy_pass http://127.0.0.1:8500; +} + +location /consul2 { + rewrite ^/consul2/(.*) /v1/agent/service/$1 break; + proxy_pass http://127.0.0.1:8600; +} +--- pipelined_requests eval +[ + "PUT /consul1/deregister/service_a1", + "PUT /consul1/deregister/service_b1", + "PUT /consul1/deregister/service_a2", + "PUT /consul1/deregister/service_b2", + "PUT /consul2/deregister/service_a1", + "PUT /consul2/deregister/service_b1", + "PUT /consul2/deregister/service_a2", + "PUT /consul2/deregister/service_b2", + "PUT /consul1/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . "{\"ID\":\"service_a2\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30512,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . "{\"ID\":\"service_b1\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30513,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /consul1/register\n" . 
"{\"ID\":\"service_b2\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30514,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", +] +--- error_code eval +[200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200] + + + +=== TEST 2: test consul server 1 +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- stream_response eval +qr/server [1-2]/ +--- no_error_log +[error] + + + +=== TEST 3: test consul server 2 +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: service_b + discovery_type: consul + type: roundrobin +#END +--- stream_response eval +qr/server [3-4]/ +--- no_error_log +[error] + + + +=== TEST 4: test mini consul config +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:6500" +#END +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- stream_response eval +qr/server [1-2]/ +--- ignore_error_log + + + +=== TEST 5: test invalid service name +sometimes the consul key maybe deleted by mistake +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: service_c + discovery_type: consul + type: roundrobin +#END +--- stream_response_like +missing consul services +--- ignore_error_log + + + +=== TEST 6: test skip keys +skip some services, return default nodes, get response: missing consul services +--- yaml_config +apisix: + 
node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8600" + prefix: "upstreams" + skip_services: + - "service_a" + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +#END +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- stream_response_like +missing consul services +--- ignore_error_log diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/stream/consul_kv.t b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/consul_kv.t new file mode 100644 index 0000000..5ef1e65 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/consul_kv.t @@ -0,0 +1,269 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 20999; + + location / { + content_by_lua_block { + ngx.say("missing consul_kv services") + } + } + } + + server { + listen 30511; + + location /hello { + content_by_lua_block { + ngx.say("server 1") + } + } + } + server { + listen 30512; + + location /hello { + content_by_lua_block { + ngx.say("server 2") + } + } + } + server { + listen 30513; + + location /hello { + content_by_lua_block { + ngx.say("server 3") + } + } + } + server { + listen 30514; + + location /hello { + content_by_lua_block { + ngx.say("server 4") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!$block->stream_request) { + $block->set_value("stream_request", "GET /hello HTTP/1.1\r\nHost: 127.0.0.1:1985\r\nConnection: close\r\n\r\n"); + } +}); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:8600" + prefix: "upstreams" + skip_keys: + - "upstreams/unused_api/" + timeout: + connect: 1000 + read: 1000 + wait: 60 + weight: 1 + fetch_interval: 1 + keepalive: true + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +_EOC_ + + +run_tests(); + +__DATA__ + +=== TEST 1: prepare consul kv register nodes +--- config +location /consul1 { + rewrite ^/consul1/(.*) /v1/kv/$1 break; + proxy_pass http://127.0.0.1:8500; +} + +location /consul2 { + rewrite ^/consul2/(.*) /v1/kv/$1 break; + proxy_pass http://127.0.0.1:8600; +} +--- pipelined_requests eval +[ + "DELETE /consul1/upstreams/webpages/?recurse=true", + "DELETE /consul2/upstreams/webpages/?recurse=true", + "PUT /consul1/upstreams/webpages/127.0.0.1:30511\n" . 
"{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /consul1/upstreams/webpages/127.0.0.1:30512\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /consul2/upstreams/webpages/127.0.0.1:30513\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", + "PUT /consul2/upstreams/webpages/127.0.0.1:30514\n" . "{\"weight\": 1, \"max_fails\": 2, \"fail_timeout\": 1}", +] +--- response_body eval +["true", "true", "true", "true", "true", "true"] + + + +=== TEST 2: test consul server 1 +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- stream_response eval +qr/server [1-2]/ +--- no_error_log +[error] + + + +=== TEST 3: test consul server 2 +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: http://127.0.0.1:8600/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- stream_response eval +qr/server [3-4]/ +--- no_error_log +[error] + + + +=== TEST 4: test mini consul_kv config +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8500" + - "http://127.0.0.1:6500" +#END +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: http://127.0.0.1:8500/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- stream_response eval +qr/server [1-2]/ +--- ignore_error_log + + + +=== TEST 5: test invalid service name +sometimes the consul key maybe deleted by mistake +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + 
service_name: http://127.0.0.1:8600/v1/kv/upstreams/deleted_keys/ + discovery_type: consul_kv + type: roundrobin +#END +--- stream_response_like +missing consul_kv services +--- ignore_error_log + + + +=== TEST 6: test skip keys +skip some keys, return default nodes, get response: missing consul_kv services +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul_kv: + servers: + - "http://127.0.0.1:8600" + prefix: "upstreams" + skip_keys: + - "upstreams/webpages/" + default_service: + host: "127.0.0.1" + port: 20999 + metadata: + fail_timeout: 1 + weight: 1 + max_fails: 1 +#END +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: http://127.0.0.1:8600/v1/kv/upstreams/webpages/ + discovery_type: consul_kv + type: roundrobin +#END +--- stream_response_like +missing consul_kv services +--- ignore_error_log diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/stream/dns.t b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/dns.t new file mode 100644 index 0000000..01dbf2e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/dns.t @@ -0,0 +1,342 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: # service discovery center + dns: + servers: + - "127.0.0.1:1053" +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if ($block->apisix_yaml) { + my $upstream = <<_EOC_; +stream_routes: + - id: 1 + server_port: 1985 + upstream_id: 1 +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $upstream); + } + + if (!$block->stream_request) { + $block->set_value("stream_request", "GET /hello HTTP/1.0\r\nHost: 127.0.0.1:1985\r\n\r\n"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: default port to 53 +--- log_level: debug +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: # service discovery center + dns: + servers: + - "127.0.0.1" +--- apisix_yaml +upstreams: + - service_name: sd.test.local + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +connect to 127.0.0.1:53 + + + +=== TEST 2: A +--- apisix_yaml +upstreams: + - service_name: "sd.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":1,"127.0.0.2:1980":1|"127.0.0.2:1980":1,"127.0.0.1:1980":1)\}/ +--- stream_response_like +hello world + + + +=== TEST 3: AAAA +--- listen_ipv6 +--- apisix_yaml +upstreams: + - service_name: "ipv6.sd.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- stream_response_like +hello world +--- grep_error_log eval +qr/proxy request to \S+/ +--- 
grep_error_log_out +proxy request to [0:0:0:0:0:0:0:1]:1980 + + + +=== TEST 4: prefer A to AAAA +--- listen_ipv6 +--- apisix_yaml +upstreams: + - service_name: "mix.sd.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- stream_response_like +hello world +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1980 + + + +=== TEST 5: no /etc/hosts +--- apisix_yaml +upstreams: + - service_name: test.com + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +failed to query the DNS server + + + +=== TEST 6: no /etc/resolv.conf +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false + enable_resolv_search_option: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: # service discovery center + dns: + servers: + - "127.0.0.1:1053" +--- apisix_yaml +upstreams: + - service_name: apisix + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +failed to query the DNS server + + + +=== TEST 7: SRV +--- apisix_yaml +upstreams: + - service_name: "srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":60,"127.0.0.2:1980":20|"127.0.0.2:1980":20,"127.0.0.1:1980":60)\}/ +--- stream_response_like +hello world + + + +=== TEST 8: SRV (RFC 2782 style) +--- apisix_yaml +upstreams: + - service_name: "_sip._tcp.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":60,"127.0.0.2:1980":20|"127.0.0.2:1980":20,"127.0.0.1:1980":60)\}/ +--- stream_response_like +hello world + + + +=== TEST 9: SRV (different port) +--- apisix_yaml +upstreams: + - service_name: "port.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- 
grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":60,"127.0.0.2:1981":20|"127.0.0.2:1981":20,"127.0.0.1:1980":60)\}/ +--- stream_response_like +hello world + + + +=== TEST 10: SRV (zero weight) +--- apisix_yaml +upstreams: + - service_name: "zero-weight.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{("127.0.0.1:1980":60,"127.0.0.2:1980":1|"127.0.0.2:1980":1,"127.0.0.1:1980":60)\}/ +--- stream_response_like +hello world + + + +=== TEST 11: SRV (split weight) +--- apisix_yaml +upstreams: + - service_name: "split-weight.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: \{(,?"127.0.0.(1:1980":200|3:1980":1|4:1980":1)){3}\}/ +--- stream_response_like +hello world + + + +=== TEST 12: SRV (priority) +--- apisix_yaml +upstreams: + - service_name: "priority.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- stream_response_like +hello world +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1980 + + + +=== TEST 13: prefer SRV than A +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +proxy request to 127.0.0.1:1980 +--- stream_response_like +hello world + + + +=== TEST 14: SRV (port is 0) +--- apisix_yaml +upstreams: + - service_name: "zero.srv.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +no valid upstream node + + + +=== TEST 15: SRV (override port) +--- apisix_yaml +upstreams: + - service_name: "port.srv.test.local:1980" + discovery_type: dns + type: roundrobin + id: 1 +--- grep_error_log eval +qr/upstream nodes: \{[^}]+\}/ +--- grep_error_log_out eval +qr/upstream nodes: 
\{("127.0.0.1:1980":60,"127.0.0.2:1980":20|"127.0.0.2:1980":20,"127.0.0.1:1980":60)\}/ +--- stream_response_like +hello world + + + +=== TEST 16: prefer A than SRV when A is ahead of SRV in config.yaml +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + dns: + servers: + - "127.0.0.1:1053" + order: + - A + - SRV +--- apisix_yaml +upstreams: + - service_name: "srv-a.test.local" + discovery_type: dns + type: roundrobin + id: 1 +--- error_log +no valid upstream node diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/stream/eureka.t b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/eureka.t new file mode 100644 index 0000000..df01ddd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/eureka.t @@ -0,0 +1,95 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + eureka: + host: + - "http://127.0.0.1:8761" + prefix: "/eureka/" + fetch_interval: 10 + weight: 80 + timeout: + connect: 1500 + send: 1500 + read: 1500 +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->stream_request) { + $block->set_value("stream_request", "GET /eureka/apps/APISIX-EUREKA HTTP/1.1\r\nHost: 127.0.0.1:1985\r\nConnection: close\r\n\r\n"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: get APISIX-EUREKA info from EUREKA +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - + id: 1 + server_port: 1985 + upstream: + service_name: APISIX-EUREKA + discovery_type: eureka + type: roundrobin + +#END +--- stream_response_like +.*APISIX-EUREKA.* +--- error_log +use config_provider: yaml +default_weight:80. +fetch_interval:10. +eureka uri:http://127.0.0.1:8761/eureka/. +connect_timeout:1500, send_timeout:1500, read_timeout:1500. + + + +=== TEST 2: error service_name name +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - + id: 1 + server_port: 1985 + upstream: + service_name: APISIX-EUREKA-DEMO + discovery_type: eureka + type: roundrobin + +#END +--- error_log eval +qr/.* no valid upstream node.*/ diff --git a/CloudronPackages/APISIX/apisix-source/t/discovery/stream/nacos.t b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/nacos.t new file mode 100644 index 0000000..1a1053c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/discovery/stream/nacos.t @@ -0,0 +1,92 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); +workers(4); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + nacos: + host: + - "http://127.0.0.1:8858" + prefix: "/nacos/v1/" + fetch_interval: 1 + weight: 1 + timeout: + connect: 2000 + send: 2000 + read: 5000 + +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->stream_request) { + $block->set_value("stream_request", "GET /hello HTTP/1.1\r\nHost: 127.0.0.1:1985\r\nConnection: close\r\n\r\n"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: get APISIX-NACOS info from NACOS - no auth +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: APISIX-NACOS + discovery_type: nacos + type: roundrobin +#END +--- stream_response eval +qr/server [1-2]/ +--- no_error_log +[error] + + + +=== TEST 2: error service_name name - no auth +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - server_addr: 127.0.0.1 + server_port: 1985 + id: 1 + upstream: + service_name: APISIX-NACOS-DEMO + discovery_type: nacos + type: roundrobin +#END +--- error_log +no valid upstream node diff --git 
a/CloudronPackages/APISIX/apisix-source/t/error_page/error_page.t b/CloudronPackages/APISIX/apisix-source/t/error_page/error_page.t new file mode 100644 index 0000000..c511856 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/error_page/error_page.t @@ -0,0 +1,239 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +# We put the error page into apisix-runtime. It is fine since this installation is the default. 
+if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: set route with serverless-post-function plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "functions" : ["return function() if ngx.var.http_x_test_status ~= nil then;ngx.exit(tonumber(ngx.var.http_x_test_status));end;end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: test apisix with internal error code 500 +--- request +GET /hello +--- more_headers +X-Test-Status: 500 +--- error_code: 500 +--- response_body_like +.*apisix.apache.org.* + + + +=== TEST 3: test apisix with internal error code 502 +--- request +GET /hello +--- more_headers +X-Test-Status: 502 +--- error_code: 502 +--- response_body eval +qr/502 Bad Gateway/ + + + +=== TEST 4: test apisix with internal error code 503 +--- request +GET /hello +--- more_headers +X-Test-Status: 503 +--- error_code: 503 +--- response_body eval +qr/503 Service Temporarily Unavailable/ + + + +=== TEST 5: test apisix with internal error code 504 +--- request +GET /hello +--- more_headers +X-Test-Status: 504 +--- error_code: 504 +--- response_body eval +qr/504 Gateway Time-out/ + + + +=== TEST 6: test apisix with upstream error code 500 +--- request +GET /specific_status +--- more_headers +X-Test-Upstream-Status: 500 +--- error_code: 500 +--- response_body +upstream status: 500 + + + +=== TEST 7: test apisix with internal error code 500, method isn't GET or HEAD +--- request +POST /hello +123 +--- 
more_headers +X-Test-Status: 500 +--- error_code: 500 +--- response_body_like +.*apisix.apache.org.* + + + +=== TEST 8: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 9: set route which upstream is blocking +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/mysleep" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: check if the phases after proxy are run when 500 happens before proxy +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "functions" : ["return function() if ngx.var.http_x_test_status ~= nil then;ngx.exit(tonumber(ngx.var.http_x_test_status));end;end"] + }, + "serverless-pre-function": { + "phase": "log", + "functions" : ["return function() ngx.log(ngx.WARN, 'run log phase in error_page') end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: hit +--- request +GET /hello +--- more_headers +X-Test-Status: 500 +--- error_code: 500 +--- response_body_like +.*apisix.apache.org.* +--- error_log +run log phase in error_page diff --git 
a/CloudronPackages/APISIX/apisix-source/t/fake-plugin-exit.lua b/CloudronPackages/APISIX/apisix-source/t/fake-plugin-exit.lua new file mode 100644 index 0000000..a654874 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/fake-plugin-exit.lua @@ -0,0 +1,46 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") + +local schema = { + type = "object", + properties = { + } +} + + +local plugin_name = "uri-blocker" + +local _M = { + version = 0.1, + priority = 2900, + name = plugin_name, + schema = schema, +} + + +function _M.check_schema(conf) + return true +end + + +function _M.rewrite(conf, ctx) + core.respond.exit(400) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/fuzzing/client_abort.py b/CloudronPackages/APISIX/apisix-source/t/fuzzing/client_abort.py new file mode 100755 index 0000000..80bf338 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/fuzzing/client_abort.py @@ -0,0 +1,74 @@ +#! /usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import http.client +import subprocess +import time +import threading +from public import check_leak, run_test +import yaml + +def get_admin_key_from_yaml(yaml_file_path): + with open(yaml_file_path, 'r') as file: + yaml_data = yaml.safe_load(file) + try: + admin_key = yaml_data['deployment']['admin']['admin_key'][0]['key'] + return admin_key + except KeyError: + return None + +def create_route(): + key = get_admin_key_from_yaml('conf/config.yaml') + if key is None: + print("Key not found in the YAML file.") + return + command = '''curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY:{key}" -X PUT -d ' +{ + "uri": "/client_abort", + "upstream": { + "nodes": { + "127.0.0.1:6666": 1 + }, + "type": "roundrobin" + } +}' + ''' + subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + +def req(): + conn = http.client.HTTPConnection("127.0.0.1", port=9080) + conn.request("GET", "/client_abort?seconds=0.01") + time.sleep(0.001) + conn.close() + +def run_in_thread(): + for i in range(50): + req() + +@check_leak +def run(): + th = [threading.Thread(target=run_in_thread) for i in range(10)] + for t in th: + t.start() + for t in th: + t.join() + + +if __name__ == "__main__": + run_test(create_route,run) diff --git a/CloudronPackages/APISIX/apisix-source/t/fuzzing/http_upstream.py b/CloudronPackages/APISIX/apisix-source/t/fuzzing/http_upstream.py new file mode 
100755 index 0000000..6106602 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/fuzzing/http_upstream.py @@ -0,0 +1,100 @@ +#! /usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file provides a fuzzing test with different upstreams +import http.client +import json +import random +import threading +from public import check_leak, run_test, connect_admin +import yaml + +REQ_PER_THREAD = 50 +THREADS_NUM = 4 +TOTOL_ROUTES = 10 + +def get_admin_key_from_yaml(yaml_file_path): + with open(yaml_file_path, 'r') as file: + yaml_data = yaml.safe_load(file) + try: + admin_key = yaml_data['deployment']['admin']['admin_key'][0]['key'] + return admin_key + except KeyError: + return None +def create_route(): + key = get_admin_key_from_yaml('conf/config.yaml') + if key is None: + print("Key not found in the YAML file.") + return + for i in range(TOTOL_ROUTES): + conn = connect_admin() + scheme = "http" if i % 2 == 0 else "https" + port = ":6666" if i % 2 == 0 else ":6667" + suffix = str(i + 1) + i = str(i) + conf = json.dumps({ + "uri": "/*", + "host": "test" + i + ".com", + "plugins": { + }, + "upstream": { + "scheme": scheme, + "nodes": { + "127.0.0." 
+ suffix + port: 1 + }, + "type": "roundrobin" + }, + }) + + conn.request("PUT", "/apisix/admin/routes/" + i, conf, + headers={ + "X-API-KEY":key, + }) + response = conn.getresponse() + assert response.status <= 300, response.read() + +def req(): + route_id = random.randrange(TOTOL_ROUTES) + conn = http.client.HTTPConnection("127.0.0.1", port=9080) + conn.request("GET", "/server_addr", + headers={ + "Host":"test" + str(route_id) + ".com", + }) + response = conn.getresponse() + assert response.status == 200, response.read() + ip = response.read().rstrip().decode() + suffix = str(route_id + 1) + assert "127.0.0." + suffix == ip, f"expect: 127.0.0.{suffix}, actual: {ip}" + +def run_in_thread(): + for i in range(REQ_PER_THREAD): + req() + +@check_leak +def run(): + th = [threading.Thread(target=run_in_thread) for i in range(THREADS_NUM)] + for t in th: + t.start() + for t in th: + t.join() + + +if __name__ == "__main__": + run_test(create_route, run) + diff --git a/CloudronPackages/APISIX/apisix-source/t/fuzzing/public.py b/CloudronPackages/APISIX/apisix-source/t/fuzzing/public.py new file mode 100644 index 0000000..0897ec4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/fuzzing/public.py @@ -0,0 +1,138 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import http.client +import subprocess +import os +from functools import wraps +from pathlib import Path +import psutil +from boofuzz import FuzzLoggerText, Session, TCPSocketConnection, Target + +def cur_dir(): + return os.path.split(os.path.realpath(__file__))[0] + +def apisix_pwd(): + return os.environ.get("APISIX_FUZZING_PWD") or \ + (str(Path.home()) + "/work/apisix/apisix") + +def connect_admin(): + conn = http.client.HTTPConnection("127.0.0.1", port=9180) + return conn + +def check_log(): + boofuzz_log = cur_dir() + "/test.log" + apisix_errorlog = apisix_pwd() + "/logs/error.log" + apisix_accesslog = apisix_pwd() + "/logs/access.log" + + cmds = ['cat %s | grep -a "error" | grep -v "invalid request body"'%apisix_errorlog, 'cat %s | grep -a " 500 "'%apisix_accesslog] + if os.path.exists(boofuzz_log): + cmds.append('cat %s | grep -a "fail"'%boofuzz_log) + for cmd in cmds: + r = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) + err = r.stdout.read().strip() + print("Error in log: ", err) + assert err == b"" + +def check_process(): + with open(apisix_pwd() + "/logs/nginx.pid") as f: + pid = int(f.read().strip()) + parent = psutil.Process(pid) + children = parent.children(recursive=True) + process = {p.pid for p in children if "cache loader process" not in p.cmdline()[0]} + process.add(parent.pid) + return process + +def initfuzz(): + fw = open(cur_dir() + "/test.log",'w') + fuzz_loggers = [FuzzLoggerText(file_handle=fw)] + session = Session( + target=Target( + connection=TCPSocketConnection("127.0.0.1", 9080, send_timeout=5.0, recv_timeout=5.0, server=False) + ), + fuzz_loggers=fuzz_loggers, + keep_web_open=False, + ) + return session + +def sum_memory(): + pmap = {} + for p in check_process(): + proc = psutil.Process(p) + pmap[proc] = proc.memory_full_info() + return sum(m.rss for m in pmap.values()) + +def 
get_linear_regression_sloped(samples): + n = len(samples) + avg_x = (n + 1) / 2 + avg_y = sum(samples) / n + avg_xy = sum([(i + 1) * v for i, v in enumerate(samples)]) / n + avg_x2 = sum([i * i for i in range(1, n + 1)]) / n + denom = avg_x2 - avg_x * avg_x + if denom == 0: + return None + return (avg_xy - avg_x * avg_y) / denom + +def gc(): + conn = http.client.HTTPConnection("127.0.0.1", port=9090) + conn.request("POST", "/v1/gc") + conn.close() + +def leak_count(): + return int(os.environ.get("APISIX_FUZZING_LEAK_COUNT") or 100) + +LEAK_COUNT = leak_count() + +def check_leak(f): + @wraps(f) + def wrapper(*args, **kwds): + global LEAK_COUNT + + samples = [] + for i in range(LEAK_COUNT): + f(*args, **kwds) + gc() + samples.append(sum_memory()) + count = 0 + for i in range(1, LEAK_COUNT): + if samples[i - 1] < samples[i]: + count += 1 + print(samples) + sloped = get_linear_regression_sloped(samples) + print(sloped) + print(count / LEAK_COUNT) + + if os.environ.get("CI"): # CI is not stable + return + + # the threshold is chosen so that we can find leaking a table per request + if sloped > 10000 and (count / LEAK_COUNT) > 0.2: + raise AssertionError("memory leak") + + return wrapper + +def run_test(create_route, run): + # before test + create_route() + r1 = check_process() + run() + # after test + check_log() + r2 = check_process() + if r2 != r1: + print("before test, nginx's process list:%s,\nafter test, nginx's process list:%s"%(r1,r2)) + raise AssertionError diff --git a/CloudronPackages/APISIX/apisix-source/t/fuzzing/requirements.txt b/CloudronPackages/APISIX/apisix-source/t/fuzzing/requirements.txt new file mode 100644 index 0000000..9c68d01 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/fuzzing/requirements.txt @@ -0,0 +1,4 @@ +psutil==5.8.0 +typing==3.7.4.3 +boofuzz==0.4.0 +PyYAML==5.4.1 diff --git a/CloudronPackages/APISIX/apisix-source/t/fuzzing/serverless_route_test.py b/CloudronPackages/APISIX/apisix-source/t/fuzzing/serverless_route_test.py 
new file mode 100644 index 0000000..abdef56 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/fuzzing/serverless_route_test.py @@ -0,0 +1,106 @@ +#! /usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import subprocess +from public import initfuzz, run_test +from boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_size, s_static, s_string +import yaml + +def get_admin_key_from_yaml(yaml_file_path): + with open(yaml_file_path, 'r') as file: + yaml_data = yaml.safe_load(file) + try: + admin_key = yaml_data['deployment']['admin']['admin_key'][0]['key'] + return admin_key + except KeyError: + return None + +def create_route(): + key = get_admin_key_from_yaml('conf/config.yaml') + if key is None: + print("Key not found in the YAML file.") + return + command = '''curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: {key}" -X PUT -d ' +{ + "uri": "/post*", + "methods": ["POST"], + "plugins": { + "serverless-post-function": { + "functions": ["return function()\n local core = require(\"apisix.core\")\n ngx.req.read_body()\n local req_body = ngx.req.get_body_data()\n if req_body == \"{\\\"a\\\":\\\"b\\\"}\" then\n return\n else\n ngx.exit(ngx.HTTP_BAD_REQUEST)\n end\n end\n"] + } + }, 
+ "upstream": { + "nodes": { + "127.0.0.1:6666": 1 + }, + "type": "roundrobin" + } +}' + ''' + subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + +def run(): + session = initfuzz() + + s_initialize(name="Request") + with s_block("Request-Line"): + s_group("Method", ["GET", "HEAD", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PURGE"]) + s_delim(" ", name="space-1") + s_string("/post", name="Request-URI") + s_delim(" ", name="space-2") + s_string("HTTP/1.1", name="HTTP-Version") + s_static("\r\n", name="Request-Line-CRLF") + s_string("Host:", name="Host-Line") + s_delim(" ", name="space-3") + s_string("127.0.0.1:9080", name="Host-Line-Value") + s_static("\r\n", name="Host-Line-CRLF") + s_static('User-Agent', name='User-Agent-Header') + s_delim(':', name='User-Agent-Colon-1') + s_delim(' ', name='User-Agent-Space-1') + s_string('Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3223.8 Safari/537.36', name='User-Agent-Value') + s_static('\r\n', name='User-Agent-CRLF'), + s_static('Accept', name='Accept-Header') + s_delim(':', name='Accept-Colon-1') + s_delim(' ', name='Accept-Space-1') + s_string('text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', name='Accept-Value') + s_static('\r\n', name='Accept-CRLF') + s_static("Content-Length:", name="Content-Length-Header") + s_delim(" ", name="space-4") + s_size("Body-Content", output_format="ascii", name="Content-Length-Value") + s_static("\r\n", "Content-Length-CRLF") + s_static('Connection', name='Connection-Header') + s_delim(':', name='Connection-Colon-1') + s_delim(' ', name='Connection-Space-1') + s_group('Connection-Type', ['keep-alive', 'close']) + s_static('\r\n', 'Connection-CRLF') + s_static('Content-Type', name='Content-Type-Header') + s_delim(':', name='Content-Type-Colon-1') + s_delim(' ', name='Content-Type-Space-1') + s_string('application/x-www-form-urlencoded', name='Content-Type-Value') 
+ s_static('\r\n', name='Content-Type-CRLF') + s_static("\r\n", "Request-CRLF") + + with s_block("Body-Content"): + s_string('{"a":"b"}', name="Body-Content-Value") + + session.connect(s_get("Request")) + session.fuzz(max_depth=1) + +if __name__ == "__main__": + run_test(create_route,run) diff --git a/CloudronPackages/APISIX/apisix-source/t/fuzzing/simple_http.py b/CloudronPackages/APISIX/apisix-source/t/fuzzing/simple_http.py new file mode 100755 index 0000000..e8c5247 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/fuzzing/simple_http.py @@ -0,0 +1,134 @@ +#! /usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# This file provides a fuzzing test with most common plugins via plain HTTP request +import http.client +import json +import random +import threading +from public import check_leak, LEAK_COUNT, run_test, connect_admin +import yaml + +REQ_PER_THREAD = 50 +THREADS_NUM = 10 +TOTOL_ROUTES = 50 + +def get_admin_key_from_yaml(yaml_file_path): + with open(yaml_file_path, 'r') as file: + yaml_data = yaml.safe_load(file) + try: + admin_key = yaml_data['deployment']['admin']['admin_key'][0]['key'] + return admin_key + except KeyError: + return None + +def create_route(): + conf = json.dumps({ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }) + conn = connect_admin() + key = get_admin_key_from_yaml('conf/config.yaml') + if key is None: + print("Key not found in the YAML file.") + return + key = key.replace('"', '') + print("the key is", key) + headers = { + "X-API-KEY": key, + } + print("Request headers:", headers) + conn.request("PUT", "/apisix/admin/consumers", conf, + headers=headers) + response = conn.getresponse() + assert response.status <= 300, response.read() + + for i in range(TOTOL_ROUTES): + conn = connect_admin() + i = str(i) + conf = json.dumps({ + "uri": "/*", + "host": "test" + i + ".com", + "plugins": { + "limit-count": { + "count": LEAK_COUNT * REQ_PER_THREAD * THREADS_NUM, + "time_window": 3600, + }, + "jwt-auth": { + }, + "proxy-rewrite": { + "uri": "/" + i, + "headers": { + "X-APISIX-Route": "apisix-" + i + } + }, + "response-rewrite": { + "headers": { + "X-APISIX-Route": "$http_x_apisix_route" + } + }, + }, + "upstream": { + "nodes": { + "127.0.0.1:6666": 1 + }, + "type": "roundrobin" + }, + }) + conn.request("PUT", "/apisix/admin/routes/" + i, conf, + headers=headers) + response = conn.getresponse() + assert response.status <= 300, response.read() + +def req(): + jwt_token = ("Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."+ + "eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0."+ + 
"fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs") + route_id = str(random.randrange(TOTOL_ROUTES)) + conn = http.client.HTTPConnection("127.0.0.1", port=9080) + conn.request("GET", "/", + headers={ + "Host":"test" + route_id + ".com", + "Authorization":jwt_token, + }) + response = conn.getresponse() + assert response.status == 200, response.read() + hdr = response.headers["X-APISIX-Route"] + assert hdr == "apisix-" + route_id, hdr + +def run_in_thread(): + for i in range(REQ_PER_THREAD): + req() + +@check_leak +def run(): + th = [threading.Thread(target=run_in_thread) for i in range(THREADS_NUM)] + for t in th: + t.start() + for t in th: + t.join() + + +if __name__ == "__main__": + run_test(create_route, run) diff --git a/CloudronPackages/APISIX/apisix-source/t/fuzzing/simpleroute_test.py b/CloudronPackages/APISIX/apisix-source/t/fuzzing/simpleroute_test.py new file mode 100755 index 0000000..20d459e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/fuzzing/simpleroute_test.py @@ -0,0 +1,87 @@ +#! /usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import subprocess
+from public import initfuzz, run_test
+from boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_static, s_string
+import yaml
+
+
+def get_admin_key_from_yaml(yaml_file_path):
+    # Read the Admin API key from APISIX's config.yaml
+    # (deployment.admin.admin_key[0].key); returns None when that path is
+    # absent from the file.
+    with open(yaml_file_path, 'r') as file:
+        yaml_data = yaml.safe_load(file)
+    try:
+        admin_key = yaml_data['deployment']['admin']['admin_key'][0]['key']
+        return admin_key
+    except KeyError:
+        return None
+
+
+
+
+def create_route():
+    # Create route 1 ("/get*", GET only) through the Admin API so the fuzzer
+    # below has a live target; the upstream is the local mock on port 6666.
+    key = get_admin_key_from_yaml('conf/config.yaml')
+    if key is None:
+        print("Key not found in the YAML file.")
+        return
+    # Construct curl command with the extracted key
+    # (f-string: {key} is interpolated, JSON braces are doubled to stay literal)
+    command = f'''curl http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: {key}" -X PUT -d '
+{{
+    "uri": "/get*",
+    "methods": ["GET"],
+    "upstream": {{
+        "type": "roundrobin",
+        "nodes": {{
+            "127.0.0.1:6666": 1
+        }}
+    }}
+}}'
+    '''
+    # NOTE(review): Popen is not waited on, so route creation races with the
+    # fuzz run below — presumably etcd propagation makes this work in
+    # practice; confirm intended.
+    subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+
+def run():
+    # Build a boofuzz model of a plain GET request and mutate each primitive
+    # (method, URI, HTTP version, header names/values) one at a time
+    # (max_depth=1); s_static parts are never mutated.
+    session = initfuzz()
+
+    s_initialize(name="Request")
+    with s_block("Request-Line"):
+        s_group("Method", ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', "PURGE"])
+        s_delim(" ", name='space-1')
+        s_string("/get", name='Request-URI')
+        s_delim(" ", name='space-2')
+        s_string('HTTP/1.1', name='HTTP-Version')
+        s_static("\r\n", name="Request-Line-CRLF")
+        s_string("Host:", name="Host-Line")
+        s_delim(" ", name="space-3")
+        s_string("example.com", name="Host-Line-Value")
+        s_static("\r\n", name="Host-Line-CRLF")
+        s_string("Connection:", name="Connection-Line")
+        s_delim(" ", name="space-4")
+        s_string("Keep-Alive", name="Connection-Line-Value")
+        s_static("\r\n", name="Connection-Line-CRLF")
+        s_string("User-Agent:", name="User-Agent-Line")
+        s_delim(" ", name="space-5")
+        s_string("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.83 Safari/537.1", name="User-Agent-Line-Value")
+        s_static("\r\n", name="User-Agent-Line-CRLF")
+
+    s_static("\r\n", "Request-CRLF")
+    session.connect(s_get("Request"))
+    session.fuzz(max_depth=1)
+
+if __name__ == "__main__":
+    run_test(create_route,run)
diff --git a/CloudronPackages/APISIX/apisix-source/t/fuzzing/upstream/nginx.conf b/CloudronPackages/APISIX/apisix-source/t/fuzzing/upstream/nginx.conf
new file mode 100644
index 0000000..7a94517
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/t/fuzzing/upstream/nginx.conf
@@ -0,0 +1,75 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +master_process on; +worker_processes 1; +worker_cpu_affinity auto; +error_log logs/error.log error; +pid logs/nginx.pid; +worker_rlimit_nofile 20480; + +events { + accept_mutex off; + worker_connections 10620; +} + +worker_shutdown_timeout 1; + +http { + lua_socket_log_errors off; + + resolver ipv6=off local=on; + + access_log off; + server_tokens off; + more_clear_headers Server; + keepalive_requests 10000; + tcp_nodelay on; + + server { + listen 6666 reuseport; + location / { + content_by_lua_block { + ngx.say("cur time: ", ngx.time()) + } + } + + location /client_abort { + content_by_lua_block { + ngx.sleep(tonumber(ngx.var.arg_seconds or 1)) + } + } + + location /server_addr { + content_by_lua_block { + ngx.say(ngx.var.server_addr) + } + } + } + + server { + listen 6667 ssl; + ssl_certificate ../../certs/apisix.crt; + ssl_certificate_key ../../certs/apisix.key; + + location /server_addr { + content_by_lua_block { + ngx.say(ngx.var.server_addr) + } + } + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/fuzzing/vars_route_test.py b/CloudronPackages/APISIX/apisix-source/t/fuzzing/vars_route_test.py new file mode 100644 index 0000000..3d382d2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/fuzzing/vars_route_test.py @@ -0,0 +1,88 @@ +#! /usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import subprocess +from public import initfuzz, run_test +from boofuzz import s_block, s_delim, s_get, s_group, s_initialize, s_static, s_string +import yaml + +def get_admin_key_from_yaml(yaml_file_path): + with open(yaml_file_path, 'r') as file: + yaml_data = yaml.safe_load(file) + try: + admin_key = yaml_data['deployment']['admin']['admin_key'][0]['key'] + return admin_key + except KeyError: + return None +def create_route(): + key = get_admin_key_from_yaml('conf/config.yaml') + if key is None: + print("Key not found in the YAML file.") + return + command = '''curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H "X-API-KEY: {key}" -X PUT -d ' +{ + "uri": "/parameter*", + "vars": [ + ["arg_name","==","jack"], + ["http_token","==","140b543013d988f4767277b6f45ba542"] + ], + "upstream": { + "nodes": { + "127.0.0.1:6666": 1 + }, + "type": "roundrobin" + } +}' + ''' + subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + +def run(): + session = initfuzz() + + s_initialize(name="Request") + with s_block("Request-Line"): + s_group("Method", ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PURGE']) + s_delim(" ", name='space-1') + s_string("/parameter?name=jack", name='Request-URI') + s_delim(" ", name='space-2') + s_string('HTTP/1.1', name='HTTP-Version') + s_static("\r\n", name="Request-Line-CRLF") + s_string("Host:", name="Host-Line") + s_delim(" ", name="space-3") + s_string("example.com", name="Host-Line-Value") + s_static("\r\n", name="Host-Line-CRLF") + s_string("Connection:", name="Connection-Line") + s_delim(" ", name="space-4") + s_string("Keep-Alive", name="Connection-Line-Value") + s_static("\r\n", name="Connection-Line-CRLF") + s_string("User-Agent:", name="User-Agent-Line") + s_delim(" ", name="space-5") + s_string("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) 
Chrome/21.0.1180.83 Safari/537.1", name="User-Agent-Line-Value") + s_static("\r\n", name="User-Agent-Line-CRLF") + s_string("token:", name="age-Line") + s_delim(" ", name="space-6") + s_string("140b543013d988f4767277b6f45ba542", name="age-Line-Value") + s_static("\r\n", name="age-Line-CRLF") + + s_static("\r\n", "Request-CRLF") + session.connect(s_get("Request")) + session.fuzz(max_depth=1) + +if __name__ == "__main__": + run_test(create_route,run) diff --git a/CloudronPackages/APISIX/apisix-source/t/gm/gm.t b/CloudronPackages/APISIX/apisix-source/t/gm/gm.t new file mode 100644 index 0000000..dfd64d3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/gm/gm.t @@ -0,0 +1,257 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +BEGIN { + $ENV{TEST_ENV_GMSSL_CRT_ENC} = "-----BEGIN CERTIFICATE----- +MIIB2DCCAX6gAwIBAgIBAzAKBggqgRzPVQGDdTBFMQswCQYDVQQGEwJBQTELMAkG +A1UECAwCQkIxCzAJBgNVBAoMAkNDMQswCQYDVQQLDAJERDEPMA0GA1UEAwwGc3Vi +IGNhMB4XDTIyMTEwMjAzMTkzNloXDTMyMTAzMDAzMTkzNlowSTELMAkGA1UEBhMC +QUExCzAJBgNVBAgMAkJCMQswCQYDVQQKDAJDQzELMAkGA1UECwwCREQxEzARBgNV +BAMMCnNlcnZlciBlbmMwWjAUBggqgRzPVQGCLQYIKoEcz1UBgi0DQgAED+MQrLrZ +9PbMmz/44Kb73Qc7FlMs7u034XImjJREBAn1KzZ7jqcYfCiV/buhmu1sLhMXnB69 +mERtf1tAaXcgIaNaMFgwCQYDVR0TBAIwADALBgNVHQ8EBAMCAzgwHQYDVR0OBBYE +FBxHDo0gHhMoYkDeHWySTIJy5BZpMB8GA1UdIwQYMBaAFCTrpmbUig3JfveqAIGJ +6n+vAk2AMAoGCCqBHM9VAYN1A0gAMEUCIHtXgpOxcb3mZv2scRZHZz5YGFr45dfk +VfLkF9BkrB/xAiEA8EeUg7nCFfgHzrfgB7v0wgN1Hrgj8snTUO6IDfkBKYM= +-----END CERTIFICATE----- +"; +} + +use t::APISIX; + +if (-f "/usr/local/tongsuo/bin/openssl") { + plan 'no_plan'; +} else { + plan(skip_all => "only for GM tests"); +} + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + # setup default conf.yaml + my $extra_yaml_config = $block->extra_yaml_config // <<_EOC_; +plugins: + - gm +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set ssl +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local f = assert(io.open("t/certs/server_enc.crt")) + local cert_enc = f:read("*a") + f:close() + + local f = assert(io.open("t/certs/server_sign.crt")) + local cert_sign = f:read("*a") + f:close() + + local f = assert(io.open("t/certs/server_enc.key")) + local pkey_enc = f:read("*a") + f:close() + + local f = assert(io.open("t/certs/server_sign.key")) + local pkey_sign = f:read("*a") + f:close() + + local data = {cert = cert_enc, + key = pkey_enc, + certs = {cert_sign}, + keys = {pkey_sign}, + sni = "localhost", + gm = true, + } + + local 
code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: hit +--- exec +/usr/local/tongsuo/bin/openssl s_client -connect localhost:1994 -servername localhost -cipher ECDHE-SM2-WITH-SM4-SM3 -enable_ntls -ntls -verifyCAfile t/certs/gm_ca.crt -sign_cert t/certs/client_sign.crt -sign_key t/certs/client_sign.key -enc_cert t/certs/client_enc.crt -enc_key t/certs/client_enc.key +--- response_body eval +qr/^CONNECTED/ +--- no_error_log +SSL_do_handshake() failed +[error] + + + +=== TEST 3: reject bad SSL +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local f = assert(io.open("t/certs/server_enc.crt")) + local cert_enc = f:read("*a") + f:close() + + local f = assert(io.open("t/certs/server_enc.key")) + local pkey_enc = f:read("*a") + f:close() + + local data = { + cert = cert_enc, + key = pkey_enc, + sni = "localhost", + gm = true, + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + } +} +--- error_code: 400 +--- response_body +{"error_msg":"sign cert/key are required"} + + + +=== TEST 4: hit with gm disabled +--- extra_yaml_config +--- exec +/usr/local/tongsuo/bin/openssl s_client -connect localhost:1994 -servername localhost -cipher ECDHE-SM2-WITH-SM4-SM3 -enable_ntls -ntls -verifyCAfile t/certs/gm_ca.crt -sign_cert t/certs/client_sign.crt -sign_key t/certs/client_sign.key -enc_cert t/certs/client_enc.crt -enc_key t/certs/client_enc.key +--- response_body +--- error_log +SSL_do_handshake() failed + + 
+ +=== TEST 5: set ssl: server_enc with secret ref +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local f = assert(io.open("t/certs/server_sign.crt")) + local cert_sign = f:read("*a") + f:close() + + local f = assert(io.open("t/certs/server_enc.key")) + local pkey_enc = f:read("*a") + f:close() + + local f = assert(io.open("t/certs/server_sign.key")) + local pkey_sign = f:read("*a") + f:close() + + local data = { + cert = "$env://TEST_ENV_GMSSL_CRT_ENC", + key = pkey_enc, + certs = {cert_sign}, + keys = {pkey_sign}, + sni = "localhost", + gm = true, + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 6: hit +--- exec +/usr/local/tongsuo/bin/openssl s_client -connect localhost:1994 -servername localhost -cipher ECDHE-SM2-WITH-SM4-SM3 -enable_ntls -ntls -verifyCAfile t/certs/gm_ca.crt -sign_cert t/certs/client_sign.crt -sign_key t/certs/client_sign.key -enc_cert t/certs/client_enc.crt -enc_key t/certs/client_enc.key +--- response_body eval +qr/^CONNECTED/ +--- no_error_log +SSL_do_handshake() failed +[error] diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/echo.pb b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/echo.pb new file mode 100644 index 0000000..3f82f25 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/echo.pb differ diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/go.mod b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/go.mod new file mode 100644 index 0000000..c1e67da --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/go.mod @@ -0,0 +1,10 @@ +module github.com/api7/grpc_server_example + +go 1.11 + +require ( + github.com/golang/protobuf v1.5.2 + golang.org/x/net v0.7.0 + google.golang.org/grpc v1.53.0 + google.golang.org/protobuf v1.33.0 +) diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/go.sum b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/go.sum new file mode 100644 index 0000000..e27a835 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/go.sum @@ -0,0 +1,1117 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod 
h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod 
h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod 
h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod 
h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod 
h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod 
h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod 
h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= 
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= 
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= 
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod 
h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= 
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod 
h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= 
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security 
v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= 
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod 
h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod 
h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= 
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs 
v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod 
h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= 
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod 
h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= 
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= 
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod 
h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= 
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod 
h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod 
h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc 
v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod 
h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/main.go b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/main.go new file mode 100644 index 0000000..54bceb4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/main.go @@ -0,0 +1,346 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative proto/helloworld.proto +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative proto/import.proto +//go:generate protoc --include_imports --descriptor_set_out=proto.pb --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative proto/src.proto +//go:generate protoc --descriptor_set_out=echo.pb --include_imports --proto_path=$PWD/proto echo.proto +//go:generate protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative proto/echo.proto + +// Package main implements a server for Greeter service. +package main + +import ( + "context" + "crypto/tls" + "crypto/x509" + "flag" + "fmt" + "io" + "log" + "net" + "net/http" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/reflection" + "google.golang.org/grpc/status" + + pb "github.com/api7/grpc_server_example/proto" +) + +var ( + grpcAddr = ":10051" + grpcsAddr = ":10052" + grpcsMtlsAddr string + grpcHTTPAddr string + + crtFilePath = "../t/cert/apisix.crt" + keyFilePath = "../t/cert/apisix.key" + caFilePath string +) + +func init() { + flag.StringVar(&grpcAddr, "grpc-address", grpcAddr, "address for grpc") + flag.StringVar(&grpcsAddr, "grpcs-address", grpcsAddr, "address for grpcs") + flag.StringVar(&grpcsMtlsAddr, "grpcs-mtls-address", grpcsMtlsAddr, "address for grpcs in mTLS") + flag.StringVar(&grpcHTTPAddr, "grpc-http-address", grpcHTTPAddr, "addresses for http and grpc services at the same time") + flag.StringVar(&crtFilePath, "crt", crtFilePath, "path to certificate") + flag.StringVar(&keyFilePath, "key", keyFilePath, "path to key") + flag.StringVar(&caFilePath, "ca", 
caFilePath, "path to ca") +} + +// server is used to implement helloworld.GreeterServer. +type server struct { + // Embed the unimplemented server + pb.UnimplementedGreeterServer + pb.UnimplementedTestImportServer + pb.UnimplementedEchoServer +} + +// SayHello implements helloworld.GreeterServer +func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + log.Printf("Received: %v", in.Name) + log.Printf("Enum Gender: %v", in.GetGender()) + msg := "Hello " + in.Name + + person := in.GetPerson() + if person != nil { + if person.GetName() != "" { + msg += fmt.Sprintf(", name: %v", person.GetName()) + } + if person.GetAge() != 0 { + msg += fmt.Sprintf(", age: %v", person.GetAge()) + } + } + + return &pb.HelloReply{ + Message: msg, + Items: in.GetItems(), + Gender: in.GetGender(), + }, nil +} + +// GetErrResp implements helloworld.GreeterServer +func (s *server) GetErrResp(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + st := status.New(codes.Unavailable, "Out of service") + st, err := st.WithDetails(&pb.ErrorDetail{ + Code: 1, + Message: "The server is out of service", + Type: "service", + }) + if err != nil { + panic(fmt.Sprintf("Unexpected error attaching metadata: %v", err)) + } + + return nil, st.Err() +} + +func (s *server) SayHelloAfterDelay(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { + select { + case <-time.After(1 * time.Second): + fmt.Println("overslept") + case <-ctx.Done(): + errStr := ctx.Err().Error() + if ctx.Err() == context.DeadlineExceeded { + return nil, status.Error(codes.DeadlineExceeded, errStr) + } + } + + time.Sleep(1 * time.Second) + + log.Printf("Received: %v", in.Name) + + return &pb.HelloReply{Message: "Hello delay " + in.Name}, nil +} + +func (s *server) Plus(ctx context.Context, in *pb.PlusRequest) (*pb.PlusReply, error) { + log.Printf("Received: %v %v", in.A, in.B) + return &pb.PlusReply{Result: in.A + in.B}, nil +} + +func (s *server) EchoStruct(ctx 
context.Context, in *pb.StructRequest) (*pb.StructReply, error) { + log.Printf("Received: %+v", in) + + return &pb.StructReply{ + Data: in.Data, + }, nil +} + +// SayHelloServerStream streams HelloReply back to the client. +func (s *server) SayHelloServerStream(req *pb.HelloRequest, stream pb.Greeter_SayHelloServerStreamServer) error { + log.Printf("Received server side stream req: %v\n", req) + + // Say Hello 5 times. + for i := 0; i < 5; i++ { + if err := stream.Send(&pb.HelloReply{ + Message: fmt.Sprintf("Hello %s", req.Name), + }); err != nil { + return status.Errorf(codes.Unavailable, "Unable to stream request back to client: %v", err) + } + } + return nil +} + +// SayHelloClientStream receives a stream of HelloRequest from a client. +func (s *server) SayHelloClientStream(stream pb.Greeter_SayHelloClientStreamServer) error { + log.Println("SayHello client side streaming has been initiated.") + cache := "" + for { + req, err := stream.Recv() + if err == io.EOF { + return stream.SendAndClose(&pb.HelloReply{Message: cache}) + } + if err != nil { + return status.Errorf(codes.Unavailable, "Failed to read client stream: %v", err) + } + cache = fmt.Sprintf("%sHello %s!", cache, req.Name) + } +} + +// SayHelloBidirectionalStream establishes a bidirectional stream with the client. 
+func (s *server) SayHelloBidirectionalStream(stream pb.Greeter_SayHelloBidirectionalStreamServer) error { + log.Println("SayHello bidirectional streaming has been initiated.") + + for { + req, err := stream.Recv() + if err == io.EOF { + return stream.Send(&pb.HelloReply{Message: "stream ended"}) + } + if err != nil { + return status.Errorf(codes.Unavailable, "Failed to read client stream: %v", err) + } + + // A small 0.5 sec sleep + time.Sleep(500 * time.Millisecond) + + if err := stream.Send(&pb.HelloReply{Message: fmt.Sprintf("Hello %s", req.Name)}); err != nil { + return status.Errorf(codes.Unknown, "Failed to stream response back to client: %v", err) + } + } +} + +// SayMultipleHello implements helloworld.GreeterServer +func (s *server) SayMultipleHello(ctx context.Context, in *pb.MultipleHelloRequest) (*pb.MultipleHelloReply, error) { + log.Printf("Received: %v", in.Name) + log.Printf("Enum Gender: %v", in.GetGenders()) + msg := "Hello " + in.Name + + persons := in.GetPersons() + if persons != nil { + for _, person := range persons { + if person.GetName() != "" { + msg += fmt.Sprintf(", name: %v", person.GetName()) + } + if person.GetAge() != 0 { + msg += fmt.Sprintf(", age: %v", person.GetAge()) + } + } + } + + return &pb.MultipleHelloReply{ + Message: msg, + Items: in.GetItems(), + Genders: in.GetGenders(), + }, nil +} + +func (s *server) Run(ctx context.Context, in *pb.Request) (*pb.Response, error) { + return &pb.Response{Body: in.User.Name + " " + in.Body}, nil +} + +func gRPCAndHTTPFunc(grpcServer *grpc.Server) http.Handler { + return h2c.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hello http")) + }) + + if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { + grpcServer.ServeHTTP(w, r) + } else { + mux.ServeHTTP(w, r) + } + }), &http2.Server{}) +} + +func main() { + 
flag.Parse() + + go func() { + lis, err := net.Listen("tcp", grpcAddr) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + + reflection.Register(s) + pb.RegisterGreeterServer(s, &server{}) + pb.RegisterTestImportServer(s, &server{}) + pb.RegisterEchoServer(s, &server{}) + + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } + }() + + go func() { + lis, err := net.Listen("tcp", grpcsAddr) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + + c, err := credentials.NewServerTLSFromFile(crtFilePath, keyFilePath) + if err != nil { + log.Fatalf("credentials.NewServerTLSFromFile err: %v", err) + } + s := grpc.NewServer(grpc.Creds(c)) + reflection.Register(s) + pb.RegisterGreeterServer(s, &server{}) + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } + }() + + if grpcHTTPAddr != "" { + go func() { + lis, err := net.Listen("tcp", grpcHTTPAddr) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + s := grpc.NewServer() + + reflection.Register(s) + pb.RegisterGreeterServer(s, &server{}) + pb.RegisterTestImportServer(s, &server{}) + + if err := http.Serve(lis, gRPCAndHTTPFunc(s)); err != nil { + log.Fatalf("failed to serve grpc: %v", err) + } + }() + } + + if grpcsMtlsAddr != "" { + go func() { + lis, err := net.Listen("tcp", grpcsMtlsAddr) + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + + certificate, err := tls.LoadX509KeyPair(crtFilePath, keyFilePath) + if err != nil { + log.Fatalf("could not load server key pair: %s", err) + } + + certPool := x509.NewCertPool() + ca, err := os.ReadFile(caFilePath) + if err != nil { + log.Fatalf("could not read ca certificate: %s", err) + } + + if ok := certPool.AppendCertsFromPEM(ca); !ok { + log.Fatalf("failed to append client certs") + } + + c := credentials.NewTLS(&tls.Config{ + ClientAuth: tls.RequireAndVerifyClientCert, + Certificates: []tls.Certificate{certificate}, + ClientCAs: certPool, + 
}) + s := grpc.NewServer(grpc.Creds(c)) + reflection.Register(s) + pb.RegisterGreeterServer(s, &server{}) + if err := s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } + }() + } + + signals := make(chan os.Signal) + signal.Notify(signals, os.Interrupt, syscall.SIGTERM) + sig := <-signals + log.Printf("get signal %s, exit\n", sig.String()) +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto.pb b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto.pb new file mode 100644 index 0000000..8edcc5c Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto.pb differ diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/echo.pb.go b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/echo.pb.go new file mode 100644 index 0000000..cafcdc2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/echo.pb.go @@ -0,0 +1,236 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.6.1 +// source: proto/echo.proto + +package proto + +import ( + _struct "github.com/golang/protobuf/ptypes/struct" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StructRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data *_struct.Struct `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *StructRequest) Reset() { + *x = StructRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_echo_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StructRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StructRequest) ProtoMessage() {} + +func (x *StructRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_echo_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StructRequest.ProtoReflect.Descriptor instead. 
+func (*StructRequest) Descriptor() ([]byte, []int) { + return file_proto_echo_proto_rawDescGZIP(), []int{0} +} + +func (x *StructRequest) GetData() *_struct.Struct { + if x != nil { + return x.Data + } + return nil +} + +type StructReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data *_struct.Struct `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *StructReply) Reset() { + *x = StructReply{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_echo_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StructReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StructReply) ProtoMessage() {} + +func (x *StructReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_echo_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StructReply.ProtoReflect.Descriptor instead. 
+func (*StructReply) Descriptor() ([]byte, []int) { + return file_proto_echo_proto_rawDescGZIP(), []int{1} +} + +func (x *StructReply) GetData() *_struct.Struct { + if x != nil { + return x.Data + } + return nil +} + +var File_proto_echo_proto protoreflect.FileDescriptor + +var file_proto_echo_proto_rawDesc = []byte{ + 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x63, 0x68, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x04, 0x65, 0x63, 0x68, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x3c, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x3a, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x32, 0x3e, 0x0a, 0x04, 0x45, 0x63, 0x68, 0x6f, 0x12, 0x36, 0x0a, 0x0a, 0x45, 0x63, 0x68, 0x6f, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x13, 0x2e, 0x65, 0x63, 0x68, 0x6f, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x65, 0x63, + 0x68, 0x6f, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, + 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_proto_echo_proto_rawDescOnce sync.Once + 
file_proto_echo_proto_rawDescData = file_proto_echo_proto_rawDesc +) + +func file_proto_echo_proto_rawDescGZIP() []byte { + file_proto_echo_proto_rawDescOnce.Do(func() { + file_proto_echo_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_echo_proto_rawDescData) + }) + return file_proto_echo_proto_rawDescData +} + +var file_proto_echo_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_proto_echo_proto_goTypes = []interface{}{ + (*StructRequest)(nil), // 0: echo.StructRequest + (*StructReply)(nil), // 1: echo.StructReply + (*_struct.Struct)(nil), // 2: google.protobuf.Struct +} +var file_proto_echo_proto_depIdxs = []int32{ + 2, // 0: echo.StructRequest.data:type_name -> google.protobuf.Struct + 2, // 1: echo.StructReply.data:type_name -> google.protobuf.Struct + 0, // 2: echo.Echo.EchoStruct:input_type -> echo.StructRequest + 1, // 3: echo.Echo.EchoStruct:output_type -> echo.StructReply + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_proto_echo_proto_init() } +func file_proto_echo_proto_init() { + if File_proto_echo_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_echo_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StructRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_echo_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StructReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_proto_echo_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_echo_proto_goTypes, + DependencyIndexes: file_proto_echo_proto_depIdxs, + MessageInfos: file_proto_echo_proto_msgTypes, + }.Build() + File_proto_echo_proto = out.File + file_proto_echo_proto_rawDesc = nil + file_proto_echo_proto_goTypes = nil + file_proto_echo_proto_depIdxs = nil +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/echo.proto b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/echo.proto new file mode 100644 index 0000000..144c261 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/echo.proto @@ -0,0 +1,35 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +syntax = "proto3"; + +package echo; +option go_package = "./proto"; + +import "google/protobuf/struct.proto"; + +service Echo { + rpc EchoStruct (StructRequest) returns (StructReply) {} +} + +message StructRequest { + google.protobuf.Struct data = 1; +} + +message StructReply { + google.protobuf.Struct data = 1; +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/echo_grpc.pb.go b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/echo_grpc.pb.go new file mode 100644 index 0000000..a546e7f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/echo_grpc.pb.go @@ -0,0 +1,105 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.6.1 +// source: proto/echo.proto + +package proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// EchoClient is the client API for Echo service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type EchoClient interface { + EchoStruct(ctx context.Context, in *StructRequest, opts ...grpc.CallOption) (*StructReply, error) +} + +type echoClient struct { + cc grpc.ClientConnInterface +} + +func NewEchoClient(cc grpc.ClientConnInterface) EchoClient { + return &echoClient{cc} +} + +func (c *echoClient) EchoStruct(ctx context.Context, in *StructRequest, opts ...grpc.CallOption) (*StructReply, error) { + out := new(StructReply) + err := c.cc.Invoke(ctx, "/echo.Echo/EchoStruct", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// EchoServer is the server API for Echo service. +// All implementations must embed UnimplementedEchoServer +// for forward compatibility +type EchoServer interface { + EchoStruct(context.Context, *StructRequest) (*StructReply, error) + mustEmbedUnimplementedEchoServer() +} + +// UnimplementedEchoServer must be embedded to have forward compatible implementations. +type UnimplementedEchoServer struct { +} + +func (UnimplementedEchoServer) EchoStruct(context.Context, *StructRequest) (*StructReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method EchoStruct not implemented") +} +func (UnimplementedEchoServer) mustEmbedUnimplementedEchoServer() {} + +// UnsafeEchoServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to EchoServer will +// result in compilation errors. +type UnsafeEchoServer interface { + mustEmbedUnimplementedEchoServer() +} + +func RegisterEchoServer(s grpc.ServiceRegistrar, srv EchoServer) { + s.RegisterService(&Echo_ServiceDesc, srv) +} + +func _Echo_EchoStruct_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StructRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EchoServer).EchoStruct(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/echo.Echo/EchoStruct", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EchoServer).EchoStruct(ctx, req.(*StructRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Echo_ServiceDesc is the grpc.ServiceDesc for Echo service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Echo_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "echo.Echo", + HandlerType: (*EchoServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "EchoStruct", + Handler: _Echo_EchoStruct_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "proto/echo.proto", +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/helloworld.pb.go b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/helloworld.pb.go new file mode 100644 index 0000000..22af6b8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/helloworld.pb.go @@ -0,0 +1,851 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.12.4 +// source: proto/helloworld.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Gender int32 + +const ( + Gender_GENDER_UNKNOWN Gender = 0 + Gender_GENDER_MALE Gender = 1 + Gender_GENDER_FEMALE Gender = 2 +) + +// Enum value maps for Gender. +var ( + Gender_name = map[int32]string{ + 0: "GENDER_UNKNOWN", + 1: "GENDER_MALE", + 2: "GENDER_FEMALE", + } + Gender_value = map[string]int32{ + "GENDER_UNKNOWN": 0, + "GENDER_MALE": 1, + "GENDER_FEMALE": 2, + } +) + +func (x Gender) Enum() *Gender { + p := new(Gender) + *p = x + return p +} + +func (x Gender) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Gender) Descriptor() protoreflect.EnumDescriptor { + return file_proto_helloworld_proto_enumTypes[0].Descriptor() +} + +func (Gender) Type() protoreflect.EnumType { + return &file_proto_helloworld_proto_enumTypes[0] +} + +func (x Gender) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Gender.Descriptor instead. 
+func (Gender) EnumDescriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{0} +} + +type Person struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Age int32 `protobuf:"varint,2,opt,name=age,proto3" json:"age,omitempty"` +} + +func (x *Person) Reset() { + *x = Person{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Person) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Person) ProtoMessage() {} + +func (x *Person) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Person.ProtoReflect.Descriptor instead. 
+func (*Person) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{0} +} + +func (x *Person) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Person) GetAge() int32 { + if x != nil { + return x.Age + } + return 0 +} + +type HelloRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Items []string `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` + Gender Gender `protobuf:"varint,3,opt,name=gender,proto3,enum=helloworld.Gender" json:"gender,omitempty"` + Person *Person `protobuf:"bytes,4,opt,name=person,proto3" json:"person,omitempty"` +} + +func (x *HelloRequest) Reset() { + *x = HelloRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HelloRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HelloRequest) ProtoMessage() {} + +func (x *HelloRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HelloRequest.ProtoReflect.Descriptor instead. 
+func (*HelloRequest) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{1} +} + +func (x *HelloRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *HelloRequest) GetItems() []string { + if x != nil { + return x.Items + } + return nil +} + +func (x *HelloRequest) GetGender() Gender { + if x != nil { + return x.Gender + } + return Gender_GENDER_UNKNOWN +} + +func (x *HelloRequest) GetPerson() *Person { + if x != nil { + return x.Person + } + return nil +} + +type HelloReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + Items []string `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` + Gender Gender `protobuf:"varint,3,opt,name=gender,proto3,enum=helloworld.Gender" json:"gender,omitempty"` +} + +func (x *HelloReply) Reset() { + *x = HelloReply{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HelloReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HelloReply) ProtoMessage() {} + +func (x *HelloReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HelloReply.ProtoReflect.Descriptor instead. 
+func (*HelloReply) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{2} +} + +func (x *HelloReply) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *HelloReply) GetItems() []string { + if x != nil { + return x.Items + } + return nil +} + +func (x *HelloReply) GetGender() Gender { + if x != nil { + return x.Gender + } + return Gender_GENDER_UNKNOWN +} + +type PlusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + A int64 `protobuf:"varint,1,opt,name=a,proto3" json:"a,omitempty"` + B int64 `protobuf:"varint,2,opt,name=b,proto3" json:"b,omitempty"` +} + +func (x *PlusRequest) Reset() { + *x = PlusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlusRequest) ProtoMessage() {} + +func (x *PlusRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlusRequest.ProtoReflect.Descriptor instead. 
+func (*PlusRequest) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{3} +} + +func (x *PlusRequest) GetA() int64 { + if x != nil { + return x.A + } + return 0 +} + +func (x *PlusRequest) GetB() int64 { + if x != nil { + return x.B + } + return 0 +} + +type PlusReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Result int64 `protobuf:"varint,1,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *PlusReply) Reset() { + *x = PlusReply{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlusReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlusReply) ProtoMessage() {} + +func (x *PlusReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlusReply.ProtoReflect.Descriptor instead. 
+func (*PlusReply) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{4} +} + +func (x *PlusReply) GetResult() int64 { + if x != nil { + return x.Result + } + return 0 +} + +type MultipleHelloRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Items []string `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` + Genders []Gender `protobuf:"varint,3,rep,packed,name=genders,proto3,enum=helloworld.Gender" json:"genders,omitempty"` + Persons []*Person `protobuf:"bytes,4,rep,name=persons,proto3" json:"persons,omitempty"` +} + +func (x *MultipleHelloRequest) Reset() { + *x = MultipleHelloRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultipleHelloRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultipleHelloRequest) ProtoMessage() {} + +func (x *MultipleHelloRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultipleHelloRequest.ProtoReflect.Descriptor instead. 
+func (*MultipleHelloRequest) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{5} +} + +func (x *MultipleHelloRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MultipleHelloRequest) GetItems() []string { + if x != nil { + return x.Items + } + return nil +} + +func (x *MultipleHelloRequest) GetGenders() []Gender { + if x != nil { + return x.Genders + } + return nil +} + +func (x *MultipleHelloRequest) GetPersons() []*Person { + if x != nil { + return x.Persons + } + return nil +} + +type MultipleHelloReply struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + Items []string `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` + Genders []Gender `protobuf:"varint,3,rep,packed,name=genders,proto3,enum=helloworld.Gender" json:"genders,omitempty"` +} + +func (x *MultipleHelloReply) Reset() { + *x = MultipleHelloReply{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MultipleHelloReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MultipleHelloReply) ProtoMessage() {} + +func (x *MultipleHelloReply) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MultipleHelloReply.ProtoReflect.Descriptor instead. 
+func (*MultipleHelloReply) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{6} +} + +func (x *MultipleHelloReply) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *MultipleHelloReply) GetItems() []string { + if x != nil { + return x.Items + } + return nil +} + +func (x *MultipleHelloReply) GetGenders() []Gender { + if x != nil { + return x.Genders + } + return nil +} + +type ErrorDetail struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Code int64 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *ErrorDetail) Reset() { + *x = ErrorDetail{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_helloworld_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorDetail) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorDetail) ProtoMessage() {} + +func (x *ErrorDetail) ProtoReflect() protoreflect.Message { + mi := &file_proto_helloworld_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorDetail.ProtoReflect.Descriptor instead. 
+func (*ErrorDetail) Descriptor() ([]byte, []int) { + return file_proto_helloworld_proto_rawDescGZIP(), []int{7} +} + +func (x *ErrorDetail) GetCode() int64 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *ErrorDetail) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *ErrorDetail) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +var File_proto_helloworld_proto protoreflect.FileDescriptor + +var file_proto_helloworld_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, + 0x6c, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, + 0x6f, 0x72, 0x6c, 0x64, 0x22, 0x2e, 0x0a, 0x06, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x61, 0x67, 0x65, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, + 0x2a, 0x0a, 0x06, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x47, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x52, 0x06, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x70, + 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, + 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x52, + 0x06, 0x70, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x22, 0x68, 0x0a, 0x0a, 0x48, 0x65, 0x6c, 0x6c, 0x6f, + 0x52, 
0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, + 0x6c, 0x64, 0x2e, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x06, 0x67, 0x65, 0x6e, 0x64, 0x65, + 0x72, 0x22, 0x29, 0x0a, 0x0b, 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x0c, 0x0a, 0x01, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x01, 0x61, 0x12, 0x0c, + 0x0a, 0x01, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x01, 0x62, 0x22, 0x23, 0x0a, 0x09, + 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x22, 0x9c, 0x01, 0x0a, 0x14, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65, + 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, + 0x6c, 0x64, 0x2e, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x07, 0x67, 0x65, 0x6e, 0x64, 0x65, + 0x72, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, + 0x2e, 0x50, 0x65, 0x72, 0x73, 0x6f, 0x6e, 
0x52, 0x07, 0x70, 0x65, 0x72, 0x73, 0x6f, 0x6e, 0x73, + 0x22, 0x72, 0x0a, 0x12, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c, + 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2c, 0x0a, 0x07, 0x67, 0x65, 0x6e, 0x64, 0x65, 0x72, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, + 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x52, 0x07, 0x67, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x73, 0x22, 0x4f, 0x0a, 0x0b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x2a, 0x40, 0x0a, 0x06, 0x47, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x0e, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x4d, 0x41, + 0x4c, 0x45, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x5f, 0x46, + 0x45, 0x4d, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x32, 0xda, 0x04, 0x0a, 0x07, 0x47, 0x72, 0x65, 0x65, + 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, + 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, + 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 
0x65, 0x6c, 0x6c, + 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x45, 0x72, 0x72, 0x52, 0x65, 0x73, + 0x70, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, + 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, + 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x38, 0x0a, 0x04, 0x50, 0x6c, 0x75, 0x73, 0x12, 0x17, 0x2e, + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x50, 0x6c, 0x75, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, + 0x72, 0x6c, 0x64, 0x2e, 0x50, 0x6c, 0x75, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, + 0x48, 0x0a, 0x12, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x41, 0x66, 0x74, 0x65, 0x72, + 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, + 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, + 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x56, 0x0a, 0x10, 0x53, 0x61, 0x79, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x12, 0x20, 0x2e, + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1e, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, + 0x00, 0x12, 0x4c, 0x0a, 0x14, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 
0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, + 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, + 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x4c, 0x0a, 0x14, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x43, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, + 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, + 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x28, 0x01, 0x12, 0x55, 0x0a, + 0x1b, 0x53, 0x61, 0x79, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x42, 0x69, 0x64, 0x69, 0x72, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x18, 0x2e, 0x68, + 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, + 0x72, 0x6c, 0x64, 0x2e, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, + 0x28, 0x01, 0x30, 0x01, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_helloworld_proto_rawDescOnce sync.Once + file_proto_helloworld_proto_rawDescData = file_proto_helloworld_proto_rawDesc +) + +func file_proto_helloworld_proto_rawDescGZIP() []byte { + file_proto_helloworld_proto_rawDescOnce.Do(func() { + file_proto_helloworld_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_helloworld_proto_rawDescData) + }) + return file_proto_helloworld_proto_rawDescData +} + +var file_proto_helloworld_proto_enumTypes = make([]protoimpl.EnumInfo, 1) 
+var file_proto_helloworld_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_proto_helloworld_proto_goTypes = []interface{}{ + (Gender)(0), // 0: helloworld.Gender + (*Person)(nil), // 1: helloworld.Person + (*HelloRequest)(nil), // 2: helloworld.HelloRequest + (*HelloReply)(nil), // 3: helloworld.HelloReply + (*PlusRequest)(nil), // 4: helloworld.PlusRequest + (*PlusReply)(nil), // 5: helloworld.PlusReply + (*MultipleHelloRequest)(nil), // 6: helloworld.MultipleHelloRequest + (*MultipleHelloReply)(nil), // 7: helloworld.MultipleHelloReply + (*ErrorDetail)(nil), // 8: helloworld.ErrorDetail +} +var file_proto_helloworld_proto_depIdxs = []int32{ + 0, // 0: helloworld.HelloRequest.gender:type_name -> helloworld.Gender + 1, // 1: helloworld.HelloRequest.person:type_name -> helloworld.Person + 0, // 2: helloworld.HelloReply.gender:type_name -> helloworld.Gender + 0, // 3: helloworld.MultipleHelloRequest.genders:type_name -> helloworld.Gender + 1, // 4: helloworld.MultipleHelloRequest.persons:type_name -> helloworld.Person + 0, // 5: helloworld.MultipleHelloReply.genders:type_name -> helloworld.Gender + 2, // 6: helloworld.Greeter.SayHello:input_type -> helloworld.HelloRequest + 2, // 7: helloworld.Greeter.GetErrResp:input_type -> helloworld.HelloRequest + 4, // 8: helloworld.Greeter.Plus:input_type -> helloworld.PlusRequest + 2, // 9: helloworld.Greeter.SayHelloAfterDelay:input_type -> helloworld.HelloRequest + 6, // 10: helloworld.Greeter.SayMultipleHello:input_type -> helloworld.MultipleHelloRequest + 2, // 11: helloworld.Greeter.SayHelloServerStream:input_type -> helloworld.HelloRequest + 2, // 12: helloworld.Greeter.SayHelloClientStream:input_type -> helloworld.HelloRequest + 2, // 13: helloworld.Greeter.SayHelloBidirectionalStream:input_type -> helloworld.HelloRequest + 3, // 14: helloworld.Greeter.SayHello:output_type -> helloworld.HelloReply + 3, // 15: helloworld.Greeter.GetErrResp:output_type -> helloworld.HelloReply + 5, // 16: 
helloworld.Greeter.Plus:output_type -> helloworld.PlusReply + 3, // 17: helloworld.Greeter.SayHelloAfterDelay:output_type -> helloworld.HelloReply + 7, // 18: helloworld.Greeter.SayMultipleHello:output_type -> helloworld.MultipleHelloReply + 3, // 19: helloworld.Greeter.SayHelloServerStream:output_type -> helloworld.HelloReply + 3, // 20: helloworld.Greeter.SayHelloClientStream:output_type -> helloworld.HelloReply + 3, // 21: helloworld.Greeter.SayHelloBidirectionalStream:output_type -> helloworld.HelloReply + 14, // [14:22] is the sub-list for method output_type + 6, // [6:14] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_proto_helloworld_proto_init() } +func file_proto_helloworld_proto_init() { + if File_proto_helloworld_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_proto_helloworld_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Person); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_helloworld_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HelloRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_helloworld_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HelloReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_helloworld_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_proto_helloworld_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlusReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_helloworld_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MultipleHelloRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_helloworld_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MultipleHelloReply); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_helloworld_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorDetail); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_helloworld_proto_rawDesc, + NumEnums: 1, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_helloworld_proto_goTypes, + DependencyIndexes: file_proto_helloworld_proto_depIdxs, + EnumInfos: file_proto_helloworld_proto_enumTypes, + MessageInfos: file_proto_helloworld_proto_msgTypes, + }.Build() + File_proto_helloworld_proto = out.File + file_proto_helloworld_proto_rawDesc = nil + file_proto_helloworld_proto_goTypes = nil + file_proto_helloworld_proto_depIdxs = nil +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/helloworld.proto b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/helloworld.proto new file mode 100644 index 0000000..28f711a --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/helloworld.proto @@ -0,0 +1,92 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package helloworld; +option go_package = "./proto"; + +service Greeter { + // Unary RPC. + rpc SayHello (HelloRequest) returns (HelloReply) {} + rpc GetErrResp (HelloRequest) returns (HelloReply) {} + rpc Plus (PlusRequest) returns (PlusReply) {} + rpc SayHelloAfterDelay (HelloRequest) returns (HelloReply) {} + rpc SayMultipleHello(MultipleHelloRequest) returns (MultipleHelloReply) {} + + // Server side streaming. + rpc SayHelloServerStream (HelloRequest) returns (stream HelloReply) {} + + // Client side streaming. + rpc SayHelloClientStream (stream HelloRequest) returns (HelloReply) {} + + // Bidirectional streaming. 
+ rpc SayHelloBidirectionalStream (stream HelloRequest) returns (stream HelloReply) {} + +} + +enum Gender { + GENDER_UNKNOWN = 0; + GENDER_MALE = 1; + GENDER_FEMALE = 2; +} + +message Person { + string name = 1; + int32 age = 2; +} + +message HelloRequest { + string name = 1; + repeated string items = 2; + Gender gender = 3; + Person person = 4; +} + +message HelloReply { + string message = 1; + repeated string items = 2; + Gender gender = 3; +} + +message PlusRequest { + int64 a = 1; + int64 b = 2; +} + +message PlusReply { + int64 result = 1; +} + +message MultipleHelloRequest { + string name = 1; + repeated string items = 2; + repeated Gender genders = 3; + repeated Person persons = 4; +} + +message MultipleHelloReply{ + string message = 1; + repeated string items = 2; + repeated Gender genders = 3; +} + +message ErrorDetail { + int64 code = 1; + string message = 2; + string type = 3; +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/helloworld_grpc.pb.go b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/helloworld_grpc.pb.go new file mode 100644 index 0000000..9314724 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/helloworld_grpc.pb.go @@ -0,0 +1,459 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.12.4 +// source: proto/helloworld.proto + +package proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// GreeterClient is the client API for Greeter service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type GreeterClient interface { + // Unary RPC. + SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) + GetErrResp(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) + Plus(ctx context.Context, in *PlusRequest, opts ...grpc.CallOption) (*PlusReply, error) + SayHelloAfterDelay(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) + SayMultipleHello(ctx context.Context, in *MultipleHelloRequest, opts ...grpc.CallOption) (*MultipleHelloReply, error) + // Server side streaming. + SayHelloServerStream(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (Greeter_SayHelloServerStreamClient, error) + // Client side streaming. + SayHelloClientStream(ctx context.Context, opts ...grpc.CallOption) (Greeter_SayHelloClientStreamClient, error) + // Bidirectional streaming. + SayHelloBidirectionalStream(ctx context.Context, opts ...grpc.CallOption) (Greeter_SayHelloBidirectionalStreamClient, error) +} + +type greeterClient struct { + cc grpc.ClientConnInterface +} + +func NewGreeterClient(cc grpc.ClientConnInterface) GreeterClient { + return &greeterClient{cc} +} + +func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *greeterClient) GetErrResp(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := c.cc.Invoke(ctx, "/helloworld.Greeter/GetErrResp", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *greeterClient) Plus(ctx context.Context, in *PlusRequest, opts ...grpc.CallOption) (*PlusReply, error) { + out := new(PlusReply) + err := c.cc.Invoke(ctx, "/helloworld.Greeter/Plus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *greeterClient) SayHelloAfterDelay(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { + out := new(HelloReply) + err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayHelloAfterDelay", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *greeterClient) SayMultipleHello(ctx context.Context, in *MultipleHelloRequest, opts ...grpc.CallOption) (*MultipleHelloReply, error) { + out := new(MultipleHelloReply) + err := c.cc.Invoke(ctx, "/helloworld.Greeter/SayMultipleHello", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *greeterClient) SayHelloServerStream(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (Greeter_SayHelloServerStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &Greeter_ServiceDesc.Streams[0], "/helloworld.Greeter/SayHelloServerStream", opts...) 
+ if err != nil { + return nil, err + } + x := &greeterSayHelloServerStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Greeter_SayHelloServerStreamClient interface { + Recv() (*HelloReply, error) + grpc.ClientStream +} + +type greeterSayHelloServerStreamClient struct { + grpc.ClientStream +} + +func (x *greeterSayHelloServerStreamClient) Recv() (*HelloReply, error) { + m := new(HelloReply) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *greeterClient) SayHelloClientStream(ctx context.Context, opts ...grpc.CallOption) (Greeter_SayHelloClientStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &Greeter_ServiceDesc.Streams[1], "/helloworld.Greeter/SayHelloClientStream", opts...) + if err != nil { + return nil, err + } + x := &greeterSayHelloClientStreamClient{stream} + return x, nil +} + +type Greeter_SayHelloClientStreamClient interface { + Send(*HelloRequest) error + CloseAndRecv() (*HelloReply, error) + grpc.ClientStream +} + +type greeterSayHelloClientStreamClient struct { + grpc.ClientStream +} + +func (x *greeterSayHelloClientStreamClient) Send(m *HelloRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *greeterSayHelloClientStreamClient) CloseAndRecv() (*HelloReply, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(HelloReply) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *greeterClient) SayHelloBidirectionalStream(ctx context.Context, opts ...grpc.CallOption) (Greeter_SayHelloBidirectionalStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &Greeter_ServiceDesc.Streams[2], "/helloworld.Greeter/SayHelloBidirectionalStream", opts...) 
+ if err != nil { + return nil, err + } + x := &greeterSayHelloBidirectionalStreamClient{stream} + return x, nil +} + +type Greeter_SayHelloBidirectionalStreamClient interface { + Send(*HelloRequest) error + Recv() (*HelloReply, error) + grpc.ClientStream +} + +type greeterSayHelloBidirectionalStreamClient struct { + grpc.ClientStream +} + +func (x *greeterSayHelloBidirectionalStreamClient) Send(m *HelloRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *greeterSayHelloBidirectionalStreamClient) Recv() (*HelloReply, error) { + m := new(HelloReply) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GreeterServer is the server API for Greeter service. +// All implementations must embed UnimplementedGreeterServer +// for forward compatibility +type GreeterServer interface { + // Unary RPC. + SayHello(context.Context, *HelloRequest) (*HelloReply, error) + GetErrResp(context.Context, *HelloRequest) (*HelloReply, error) + Plus(context.Context, *PlusRequest) (*PlusReply, error) + SayHelloAfterDelay(context.Context, *HelloRequest) (*HelloReply, error) + SayMultipleHello(context.Context, *MultipleHelloRequest) (*MultipleHelloReply, error) + // Server side streaming. + SayHelloServerStream(*HelloRequest, Greeter_SayHelloServerStreamServer) error + // Client side streaming. + SayHelloClientStream(Greeter_SayHelloClientStreamServer) error + // Bidirectional streaming. + SayHelloBidirectionalStream(Greeter_SayHelloBidirectionalStreamServer) error + mustEmbedUnimplementedGreeterServer() +} + +// UnimplementedGreeterServer must be embedded to have forward compatible implementations. 
+type UnimplementedGreeterServer struct { +} + +func (UnimplementedGreeterServer) SayHello(context.Context, *HelloRequest) (*HelloReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SayHello not implemented") +} +func (UnimplementedGreeterServer) GetErrResp(context.Context, *HelloRequest) (*HelloReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetErrResp not implemented") +} +func (UnimplementedGreeterServer) Plus(context.Context, *PlusRequest) (*PlusReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method Plus not implemented") +} +func (UnimplementedGreeterServer) SayHelloAfterDelay(context.Context, *HelloRequest) (*HelloReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SayHelloAfterDelay not implemented") +} +func (UnimplementedGreeterServer) SayMultipleHello(context.Context, *MultipleHelloRequest) (*MultipleHelloReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method SayMultipleHello not implemented") +} +func (UnimplementedGreeterServer) SayHelloServerStream(*HelloRequest, Greeter_SayHelloServerStreamServer) error { + return status.Errorf(codes.Unimplemented, "method SayHelloServerStream not implemented") +} +func (UnimplementedGreeterServer) SayHelloClientStream(Greeter_SayHelloClientStreamServer) error { + return status.Errorf(codes.Unimplemented, "method SayHelloClientStream not implemented") +} +func (UnimplementedGreeterServer) SayHelloBidirectionalStream(Greeter_SayHelloBidirectionalStreamServer) error { + return status.Errorf(codes.Unimplemented, "method SayHelloBidirectionalStream not implemented") +} +func (UnimplementedGreeterServer) mustEmbedUnimplementedGreeterServer() {} + +// UnsafeGreeterServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GreeterServer will +// result in compilation errors. 
+type UnsafeGreeterServer interface { + mustEmbedUnimplementedGreeterServer() +} + +func RegisterGreeterServer(s grpc.ServiceRegistrar, srv GreeterServer) { + s.RegisterService(&Greeter_ServiceDesc, srv) +} + +func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).SayHello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/SayHello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Greeter_GetErrResp_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).GetErrResp(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/GetErrResp", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).GetErrResp(ctx, req.(*HelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Greeter_Plus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).Plus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/Plus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).Plus(ctx, req.(*PlusRequest)) + } 
+ return interceptor(ctx, in, info, handler) +} + +func _Greeter_SayHelloAfterDelay_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).SayHelloAfterDelay(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/SayHelloAfterDelay", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).SayHelloAfterDelay(ctx, req.(*HelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Greeter_SayMultipleHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MultipleHelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GreeterServer).SayMultipleHello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.Greeter/SayMultipleHello", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GreeterServer).SayMultipleHello(ctx, req.(*MultipleHelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Greeter_SayHelloServerStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HelloRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GreeterServer).SayHelloServerStream(m, &greeterSayHelloServerStreamServer{stream}) +} + +type Greeter_SayHelloServerStreamServer interface { + Send(*HelloReply) error + grpc.ServerStream +} + +type greeterSayHelloServerStreamServer struct { + grpc.ServerStream +} + +func (x *greeterSayHelloServerStreamServer) Send(m *HelloReply) error { + return x.ServerStream.SendMsg(m) +} + +func _Greeter_SayHelloClientStream_Handler(srv 
interface{}, stream grpc.ServerStream) error { + return srv.(GreeterServer).SayHelloClientStream(&greeterSayHelloClientStreamServer{stream}) +} + +type Greeter_SayHelloClientStreamServer interface { + SendAndClose(*HelloReply) error + Recv() (*HelloRequest, error) + grpc.ServerStream +} + +type greeterSayHelloClientStreamServer struct { + grpc.ServerStream +} + +func (x *greeterSayHelloClientStreamServer) SendAndClose(m *HelloReply) error { + return x.ServerStream.SendMsg(m) +} + +func (x *greeterSayHelloClientStreamServer) Recv() (*HelloRequest, error) { + m := new(HelloRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Greeter_SayHelloBidirectionalStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GreeterServer).SayHelloBidirectionalStream(&greeterSayHelloBidirectionalStreamServer{stream}) +} + +type Greeter_SayHelloBidirectionalStreamServer interface { + Send(*HelloReply) error + Recv() (*HelloRequest, error) + grpc.ServerStream +} + +type greeterSayHelloBidirectionalStreamServer struct { + grpc.ServerStream +} + +func (x *greeterSayHelloBidirectionalStreamServer) Send(m *HelloReply) error { + return x.ServerStream.SendMsg(m) +} + +func (x *greeterSayHelloBidirectionalStreamServer) Recv() (*HelloRequest, error) { + m := new(HelloRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Greeter_ServiceDesc is the grpc.ServiceDesc for Greeter service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Greeter_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "helloworld.Greeter", + HandlerType: (*GreeterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SayHello", + Handler: _Greeter_SayHello_Handler, + }, + { + MethodName: "GetErrResp", + Handler: _Greeter_GetErrResp_Handler, + }, + { + MethodName: "Plus", + Handler: _Greeter_Plus_Handler, + }, + { + MethodName: "SayHelloAfterDelay", + Handler: _Greeter_SayHelloAfterDelay_Handler, + }, + { + MethodName: "SayMultipleHello", + Handler: _Greeter_SayMultipleHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "SayHelloServerStream", + Handler: _Greeter_SayHelloServerStream_Handler, + ServerStreams: true, + }, + { + StreamName: "SayHelloClientStream", + Handler: _Greeter_SayHelloClientStream_Handler, + ClientStreams: true, + }, + { + StreamName: "SayHelloBidirectionalStream", + Handler: _Greeter_SayHelloBidirectionalStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "proto/helloworld.proto", +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/import.pb.go b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/import.pb.go new file mode 100644 index 0000000..2c1ce79 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/import.pb.go @@ -0,0 +1,220 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.12.4 +// source: proto/import.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type User struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *User) Reset() { + *x = User{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_import_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *User) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*User) ProtoMessage() {} + +func (x *User) ProtoReflect() protoreflect.Message { + mi := &file_proto_import_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use User.ProtoReflect.Descriptor instead. 
+func (*User) Descriptor() ([]byte, []int) { + return file_proto_import_proto_rawDescGZIP(), []int{0} +} + +func (x *User) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Body string `protobuf:"bytes,1,opt,name=body,proto3" json:"body,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_import_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_proto_import_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. 
+func (*Response) Descriptor() ([]byte, []int) { + return file_proto_import_proto_rawDescGZIP(), []int{1} +} + +func (x *Response) GetBody() string { + if x != nil { + return x.Body + } + return "" +} + +var File_proto_import_proto protoreflect.FileDescriptor + +var file_proto_import_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x03, 0x70, 0x6b, 0x67, 0x22, 0x1a, 0x0a, 0x04, 0x55, 0x73, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x1e, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x62, 0x6f, 0x64, 0x79, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_import_proto_rawDescOnce sync.Once + file_proto_import_proto_rawDescData = file_proto_import_proto_rawDesc +) + +func file_proto_import_proto_rawDescGZIP() []byte { + file_proto_import_proto_rawDescOnce.Do(func() { + file_proto_import_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_import_proto_rawDescData) + }) + return file_proto_import_proto_rawDescData +} + +var file_proto_import_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_proto_import_proto_goTypes = []interface{}{ + (*User)(nil), // 0: pkg.User + (*Response)(nil), // 1: pkg.Response +} +var file_proto_import_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_proto_import_proto_init() } +func file_proto_import_proto_init() { + if File_proto_import_proto != nil { + return + } + 
if !protoimpl.UnsafeEnabled { + file_proto_import_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*User); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_proto_import_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_import_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_proto_import_proto_goTypes, + DependencyIndexes: file_proto_import_proto_depIdxs, + MessageInfos: file_proto_import_proto_msgTypes, + }.Build() + File_proto_import_proto = out.File + file_proto_import_proto_rawDesc = nil + file_proto_import_proto_goTypes = nil + file_proto_import_proto_depIdxs = nil +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/import.proto b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/import.proto new file mode 100644 index 0000000..b765059 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/import.proto @@ -0,0 +1,29 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package pkg; +option go_package = "./proto"; + +message User { + string name = 1; +} + +message Response { + string body = 1; +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/src.pb.go b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/src.pb.go new file mode 100644 index 0000000..c0b488f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/src.pb.go @@ -0,0 +1,179 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.12.4 +// source: proto/src.proto + +package proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + Body string `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` +} + +func (x *Request) Reset() { + *x = Request{} + if protoimpl.UnsafeEnabled { + mi := &file_proto_src_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_proto_src_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Request.ProtoReflect.Descriptor instead. 
+func (*Request) Descriptor() ([]byte, []int) { + return file_proto_src_proto_rawDescGZIP(), []int{0} +} + +func (x *Request) GetUser() *User { + if x != nil { + return x.User + } + return nil +} + +func (x *Request) GetBody() string { + if x != nil { + return x.Body + } + return "" +} + +var File_proto_src_proto protoreflect.FileDescriptor + +var file_proto_src_proto_rawDesc = []byte{ + 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x72, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x1a, 0x12, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x3c, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x04, + 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x6b, 0x67, + 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x62, + 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x32, + 0x39, 0x0a, 0x0a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x2b, 0x0a, + 0x03, 0x52, 0x75, 0x6e, 0x12, 0x13, 0x2e, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x77, 0x6f, 0x72, 0x6c, + 0x64, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0d, 0x2e, 0x70, 0x6b, 0x67, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x09, 0x5a, 0x07, 0x2e, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_proto_src_proto_rawDescOnce sync.Once + file_proto_src_proto_rawDescData = file_proto_src_proto_rawDesc +) + +func file_proto_src_proto_rawDescGZIP() []byte { + file_proto_src_proto_rawDescOnce.Do(func() { + file_proto_src_proto_rawDescData = protoimpl.X.CompressGZIP(file_proto_src_proto_rawDescData) + }) + return file_proto_src_proto_rawDescData +} + +var file_proto_src_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var 
file_proto_src_proto_goTypes = []interface{}{ + (*Request)(nil), // 0: helloworld.Request + (*User)(nil), // 1: pkg.User + (*Response)(nil), // 2: pkg.Response +} +var file_proto_src_proto_depIdxs = []int32{ + 1, // 0: helloworld.Request.user:type_name -> pkg.User + 0, // 1: helloworld.TestImport.Run:input_type -> helloworld.Request + 2, // 2: helloworld.TestImport.Run:output_type -> pkg.Response + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_proto_src_proto_init() } +func file_proto_src_proto_init() { + if File_proto_src_proto != nil { + return + } + file_proto_import_proto_init() + if !protoimpl.UnsafeEnabled { + file_proto_src_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_proto_src_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_src_proto_goTypes, + DependencyIndexes: file_proto_src_proto_depIdxs, + MessageInfos: file_proto_src_proto_msgTypes, + }.Build() + File_proto_src_proto = out.File + file_proto_src_proto_rawDesc = nil + file_proto_src_proto_goTypes = nil + file_proto_src_proto_depIdxs = nil +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/src.proto b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/src.proto new file mode 100644 index 0000000..11d9b66 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/src.proto @@ -0,0 +1,32 @@ +// +// 
Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package helloworld; +option go_package = "./proto"; + +import "proto/import.proto"; + +service TestImport { + rpc Run (Request) returns (pkg.Response) {} +} + +message Request { + pkg.User user = 1; + string body = 2; +} diff --git a/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/src_grpc.pb.go b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/src_grpc.pb.go new file mode 100644 index 0000000..d4015ed --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/grpc_server_example/proto/src_grpc.pb.go @@ -0,0 +1,105 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.12.4 +// source: proto/src.proto + +package proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// TestImportClient is the client API for TestImport service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TestImportClient interface { + Run(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) +} + +type testImportClient struct { + cc grpc.ClientConnInterface +} + +func NewTestImportClient(cc grpc.ClientConnInterface) TestImportClient { + return &testImportClient{cc} +} + +func (c *testImportClient) Run(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) { + out := new(Response) + err := c.cc.Invoke(ctx, "/helloworld.TestImport/Run", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TestImportServer is the server API for TestImport service. +// All implementations must embed UnimplementedTestImportServer +// for forward compatibility +type TestImportServer interface { + Run(context.Context, *Request) (*Response, error) + mustEmbedUnimplementedTestImportServer() +} + +// UnimplementedTestImportServer must be embedded to have forward compatible implementations. +type UnimplementedTestImportServer struct { +} + +func (UnimplementedTestImportServer) Run(context.Context, *Request) (*Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Run not implemented") +} +func (UnimplementedTestImportServer) mustEmbedUnimplementedTestImportServer() {} + +// UnsafeTestImportServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TestImportServer will +// result in compilation errors. 
+type UnsafeTestImportServer interface { + mustEmbedUnimplementedTestImportServer() +} + +func RegisterTestImportServer(s grpc.ServiceRegistrar, srv TestImportServer) { + s.RegisterService(&TestImport_ServiceDesc, srv) +} + +func _TestImport_Run_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestImportServer).Run(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/helloworld.TestImport/Run", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestImportServer).Run(ctx, req.(*Request)) + } + return interceptor(ctx, in, info, handler) +} + +// TestImport_ServiceDesc is the grpc.ServiceDesc for TestImport service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TestImport_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "helloworld.TestImport", + HandlerType: (*TestImportServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Run", + Handler: _TestImport_Run_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "proto/src.proto", +} diff --git a/CloudronPackages/APISIX/apisix-source/t/http3/admin/basic.t b/CloudronPackages/APISIX/apisix-source/t/http3/admin/basic.t new file mode 100644 index 0000000..b33e60c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/http3/admin/basic.t @@ -0,0 +1,108 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+repeat_each(1);
+log_level('info');
+no_root_location();
+no_shuffle();
+run_tests();
+
+__DATA__
+
+=== TEST 1: create ssl for test.com
+--- config
+    location /t {
+        content_by_lua_block {
+            local core = require("apisix.core")
+            local t = require("lib.test_admin")
+
+            local ssl_cert = t.read_file("t/certs/apisix.crt")
+            local ssl_key = t.read_file("t/certs/apisix.key")
+            local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"}
+
+            local code, body = t.test('/apisix/admin/ssls/1',
+                ngx.HTTP_PUT,
+                core.json.encode(data),
+                [[{
+                    "value": {
+                        "sni": "test.com"
+                    },
+                    "key": "/apisix/ssls/1"
+                }]]
+                )
+
+            ngx.status = code
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 2: Successfully access test.com with QUIC
+--- config
+    location /echo {
+        echo world;
+    }
+--- exec
+curl -k -v -H "Host: test.com" -H "content-length: 0" --http3-only --resolve "test.com:1994:127.0.0.1" https://test.com:1994/echo 2>&1 | cat
+--- response_body eval
+qr/world/
+
+
+
+=== TEST 3: set route
+--- config
+location /t {
+    content_by_lua_block {
+        local t = require("lib.test_admin").test
+        local code, body = t('/apisix/admin/routes/1',
+            ngx.HTTP_PUT,
+            [[{
+                "upstream": {
+                    "nodes": {
+                        "127.0.0.1:1980": 1
+                    },
+                    "type": "roundrobin"
+                },
+                "uri": "/hello"
+            }]]
+            )
+        if code >= 300 then
+            ngx.status = code
+            ngx.say(body)
+            return
+        end
+        ngx.say(body)
+    }
+}
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 4: Successfully access route with QUIC
+--- exec
+curl -k -v -H "Host: 
test.com:1994" -H "content-length: 0" --http3-only --resolve "test.com:1994:127.0.0.1" https://test.com:1994/hello 2>&1 | cat +--- response_body_like +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/jest.config.ts b/CloudronPackages/APISIX/apisix-source/t/jest.config.ts new file mode 100644 index 0000000..8fa91d0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/jest.config.ts @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import type { Config } from 'jest'; + +const config: Config = { + coverageProvider: 'v8', + testEnvironment: 'node', + testRegex: '(/__tests__/.*|(\\.|/)(spec|test))\\.(ts|mts)$', + transform: { + '^.+\\.ts$': ['ts-jest', { useESM: false }], + '^.+\\.mts$': ['ts-jest', { useESM: true, tsconfig: 'tsconfig.esm.json' }], + }, + extensionsToTreatAsEsm: ['.mts'], + moduleFileExtensions: ['ts', 'mts', 'js'], +}; + +export default config; diff --git a/CloudronPackages/APISIX/apisix-source/t/kubernetes/configs/account.yaml b/CloudronPackages/APISIX/apisix-source/t/kubernetes/configs/account.yaml new file mode 100644 index 0000000..da7cf01 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/kubernetes/configs/account.yaml @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: apisix-test + namespace: default +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: apisix-test +rules: + - apiGroups: [ "" ] + resources: [ endpoints ] + verbs: [ get,list,watch ] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: apisix-test +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: apisix-test +subjects: + - kind: ServiceAccount + name: apisix-test + namespace: default diff --git a/CloudronPackages/APISIX/apisix-source/t/kubernetes/configs/endpoint.yaml b/CloudronPackages/APISIX/apisix-source/t/kubernetes/configs/endpoint.yaml new file mode 100644 index 0000000..885f825 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/kubernetes/configs/endpoint.yaml @@ -0,0 +1,58 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +kind: Namespace +apiVersion: v1 +metadata: + name: ns-a +--- + +kind: Endpoints +apiVersion: v1 +metadata: + name: ep + namespace: ns-a +subsets: [ ] +--- + +kind: Namespace +apiVersion: v1 +metadata: + name: ns-b +--- + +kind: Endpoints +apiVersion: v1 +metadata: + name: ep + namespace: ns-b +subsets: [ ] +--- + +kind: Namespace +apiVersion: v1 +metadata: + name: ns-c +--- + +kind: Endpoints +apiVersion: v1 +metadata: + name: ep + namespace: ns-c +subsets: [ ] +--- diff --git a/CloudronPackages/APISIX/apisix-source/t/kubernetes/configs/kind.yaml b/CloudronPackages/APISIX/apisix-source/t/kubernetes/configs/kind.yaml new file mode 100644 index 0000000..3db903f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/kubernetes/configs/kind.yaml @@ -0,0 +1,22 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +networking: + apiServerAddress: 127.0.0.1 + apiServerPort: 6443 diff --git a/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/kubernetes.t b/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/kubernetes.t new file mode 100644 index 0000000..5eb82b7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/kubernetes.t @@ -0,0 +1,423 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); +workers(4); + +our $token_file = "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"; +our $token_value = eval {`cat $token_file 2>/dev/null`}; + +add_block_preprocessor(sub { + my ($block) = @_; + + my $apisix_yaml = $block->apisix_yaml // <<_EOC_; +routes: [] +#END +_EOC_ + + $block->set_value("apisix_yaml", $apisix_yaml); + + my $main_config = $block->main_config // <<_EOC_; +env MyPort=6443; +env KUBERNETES_SERVICE_HOST=127.0.0.1; +env KUBERNETES_SERVICE_PORT=6443; +env KUBERNETES_CLIENT_TOKEN=$::token_value; +env KUBERNETES_CLIENT_TOKEN_FILE=$::token_file; +_EOC_ + + $block->set_value("main_config", $main_config); + + my $config = $block->config // <<_EOC_; + + location /compare { + content_by_lua_block { + local http = require("resty.http") + local core = require("apisix.core") + local local_conf = require("apisix.core.config_local").local_conf() + + local function deep_compare(tbl1, tbl2) + if tbl1 == tbl2 then + return true + elseif type(tbl1) == "table" and type(tbl2) == "table" then + for key1, value1 in pairs(tbl1) do + local value2 = tbl2[key1] + if value2 == nil then + -- avoid the type call for missing keys in tbl2 by directly comparing with nil + return false + elseif value1 ~= value2 then + if type(value1) == "table" and type(value2) == "table" then + if not deep_compare(value1, value2) then + return false + end + else + return false + end + end + end + for key2, _ in pairs(tbl2) do + if tbl1[key2] == nil then + return false + end + end + return true + end + + return false + end + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local expect = core.json.decode(request_body) + local current = local_conf.discovery.kubernetes + if deep_compare(expect,current) then + ngx.say("true") + else + ngx.say("false, current is ",core.json.encode(current,true)) + end + } + } + + location /update_token { + content_by_lua_block { + 
local token_file = "$::token_file" + local file = io.open(token_file, "w") + file:write("invalid_token_value") + file:close() + ngx.sleep(3) + file = io.open(token_file, "w") + local token_value = [[$::token_value]] + file:write(token_value) + file:close() + } + } + +_EOC_ + + $block->set_value("config", $config); + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: default value with minimal configuration +--- yaml_config +apisix: + node_listen: 1984 + config_center: yaml +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /compare +{ + "service": { + "schema": "https", + "host": "${KUBERNETES_SERVICE_HOST}", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token": "${KUBERNETES_CLIENT_TOKEN}" + }, + "watch_endpoint_slices": false, + "shared_size": "1m", + "default_weight": 50 +} +--- more_headers +Content-type: application/json +--- response_body +true + + + +=== TEST 2: default value with minimal service and client configuration +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + service: {} + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /compare +{ + "service": { + "schema": "https", + "host": "${KUBERNETES_SERVICE_HOST}", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token": "${KUBERNETES_CLIENT_TOKEN}" + }, + "watch_endpoint_slices": false, + "shared_size": "1m", + "default_weight": 50 +} +--- more_headers +Content-type: application/json +--- response_body +true + + + +=== TEST 3: mixing set custom and default values +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + service: + host: "sample.com" + shared_size: "2m" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /compare +{ + "service": { + "schema": 
"https", + "host": "sample.com", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token": "${KUBERNETES_CLIENT_TOKEN}" + }, + "watch_endpoint_slices": false, + "shared_size": "2m", + "default_weight": 50 +} +--- more_headers +Content-type: application/json +--- response_body +true + + + +=== TEST 4: mixing set custom and default values +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + client: + token: ${KUBERNETES_CLIENT_TOKEN} + default_weight: 33 +--- request +GET /compare +{ + "service": { + "schema": "https", + "host": "${KUBERNETES_SERVICE_HOST}", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token": "${KUBERNETES_CLIENT_TOKEN}" + }, + "watch_endpoint_slices": false, + "shared_size": "1m", + "default_weight": 33 +} +--- more_headers +Content-type: application/json +--- response_body +true + + + +=== TEST 5: multi cluster mode configuration +--- http_config +lua_shared_dict kubernetes-debug 1m; +lua_shared_dict kubernetes-release 1m; +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: "debug" + service: + host: "1.cluster.com" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} + - id: "release" + service: + schema: "http" + host: "2.cluster.com" + port: "${MyPort}" + client: + token: ${KUBERNETES_CLIENT_TOKEN} + default_weight: 33 + shared_size: "2m" +--- request +GET /compare +[ + { + "id": "debug", + "service": { + "schema": "https", + "host": "1.cluster.com", + "port": "6445" + }, + "client": { + "token": "${KUBERNETES_CLIENT_TOKEN}" + }, + "watch_endpoint_slices": false, + "default_weight": 50, + "shared_size": "1m" + }, + { + "id": "release", + "service": { + "schema": "http", + "host": "2.cluster.com", + "port": "${MyPort}" + }, + "client": { + "token": "${KUBERNETES_CLIENT_TOKEN}" + }, + "watch_endpoint_slices": false, 
+ "default_weight": 33, + "shared_size": "2m" + } +] +--- more_headers +Content-type: application/json +--- response_body +true + + + +=== TEST 6: set watch_endpoint_slices true and use kubernetes endpointslices api +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + client: + token: ${KUBERNETES_CLIENT_TOKEN} + default_weight: 33 + watch_endpoint_slices: true +--- request +GET /compare +{ + "service": { + "schema": "https", + "host": "${KUBERNETES_SERVICE_HOST}", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token": "${KUBERNETES_CLIENT_TOKEN}" + }, + "watch_endpoint_slices": true, + "shared_size": "1m", + "default_weight": 33 +} +--- more_headers +Content-type: application/json +--- response_body +true + + + +=== TEST 7: auto read token file before get token value +--- yaml_config +apisix: + node_listen: 1984 + config_center: yaml +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + client: + token_file: "${KUBERNETES_CLIENT_TOKEN_FILE}" +--- request +GET /update_token +--- log_level: debug +--- grep_error_log eval +qr/re-read the token value/ +--- grep_error_log_out +re-read the token value +re-read the token value + + + +=== TEST 8: default value with minimal configuration and large shared_size +--- yaml_config +apisix: + node_listen: 1984 + config_center: yaml +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + client: + token: ${KUBERNETES_CLIENT_TOKEN} + shared_size: "1000m" +--- request +GET /compare +{ + "service": { + "schema": "https", + "host": "${KUBERNETES_SERVICE_HOST}", + "port": "${KUBERNETES_SERVICE_PORT}" + }, + "client": { + "token": "${KUBERNETES_CLIENT_TOKEN}" + }, + "watch_endpoint_slices": false, + "shared_size": "1000m", + "default_weight": 50 +} +--- more_headers +Content-type: application/json +--- response_body +true diff --git 
a/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/kubernetes2.t b/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/kubernetes2.t new file mode 100644 index 0000000..9ec58f5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/kubernetes2.t @@ -0,0 +1,751 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +BEGIN { + our $token_file = "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"; + our $token_value = eval {`cat $token_file 2>/dev/null`}; + + our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: "127.0.0.1" + port: "6443" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + +_EOC_ + + our $scale_ns_c = <<_EOC_; +[ + { + "op": "replace_subsets", + "name": "ep", + "namespace": "ns-c", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + } + ] + } +] +_EOC_ + +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); +workers(4); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $apisix_yaml = $block->apisix_yaml // <<_EOC_; +routes: [] +#END +_EOC_ + + $block->set_value("apisix_yaml", $apisix_yaml); + + my $main_config = $block->main_config // <<_EOC_; +env KUBERNETES_SERVICE_HOST=127.0.0.1; +env KUBERNETES_SERVICE_PORT=6443; +env KUBERNETES_CLIENT_TOKEN=$::token_value; +env KUBERNETES_CLIENT_TOKEN_FILE=$::token_file; +_EOC_ + + $block->set_value("main_config", $main_config); + + my $config = $block->config // <<_EOC_; + location /queries { + content_by_lua_block { + local core = require("apisix.core") + local d = require("apisix.discovery.kubernetes") + + ngx.sleep(1) + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local queries = core.json.decode(request_body) + local response_body = "{" + for _,query in ipairs(queries) do + local nodes = d.nodes(query) + if nodes==nil or #nodes==0 then + response_body=response_body.." "..0 + else + response_body=response_body.." 
"..#nodes + end + end + ngx.say(response_body.." }") + } + } + + location /operators { + content_by_lua_block { + local http = require("resty.http") + local core = require("apisix.core") + local ipairs = ipairs + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local operators = core.json.decode(request_body) + + core.log.info("get body ", request_body) + core.log.info("get operators ", #operators) + for _, op in ipairs(operators) do + local method, path, body + local headers = { + ["Host"] = "127.0.0.1:6445" + } + + if op.op == "replace_subsets" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. op.name + if #op.subsets == 0 then + body = '[{"path":"/subsets","op":"replace","value":[]}]' + else + local t = { { op = "replace", path = "/subsets", value = op.subsets } } + body = core.json.encode(t, true) + end + headers["Content-Type"] = "application/json-patch+json" + end + + if op.op == "replace_labels" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. 
op.name + local t = { { op = "replace", path = "/metadata/labels", value = op.labels } } + body = core.json.encode(t, true) + headers["Content-Type"] = "application/json-patch+json" + end + + local httpc = http.new() + core.log.info("begin to connect ", "127.0.0.1:6445") + local ok, message = httpc:connect({ + scheme = "http", + host = "127.0.0.1", + port = 6445, + }) + if not ok then + core.log.error("connect 127.0.0.1:6445 failed, message : ", message) + ngx.say("FAILED") + end + local res, err = httpc:request({ + method = method, + path = path, + headers = headers, + body = body, + }) + if err ~= nil then + -- fix: core.log has no "err" level (valid levels: error/warn/info/...); core.log.err would raise at runtime + core.log.error("operator k8s cluster error: ", err) + return 500 + end + if res.status ~= 200 and res.status ~= 201 and res.status ~= 409 then + return res.status + end + end + ngx.say("DONE") + } + } + +_EOC_ + + $block->set_value("config", $config); + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: create namespace and endpoints +--- yaml_config eval: $::yaml_config +--- request +POST /operators +[ + { + "op": "replace_subsets", + "namespace": "ns-a", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": "ns-b" + }, + { + "op": "replace_subsets", + "namespace": "ns-b", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": "ns-c" + }, + { + "op": "replace_subsets", + "namespace": "ns-c", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": 
"10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "port": 5002 + } + ] + } + ] + } +] +--- more_headers +Content-type: application/json + + + +=== TEST 2: use default parameters +--- yaml_config eval: $::yaml_config +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 3: use specify environment parameters +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token: ${KUBERNETES_CLIENT_TOKEN} + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} + +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 4: use namespace selector equal +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + equal: ns-a + - id: second + 
service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 0 0 0 0 2 2 2 2 2 2 } + + + +=== TEST 5: use namespace selector not_equal +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + not_equal: ns-a + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 6: use namespace selector match +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + match: [ns-a,ns-b] + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + 
"first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 0 0 2 2 2 2 2 2 } + + + +=== TEST 7: use namespace selector match with regex +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + match: ["ns-[ab]"] + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 2 2 2 2 0 0 2 2 2 2 2 2 } + + + +=== TEST 8: use namespace selector not_match +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + not_match: ["ns-a"] + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + 
"second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 2 2 2 2 2 2 2 2 2 2 } + + + +=== TEST 9: use namespace selector not_match with regex +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + namespace_selector: + not_match: ["ns-[ab]"] + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 0 0 2 2 2 2 2 2 2 2 } + + + +=== TEST 10: use label selector +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} + label_selector: |- + first=1,second + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} +--- request eval +[ + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"labels\":{}}]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-b\",\"labels\":{}}]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{}}]", + +"GET 
/queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"labels\":{\"first\":\"1\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-b\",\"labels\":{\"first\":\"1\",\"second\":\"o\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"2\",\"second\":\"o\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"1\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +"POST /operators +[{\"op\":\"replace_labels\",\"name\":\"ep\",\"namespace\":\"ns-c\",\"labels\":{\"first\":\"1\",\"second\":\"o\" }}]", + +"GET /queries +[\"first/ns-a/ep:p1\",\"first/ns-b/ep:p1\",\"first/ns-c/ep:5001\"]", + +] +--- response_body eval +[ + "DONE\n", + "DONE\n", + "DONE\n", + "{ 0 0 0 }\n", + "DONE\n", + "{ 0 0 0 }\n", + "DONE\n", + "{ 0 2 0 }\n", + "DONE\n", + "{ 0 2 0 }\n", + "DONE\n", + "{ 0 2 0 }\n", + "DONE\n", + "{ 0 2 2 }\n", +] + + + +=== TEST 11: scale endpoints +--- yaml_config eval: $::yaml_config +--- request eval +[ + +"GET /queries +[ + \"first/ns-a/ep:p1\",\"first/ns-a/ep:p2\", + \"second/ns-a/ep:p1\",\"second/ns-a/ep:p2\" +]", + +"POST /operators +[{\"op\":\"replace_subsets\",\"name\":\"ep\",\"namespace\":\"ns-a\",\"subsets\":[]}]", + +"GET /queries +[ + \"first/ns-a/ep:p1\",\"first/ns-a/ep:p2\", + \"second/ns-a/ep:p1\",\"second/ns-a/ep:p2\" +]", + +"GET /queries +[ + \"first/ns-c/ep:5001\",\"first/ns-c/ep:5002\",\"first/ns-c/ep:p1\", + 
\"second/ns-c/ep:5001\",\"second/ns-c/ep:5002\",\"second/ns-c/ep:p1\" +]", + +"POST /operators +$::scale_ns_c", + +"GET /queries +[ + \"first/ns-c/ep:5001\",\"first/ns-c/ep:5002\",\"first/ns-c/ep:p1\", + \"second/ns-c/ep:5001\",\"second/ns-c/ep:5002\",\"second/ns-c/ep:p1\" +]" + +] +--- response_body eval +[ + "{ 2 2 2 2 }\n", + "DONE\n", + "{ 0 0 0 0 }\n", + "{ 2 2 0 2 2 0 }\n", + "DONE\n", + "{ 0 0 1 0 0 1 }\n", +] diff --git a/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/kubernetes3.t b/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/kubernetes3.t new file mode 100644 index 0000000..37a06b0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/kubernetes3.t @@ -0,0 +1,455 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +BEGIN { + our $token_file = "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"; + our $token_value = eval {`cat $token_file 2>/dev/null`}; + + our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: "127.0.0.1" + port: "6443" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + watch_endpoint_slices: true + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + watch_endpoint_slices: true + +_EOC_ + + our $single_yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + service: + host: "127.0.0.1" + port: "6443" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + watch_endpoint_slices: true +_EOC_ + + our $scale_ns_c = <<_EOC_; +[ + { + "op": "replace_endpointslices", + "name": "ep", + "namespace": "ns-c", + "endpoints": [ + { + "addresses": [ + "10.0.0.1" + ], + "conditions": { + "ready": true, + "serving": true, + "terminating": false + }, + "nodeName": "kind-control-plane" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + } +] +_EOC_ + +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); +workers(4); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $apisix_yaml = $block->apisix_yaml // <<_EOC_; +routes: [] +#END +_EOC_ + + $block->set_value("apisix_yaml", $apisix_yaml); + + my $main_config = $block->main_config // <<_EOC_; +env KUBERNETES_SERVICE_HOST=127.0.0.1; +env KUBERNETES_SERVICE_PORT=6443; +env KUBERNETES_CLIENT_TOKEN=$::token_value; +env KUBERNETES_CLIENT_TOKEN_FILE=$::token_file; +_EOC_ + + $block->set_value("main_config", $main_config); + + my $config = $block->config
// <<_EOC_; + location /queries { + content_by_lua_block { + local core = require("apisix.core") + local d = require("apisix.discovery.kubernetes") + + ngx.sleep(1) + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local queries = core.json.decode(request_body) + local response_body = "{" + for _,query in ipairs(queries) do + local nodes = d.nodes(query) + if nodes==nil or #nodes==0 then + response_body=response_body.." "..0 + else + response_body=response_body.." "..#nodes + end + end + ngx.say(response_body.." }") + } + } + + location /operators { + content_by_lua_block { + local http = require("resty.http") + local core = require("apisix.core") + local ipairs = ipairs + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local operators = core.json.decode(request_body) + + core.log.info("get body ", request_body) + core.log.info("get operators ", #operators) + for _, op in ipairs(operators) do + local method, path, body + local headers = { + ["Host"] = "127.0.0.1:6445" + } + + if op.op == "replace_endpointslices" then + method = "PATCH" + path = "/apis/discovery.k8s.io/namespaces/" .. op.namespace .. "/endpointslices/" .. op.name + if #op.endpoints == 0 then + body = '[{"path":"/endpoints","op":"replace","value":[]}]' + else + local t = { { op = "replace", path = "/endpoints", value = op.endpoints } } + body = core.json.encode(t, true) + end + headers["Content-Type"] = "application/json-patch+json" + end + + if op.op == "replace_labels" then + method = "PATCH" + path = "/apis/discovery.k8s.io/namespaces/" .. op.namespace .. "/endpointslices/" .. 
op.name + local t = { { op = "replace", path = "/metadata/labels", value = op.labels } } + body = core.json.encode(t, true) + headers["Content-Type"] = "application/json-patch+json" + end + + local httpc = http.new() + core.log.info("begin to connect ", "127.0.0.1:6445") + local ok, message = httpc:connect({ + scheme = "http", + host = "127.0.0.1", + port = 6445, + }) + if not ok then + core.log.error("connect 127.0.0.1:6445 failed, message : ", message) + ngx.say("FAILED") + end + local res, err = httpc:request({ + method = method, + path = path, + headers = headers, + body = body, + }) + if err ~= nil then + core.log.err("operator k8s cluster error: ", err) + return 500 + end + + ngx.sleep(1) + + local k8s = require("apisix.discovery.kubernetes") + local data = k8s.dump_data() + ngx.say(core.json.encode(data,true)) + + if res.status ~= 200 and res.status ~= 201 and res.status ~= 409 then + return res.status + end + end + ngx.say("DONE") + } + } + + location /dump { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local core = require("apisix.core") + local http = require "resty.http" + local httpc = http.new() + + ngx.sleep(1) + + local dump_uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/v1/discovery/kubernetes/dump" + local res, err = httpc:request_uri(dump_uri, { method = "GET"}) + if err then + ngx.log(ngx.ERR, err) + ngx.status = res.status + return + end + + local body = json_decode(res.body) + local endpoints = body.endpoints + ngx.say(core.json.encode(endpoints,true)) + } + } + +_EOC_ + + $block->set_value("config", $config); + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: create namespace and endpoints +--- yaml_config eval: $::yaml_config +--- request +POST /operators +[ + { + "op": "replace_endpointslices", + "namespace": "ns-a", + "name": "ep", + "endpoints": [ + { + "addresses": [ + "10.0.0.1", + "10.0.0.2" + ], + "conditions": { + "ready": true, + "serving": true, + "terminating": false + }, + "nodeName": "kind-control-plane" + }, + { + "addresses": [ + "20.0.0.1", + "20.0.0.2" + ], + "conditions": { + "ready": false, + "serving": false, + "terminating": false + }, + "nodeName": "kind-control-plane" + } + ], + "ports": [ + { + "name": "p", + "port": 5001 + } + ] + }, + { + "op": "create_namespace", + "name": "ns-b" + }, + { + "op": "replace_endpointslices", + "namespace": "ns-b", + "name": "ep", + "endpoints": [ + { + "addresses": [ + "10.0.0.1", + "10.0.0.2" + ], + "conditions": { + "ready": true, + "serving": true, + "terminating": false + }, + "nodeName": "kind-control-plane" + }, + { + "addresses": [ + "20.0.0.1", + "20.0.0.2" + ], + "conditions": { + "ready": false, + "serving": true, + "terminating": false + }, + "nodeName": "kind-control-plane" + } + ], + "ports": [ + { + "name": "p", + "port": 5002 + } + ] + }, + { + "op": "create_namespace", + "name": "ns-c" + }, + { + "op": "replace_endpointslices", + "namespace": "ns-c", + "name": "ep", + "endpoints": [ + { + "addresses": [ + "10.0.0.1", + "10.0.0.2" + ], + "conditions": { + "ready": true, + "serving": true, + "terminating": false + }, + "nodeName": "kind-control-plane" + }, + { + "addresses": [ + "20.0.0.1", + "20.0.0.2" + ], + "conditions": { + "ready": true, + 
"serving": true, + "terminating": false + }, + "nodeName": "kind-control-plane" + } + ], + "ports": [ + { + "name": "p", + "port": 5003 + } + ] + } +] +--- more_headers +Content-type: application/json +--- response_body_like +.*"name":"default/kubernetes".* + + + +=== TEST 2: use default parameters +--- yaml_config eval: $::yaml_config +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 2 2 0 0 0 0 2 2 0 0 } + + + +=== TEST 3: use specify environment parameters +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: ${KUBERNETES_SERVICE_HOST} + port: ${KUBERNETES_SERVICE_PORT} + client: + token: ${KUBERNETES_CLIENT_TOKEN} + watch_endpoint_slices: true + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token: ${KUBERNETES_CLIENT_TOKEN} + watch_endpoint_slices: true + +--- request +GET /queries +[ + "first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","first/ns-b/ep:p2","first/ns-c/ep:5001","first/ns-c/ep:5002", + "second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1","second/ns-b/ep:p2","second/ns-c/ep:5001","second/ns-c/ep:5002" +] +--- more_headers +Content-type: application/json +--- response_body eval +qr{ 0 0 2 2 0 0 0 0 2 2 0 0 } + + + +=== TEST 4: test dump +--- yaml_config eval: $::yaml_config +--- request +GET /dump +--- response_body_like +.*"name":"default/kubernetes".* + + + +=== TEST 5: test single mode dump +--- yaml_config eval: $::single_yaml_config +--- request +GET /dump +--- response_body_like +.*"name":"default/kubernetes".* diff --git 
a/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/stream/kubernetes.t b/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/stream/kubernetes.t new file mode 100644 index 0000000..a9058f5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/kubernetes/discovery/stream/kubernetes.t @@ -0,0 +1,344 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +BEGIN { + our $token_file = "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token"; + our $token_value = eval {`cat $token_file 2>/dev/null`}; + + our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + kubernetes: + - id: first + service: + host: "127.0.0.1" + port: "6443" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + - id: second + service: + schema: "http" + host: "127.0.0.1" + port: "6445" + client: + token_file: "/tmp/var/run/secrets/kubernetes.io/serviceaccount/token" + +_EOC_ + +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +plan('no_plan'); + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); +workers(4); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $apisix_yaml = $block->apisix_yaml // <<_EOC_; +routes: [] +#END +_EOC_ + + $block->set_value("apisix_yaml", $apisix_yaml); + + my $main_config = $block->main_config // <<_EOC_; +env KUBERNETES_SERVICE_HOST=127.0.0.1; +env KUBERNETES_SERVICE_PORT=6443; +env KUBERNETES_CLIENT_TOKEN=$::token_value; +env KUBERNETES_CLIENT_TOKEN_FILE=$::token_file; +_EOC_ + + $block->set_value("main_config", $main_config); + + my $config = $block->config // <<_EOC_; + location /operators { + content_by_lua_block { + local http = require("resty.http") + local core = require("apisix.core") + local ipairs = ipairs + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local operators = core.json.decode(request_body) + + core.log.info("get body ", request_body) + core.log.info("get operators ", #operators) + for _, op in ipairs(operators) do + local method, path, body + local headers = { + ["Host"] = "127.0.0.1:6445" + } + + if op.op == "replace_subsets" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. 
op.name + if #op.subsets == 0 then + body = '[{"path":"/subsets","op":"replace","value":[]}]' + else + local t = { { op = "replace", path = "/subsets", value = op.subsets } } + body = core.json.encode(t, true) + end + headers["Content-Type"] = "application/json-patch+json" + end + + if op.op == "replace_labels" then + method = "PATCH" + path = "/api/v1/namespaces/" .. op.namespace .. "/endpoints/" .. op.name + local t = { { op = "replace", path = "/metadata/labels", value = op.labels } } + body = core.json.encode(t, true) + headers["Content-Type"] = "application/json-patch+json" + end + + local httpc = http.new() + core.log.info("begin to connect ", "127.0.0.1:6445") + local ok, message = httpc:connect({ + scheme = "http", + host = "127.0.0.1", + port = 6445, + }) + if not ok then + core.log.error("connect 127.0.0.1:6445 failed, message : ", message) + ngx.say("FAILED") + end + local res, err = httpc:request({ + method = method, + path = path, + headers = headers, + body = body, + }) + if err ~= nil then + core.log.err("operator k8s cluster error: ", err) + return 500 + end + if res.status ~= 200 and res.status ~= 201 and res.status ~= 409 then + return res.status + end + end + ngx.say("DONE") + } + } + +_EOC_ + + $block->set_value("config", $config); + + my $stream_config = $block->stream_config // <<_EOC_; + server { + listen 8125; + content_by_lua_block { + local core = require("apisix.core") + local d = require("apisix.discovery.kubernetes") + + ngx.sleep(1) + + local sock = ngx.req.socket() + local request_body = sock:receive() + + core.log.info("get body ", request_body) + + local response_body = "{" + local queries = core.json.decode(request_body) + for _,query in ipairs(queries) do + local nodes = d.nodes(query) + if nodes==nil or #nodes==0 then + response_body=response_body.." "..0 + else + response_body=response_body.." "..#nodes + end + end + ngx.say(response_body.." 
}") + } + } + +_EOC_ + + $block->set_value("extra_stream_config", $stream_config); + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: create namespace and endpoints +--- yaml_config eval: $::yaml_config +--- request +POST /operators +[ + { + "op": "replace_subsets", + "namespace": "ns-a", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": "ns-b" + }, + { + "op": "replace_subsets", + "namespace": "ns-b", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "name": "p1", + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "name": "p2", + "port": 5002 + } + ] + } + ] + }, + { + "op": "create_namespace", + "name": "ns-c" + }, + { + "op": "replace_subsets", + "namespace": "ns-c", + "name": "ep", + "subsets": [ + { + "addresses": [ + { + "ip": "10.0.0.1" + }, + { + "ip": "10.0.0.2" + } + ], + "ports": [ + { + "port": 5001 + } + ] + }, + { + "addresses": [ + { + "ip": "20.0.0.1" + }, + { + "ip": "20.0.0.2" + } + ], + "ports": [ + { + "port": 5002 + } + ] + } + ] + } +] +--- more_headers +Content-type: application/json + + + +=== TEST 2: use default parameters +--- yaml_config eval: $::yaml_config +--- apisix_yaml +stream_routes: + - + id: 1 + server_port: 1985 + upstream_id: 1 + +upstreams: + - nodes: + "127.0.0.1:8125": 1 + type: roundrobin + id: 1 + +#END +--- stream_request +["first/ns-a/ep:p1","first/ns-a/ep:p2","first/ns-b/ep:p1","second/ns-a/ep:p1","second/ns-a/ep:p2","second/ns-b/ep:p1"] +--- stream_response eval +qr{ 2 2 2 2 2 2 } diff --git 
a/CloudronPackages/APISIX/apisix-source/t/lib/apisix/plugins/jwt-auth.lua b/CloudronPackages/APISIX/apisix-source/t/lib/apisix/plugins/jwt-auth.lua new file mode 100644 index 0000000..56b5b1f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/apisix/plugins/jwt-auth.lua @@ -0,0 +1,122 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local core = require("apisix.core") +local jwt = require("resty.jwt") + +local ngx_time = ngx.time +local ngx_decode_base64 = ngx.decode_base64 +local pcall = pcall + + +local _M = {} + + +local function get_secret(conf) + local secret = conf.secret + + if conf.base64_secret then + return ngx_decode_base64(secret) + end + + return secret +end + +local function get_real_payload(key, exp, payload) + local real_payload = { + key = key, + exp = ngx_time() + exp + } + if payload then + local extra_payload = core.json.decode(payload) + core.table.merge(extra_payload, real_payload) + return extra_payload + end + return real_payload +end + +local function sign_jwt_with_HS(key, auth_conf, payload) + local auth_secret, err = get_secret(auth_conf) + if not auth_secret then + core.log.error("failed to sign jwt, err: ", err) + return nil, "failed to sign jwt: failed to get auth_secret" + end + local ok, jwt_token = pcall(jwt.sign, _M, + auth_secret, + { + header = { + typ = "JWT", + alg = auth_conf.algorithm + }, + payload = get_real_payload(key, auth_conf.exp, payload) + } + ) + if not ok then + core.log.error("failed to sign jwt, err: ", jwt_token.reason) + return nil, "failed to sign jwt" + end + return jwt_token +end + +local function sign_jwt_with_RS256_ES256(key, auth_conf, payload) + local ok, jwt_token = pcall(jwt.sign, _M, + auth_conf.private_key, + { + header = { + typ = "JWT", + alg = auth_conf.algorithm, + x5c = { + auth_conf.public_key, + } + }, + payload = get_real_payload(key, auth_conf.exp, payload) + } + ) + if not ok then + core.log.error("failed to sign jwt, err: ", jwt_token.reason) + return nil, "failed to sign jwt" + end + return jwt_token +end + +local function get_sign_handler(algorithm) + if not algorithm or algorithm == "HS256" or algorithm == "HS512" then + return sign_jwt_with_HS + elseif algorithm == "RS256" or algorithm == "ES256" then + return sign_jwt_with_RS256_ES256 + end +end + +local function gen_token(auth_conf, payload) + if not 
auth_conf.exp then + auth_conf.exp = 86400 + end + if not auth_conf.lifetime_grace_period then + auth_conf.lifetime_grace_period = 0 + end + if not auth_conf.algorithm then + auth_conf.algorithm = "HS256" + end + local sign_handler = get_sign_handler(auth_conf.algorithm) + local jwt_token, err = sign_handler(auth_conf.key, auth_conf, payload) + return jwt_token, err +end + + +_M.gen_token = gen_token + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/apisix/plugins/prometheus/exporter.lua b/CloudronPackages/APISIX/apisix-source/t/lib/apisix/plugins/prometheus/exporter.lua new file mode 100644 index 0000000..c9c71f6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/apisix/plugins/prometheus/exporter.lua @@ -0,0 +1,39 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local core = require("apisix.core") +local _M = {} + + +function _M.http_init() + return true +end + + +function _M.stream_init() + return true +end + + +function _M.export_metrics() + local process_type = require("ngx.process").type() + core.log.info("process type: ", process_type) + return core.response.exit(200) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/chaitin_waf_server.lua b/CloudronPackages/APISIX/apisix-source/t/lib/chaitin_waf_server.lua new file mode 100644 index 0000000..4130bd0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/chaitin_waf_server.lua @@ -0,0 +1,60 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local _M = {} + +local function get_socket() + ngx.flush(true) + local sock, err = ngx.req.socket(true) + if not sock then + ngx.log(ngx.ERR, "failed to get the request socket: " .. 
tostring(err)) + return nil + end + return sock +end + +function _M.pass() + local sock = get_socket() + sock:send({ string.char(65), string.char(1), string.char(0), string.char(0), string.char(0) }) + sock:send(".") + sock:send({ string.char(165), string.char(77), string.char(0), string.char(0), string.char(0) }) + sock:send("{\"event_id\":\"1e902e84bf5a4ead8f7760a0fe2c7719\",\"request_hit_whitelist\":false}") + + ngx.exit(200) +end + +function _M.reject() + local sock = get_socket() + sock:send({ string.char(65), string.char(1), string.char(0), string.char(0), string.char(0) }) + sock:send("?") + sock:send({ string.char(2), string.char(3), string.char(0), string.char(0), string.char(0) }) + sock:send("403") + sock:send({ string.char(37), string.char(77), string.char(0), string.char(0), string.char(0) }) + sock:send("{\"event_id\":\"b3c6ce574dc24f09a01f634a39dca83b\",\"request_hit_whitelist\":false}") + sock:send({ string.char(35), string.char(79), string.char(0), string.char(0), string.char(0) }) + sock:send("Set-Cookie:sl-session=ulgbPfMSuWRNsi/u7Aj9aA==; Domain=; Path=/; Max-Age=86400\n") + sock:send({ string.char(164), string.char(51), string.char(0), string.char(0), string.char(0) }) + sock:send("") + + ngx.exit(200) +end + +function _M.timeout() + ngx.sleep(100) + _M.pass() +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-interface/pom.xml b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-interface/pom.xml new file mode 100644 index 0000000..3087def --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-interface/pom.xml @@ -0,0 +1,45 @@ + + + 4.0.0 + + org.apache.dubbo.backend + dubbo-backend + 1.0.0-SNAPSHOT + + org.apache.dubbo.backend + dubbo-backend-interface + 1.0.0-SNAPSHOT + jar + ${project.artifactId} + + + true + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + + + + + diff --git 
a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-interface/src/main/java/org/apache/dubbo/backend/DemoService.java b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-interface/src/main/java/org/apache/dubbo/backend/DemoService.java new file mode 100644 index 0000000..6944fad --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-interface/src/main/java/org/apache/dubbo/backend/DemoService.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.dubbo.backend; + +import java.util.Map; + +public interface DemoService {; + + /** + * standard samples tengine dubbo infterace demo + * @param context pass http infos + * @return Map pass to response http + **/ + Map hello(Map context); + + /** + * test for dubbo non-200 response + * @param context pass http infos + * @return Map pass to response http + **/ + Map fail(Map context); + + /** + * test for dubbo response timeout + * @param context pass http infos + * @return Map pass to response http + **/ + Map timeout(Map context); + + /** + * test for non-string status code + * @param context pass http infos + * @return Map pass to response http + **/ + Map badStatus(Map context); +} diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/pom.xml b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/pom.xml new file mode 100644 index 0000000..ff97bab --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/pom.xml @@ -0,0 +1,96 @@ + + + 4.0.0 + + org.apache.dubbo.backend + dubbo-backend + 1.0.0-SNAPSHOT + + org.apache.dubbo.backend + dubbo-backend-provider + 1.0.0-SNAPSHOT + jar + ${project.artifactId} + + + true + 1.7.25 + 2.12.0 + 2.7.21 + + + + + org.apache.dubbo.backend + dubbo-backend-interface + 1.0.0-SNAPSHOT + + + org.apache.dubbo + dubbo + ${dubbo.version} + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + + + dubbo-demo-provider + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-dependency-plugin + + + org.apache.maven.plugins + maven-jar-plugin + + + + org.apache.dubbo.backend.provider.Provider + + + + + + com.jolira + onejar-maven-plugin + 1.4.4 + + + + one-jar + + + + + + + diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/java/org/apache/dubbo/backend/provider/DemoServiceImpl.java 
b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/java/org/apache/dubbo/backend/provider/DemoServiceImpl.java new file mode 100644 index 0000000..285df10 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/java/org/apache/dubbo/backend/provider/DemoServiceImpl.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.dubbo.backend.provider; + +import org.apache.dubbo.backend.DemoService; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.lang.InterruptedException; + +public class DemoServiceImpl implements DemoService { + @Override + public Map hello(Map context) { + Map ret = new HashMap(); + ret.put("body", "dubbo success\n"); + ret.put("status", "200"); + + for (Map.Entry entry : context.entrySet()) { + System.out.println("Key = " + entry.getKey() + ", Value = " + entry.getValue()); + if (entry.getKey().startsWith("extra-arg")) { + ret.put("Got-" + entry.getKey(), entry.getValue()); + } + } + + return ret; + } + + @Override + public Map fail(Map context) { + Map ret = new HashMap(); + ret.put("body", "dubbo fail\n"); + ret.put("status", "503"); + return ret; + } + + @Override + public Map timeout(Map context) { + Map ret = new HashMap(); + try { + TimeUnit.MILLISECONDS.sleep(500); + } catch (InterruptedException ex) {} + ret.put("body", "dubbo fail\n"); + ret.put("status", "503"); + return ret; + } + + @Override + public Map badStatus(Map context) { + Map ret = new HashMap(); + ret.put("body", "ok\n"); + ret.put("status", 200); + return ret; + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/java/org/apache/dubbo/backend/provider/Provider.java b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/java/org/apache/dubbo/backend/provider/Provider.java new file mode 100644 index 0000000..2860737 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/java/org/apache/dubbo/backend/provider/Provider.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.dubbo.backend.provider; + +import org.springframework.context.support.ClassPathXmlApplicationContext; + +import java.util.concurrent.TimeUnit; +import java.lang.InterruptedException; + +public class Provider { + + /** + * To get ipv6 address to work, add + * System.setProperty("java.net.preferIPv6Addresses", "true"); + * before running your application. + */ + public static void main(String[] args) throws Exception { + ClassPathXmlApplicationContext context = new ClassPathXmlApplicationContext(new String[]{"META-INF/spring/dubbo-demo-provider.xml"}); + context.start(); + while (true) { + try { + TimeUnit.MINUTES.sleep(1); + } catch (InterruptedException ex) {} + } + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/resources/META-INF/spring/dubbo-demo-provider.xml b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/resources/META-INF/spring/dubbo-demo-provider.xml new file mode 100644 index 0000000..99a1419 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/resources/META-INF/spring/dubbo-demo-provider.xml @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/resources/dubbo.properties b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/resources/dubbo.properties new file mode 100644 index 0000000..258fd3b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/resources/dubbo.properties @@ -0,0 +1,17 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +dubbo.application.qos.enable=false diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/resources/log4j.properties b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/resources/log4j.properties new file mode 100644 index 0000000..2f4f4ad --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/dubbo-backend-provider/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +###set log levels### +log4j.rootLogger=info, stdout +###output to the console### +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%d{dd/MM/yy HH:mm:ss:SSS z}] %t %5p %c{2}: %m%n diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/pom.xml b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/pom.xml new file mode 100644 index 0000000..7ecc9fa --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-backend/pom.xml @@ -0,0 +1,97 @@ + + + 4.0.0 + org.apache.dubbo.backend + dubbo-backend + 1.0.0-SNAPSHOT + pom + ${project.artifactId} + A dubbo backend for test based on dubbo-samples-tengine + + true + 2.7.21 + + + dubbo-backend-interface + dubbo-backend-provider + + + + + + + org.springframework.boot + spring-boot-dependencies + 2.1.5.RELEASE + pom + import + + + org.apache.dubbo + dubbo-dependencies-bom + ${dubbo.version} + pom + import + + + org.apache.dubbo + dubbo + ${dubbo.version} + + + org.springframework + spring + + + javax.servlet + servlet-api + + + log4j + log4j + + + + + + + + + org.springframework.boot + spring-boot-starter + 2.1.5.RELEASE + + + org.apache.dubbo + dubbo-spring-boot-starter + 2.7.1 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + + + + + diff --git 
a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-interface/pom.xml b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-interface/pom.xml new file mode 100644 index 0000000..883ff36 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-interface/pom.xml @@ -0,0 +1,46 @@ + + + 4.0.0 + + org.apache.dubbo.backend + dubbo-serialization-backend + 1.0.0-SNAPSHOT + ../pom.xml + + org.apache.dubbo.backend + dubbo-serialization-backend-interface + 1.0.0-SNAPSHOT + jar + ${project.artifactId} + + + true + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + + + + + diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-interface/src/main/java/org/apache/dubbo/backend/DubboSerializationTestService.java b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-interface/src/main/java/org/apache/dubbo/backend/DubboSerializationTestService.java new file mode 100644 index 0000000..fcc2a71 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-interface/src/main/java/org/apache/dubbo/backend/DubboSerializationTestService.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.dubbo.backend; + +public interface DubboSerializationTestService { + + PoJo testPoJo(PoJo input); + + PoJo[] testPoJos(PoJo[] input); + + void testVoid(); + + void testFailure(); + + void testTimeout(); +} diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-interface/src/main/java/org/apache/dubbo/backend/PoJo.java b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-interface/src/main/java/org/apache/dubbo/backend/PoJo.java new file mode 100644 index 0000000..150d035 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-interface/src/main/java/org/apache/dubbo/backend/PoJo.java @@ -0,0 +1,140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.dubbo.backend; + +import java.util.HashMap; +import java.util.Map; + +public class PoJo { + private String aString; + private Boolean aBoolean; + private Byte aByte; + private Character acharacter; + private Integer aInt; + private Float aFloat; + private Double aDouble; + private Long aLong; + private Short aShort; + private String[] strings; + private Map stringMap; + + public String getaString() { + return aString; + } + + public void setaString(String aString) { + this.aString = aString; + } + + public Boolean getaBoolean() { + return aBoolean; + } + + public void setaBoolean(Boolean aBoolean) { + this.aBoolean = aBoolean; + } + + public Byte getaByte() { + return aByte; + } + + public void setaByte(Byte aByte) { + this.aByte = aByte; + } + + public Character getAcharacter() { + return acharacter; + } + + public void setAcharacter(Character acharacter) { + this.acharacter = acharacter; + } + + public Integer getaInt() { + return aInt; + } + + public void setaInt(Integer aInt) { + this.aInt = aInt; + } + + public Float getaFloat() { + return aFloat; + } + + public void setaFloat(Float aFloat) { + this.aFloat = aFloat; + } + + public Double getaDouble() { + return aDouble; + } + + public void setaDouble(Double aDouble) { + this.aDouble = aDouble; + } + + public Long getaLong() { + return aLong; + } + + public void setaLong(Long aLong) { + this.aLong = aLong; + } + + public Short getaShort() { + return aShort; + } + + public void setaShort(Short aShort) { + this.aShort = aShort; + } + + public Map getStringMap() { + return stringMap; + } + + public void setStringMap(Map stringMap) { + this.stringMap = stringMap; + } + + public String[] getStrings() { + return strings; + } + + public void setStrings(String[] strings) { + this.strings = strings; + } + + public static PoJo getTestInstance(){ + PoJo poJo = new PoJo(); + poJo.aBoolean 
=true; + poJo.aByte =1; + poJo.acharacter ='a'; + poJo.aInt =2; + poJo.aDouble = 1.1; + poJo.aFloat =1.2f; + poJo.aLong = 3L; + poJo.aShort = 4; + poJo.aString ="aa"; + HashMap poJoMap = new HashMap<>(); + poJoMap.put("key","value"); + poJo.stringMap = poJoMap; + poJo.strings = new String[]{"aa","bb"}; + return poJo; + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/pom.xml b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/pom.xml new file mode 100644 index 0000000..b5b762f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/pom.xml @@ -0,0 +1,97 @@ + + + 4.0.0 + + org.apache.dubbo.backend + dubbo-serialization-backend + 1.0.0-SNAPSHOT + ../pom.xml + + org.apache.dubbo.backend + dubbo-serialization-backend-provider + 1.0.0-SNAPSHOT + jar + ${project.artifactId} + + + true + 1.7.25 + 2.12.0 + 2.7.21 + + + + + org.apache.dubbo.backend + dubbo-serialization-backend-interface + 1.0.0-SNAPSHOT + + + org.apache.dubbo + dubbo + ${dubbo.version} + + + org.apache.httpcomponents + httpclient + 4.5.13 + + + + + dubbo-demo-provider + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + + + + org.apache.maven.plugins + maven-dependency-plugin + + + org.apache.maven.plugins + maven-jar-plugin + + + + org.apache.dubbo.backend.provider.Provider + + + + + + com.jolira + onejar-maven-plugin + 1.4.4 + + + + one-jar + + + + + + + diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/java/org/apache/dubbo/backend/provider/DubboSerializationTestServiceImpl.java b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/java/org/apache/dubbo/backend/provider/DubboSerializationTestServiceImpl.java new file mode 100644 index 
0000000..41e9273 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/java/org/apache/dubbo/backend/provider/DubboSerializationTestServiceImpl.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.dubbo.backend.provider; + +import org.apache.dubbo.backend.DubboSerializationTestService; +import org.apache.dubbo.backend.PoJo; +import org.apache.dubbo.common.utils.ReflectUtils; + +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.concurrent.TimeUnit; + +public class DubboSerializationTestServiceImpl implements DubboSerializationTestService { + + @Override + public PoJo testPoJo(PoJo input) { + return input; + } + + @Override + public PoJo[] testPoJos(PoJo[] input) { + return input; + } + + @Override + public void testVoid() { + } + + @Override + public void testFailure() { + throw new RuntimeException("testFailure"); + } + + @Override + public void testTimeout() { + try { + TimeUnit.SECONDS.sleep(10); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + +} diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/java/org/apache/dubbo/backend/provider/Provider.java b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/java/org/apache/dubbo/backend/provider/Provider.java new file mode 100644 index 0000000..dde4580 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/java/org/apache/dubbo/backend/provider/Provider.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.dubbo.backend.provider; + +import com.alibaba.fastjson.JSONObject; +import org.apache.dubbo.backend.DubboSerializationTestService; +import org.apache.dubbo.backend.PoJo; +import org.apache.dubbo.common.utils.ReflectUtils; +import org.springframework.context.support.ClassPathXmlApplicationContext; + +import java.lang.reflect.Method; +import java.util.concurrent.TimeUnit; +import java.lang.InterruptedException; + +public class Provider { + + /** + * To get ipv6 address to work, add + * System.setProperty("java.net.preferIPv6Addresses", "true"); + * before running your application. 
+ */ + public static void main(String[] args) throws Exception { + + ClassPathXmlApplicationContext context = new ClassPathXmlApplicationContext(new String[]{"META-INF/spring/dubbo-demo-provider.xml"}); + String jsonString = JSONObject.toJSONString(PoJo.getTestInstance()); + System.out.println(jsonString); + context.start(); + while (true) { + try { + TimeUnit.MINUTES.sleep(1); + } catch (InterruptedException ex) {} + } + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/resources/META-INF/spring/dubbo-demo-provider.xml b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/resources/META-INF/spring/dubbo-demo-provider.xml new file mode 100644 index 0000000..8dae775 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/resources/META-INF/spring/dubbo-demo-provider.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + + + + diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/resources/dubbo.properties b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/resources/dubbo.properties new file mode 100644 index 0000000..258fd3b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/resources/dubbo.properties @@ -0,0 +1,17 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +dubbo.application.qos.enable=false diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/resources/log4j.properties b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/resources/log4j.properties new file mode 100644 index 0000000..2f4f4ad --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/dubbo-serialization-backend-provider/src/main/resources/log4j.properties @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +###set log levels### +log4j.rootLogger=info, stdout +###output to the console### +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%d{dd/MM/yy HH:mm:ss:SSS z}] %t %5p %c{2}: %m%n diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/pom.xml b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/pom.xml new file mode 100644 index 0000000..fe9f042 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/dubbo-serialization-backend/pom.xml @@ -0,0 +1,97 @@ + + + 4.0.0 + org.apache.dubbo.backend + dubbo-serialization-backend + 1.0.0-SNAPSHOT + pom + ${project.artifactId} + A dubbo backend for test based on dubbo-samples-tengine + + true + 2.7.21 + + + dubbo-serialization-backend-interface + dubbo-serialization-backend-provider + + + + + + + org.springframework.boot + spring-boot-dependencies + 2.1.5.RELEASE + pom + import + + + org.apache.dubbo + dubbo-dependencies-bom + ${dubbo.version} + pom + import + + + org.apache.dubbo + dubbo + ${dubbo.version} + + + org.springframework + spring + + + javax.servlet + servlet-api + + + log4j + log4j + + + + + + + + + org.springframework.boot + spring-boot-starter + 2.1.5.RELEASE + + + org.apache.dubbo + dubbo-spring-boot-starter + 2.7.1 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.8 + 1.8 + + + + + diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/etcd.proto b/CloudronPackages/APISIX/apisix-source/t/lib/etcd.proto new file mode 100644 index 0000000..50dd0e5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/etcd.proto @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +syntax = "proto3"; +package etcdserverpb; + +message StatusRequest { +} + +message StatusResponse { + // version is the cluster protocol version used by the responding member. + string version = 2; +} + +service Maintenance { + // Status gets the status of the member. + rpc Status(StatusRequest) returns (StatusResponse) {} +} diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/ext-plugin.lua b/CloudronPackages/APISIX/apisix-source/t/lib/ext-plugin.lua new file mode 100644 index 0000000..0ebf719 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/ext-plugin.lua @@ -0,0 +1,652 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local json = require("toolkit.json") +local ext = require("apisix.plugins.ext-plugin.init") +local constants = require("apisix.constants") +local flatbuffers = require("flatbuffers") +local err_code = require("A6.Err.Code") +local err_resp = require("A6.Err.Resp") +local prepare_conf_req = require("A6.PrepareConf.Req") +local prepare_conf_resp = require("A6.PrepareConf.Resp") +local a6_method = require("A6.Method") +local text_entry = require("A6.TextEntry") +local http_req_call_req = require("A6.HTTPReqCall.Req") +local http_req_call_resp = require("A6.HTTPReqCall.Resp") +local http_req_call_action = require("A6.HTTPReqCall.Action") +local http_req_call_stop = require("A6.HTTPReqCall.Stop") +local http_req_call_rewrite = require("A6.HTTPReqCall.Rewrite") +local http_resp_call_req = require("A6.HTTPRespCall.Req") +local http_resp_call_resp = require("A6.HTTPRespCall.Resp") +local extra_info = require("A6.ExtraInfo.Info") +local extra_info_req = require("A6.ExtraInfo.Req") +local extra_info_var = require("A6.ExtraInfo.Var") +local extra_info_resp = require("A6.ExtraInfo.Resp") +local extra_info_reqbody = require("A6.ExtraInfo.ReqBody") +local extra_info_respbody = require("A6.ExtraInfo.RespBody") + +local _M = {} +local builder = flatbuffers.Builder(0) + + +local function build_extra_info(info, ty) + extra_info_req.Start(builder) + extra_info_req.AddInfoType(builder, ty) + extra_info_req.AddInfo(builder, info) +end + + +local function build_action(action, ty) + http_req_call_resp.Start(builder) + http_req_call_resp.AddActionType(builder, ty) + http_req_call_resp.AddAction(builder, action) +end + + +local function ask_extra_info(sock, case_extra_info) + local data + for _, action in ipairs(case_extra_info) do + if action.type == "closed" then + ngx.exit(-1) + return + end + + if action.type == "var" then + local name = builder:CreateString(action.name) + 
extra_info_var.Start(builder) + extra_info_var.AddName(builder, name) + local var_req = extra_info_var.End(builder) + build_extra_info(var_req, extra_info.Var) + local req = extra_info_req.End(builder) + builder:Finish(req) + data = builder:Output() + local ok, err = ext.send(sock, constants.RPC_EXTRA_INFO, data) + if not ok then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, "send extra info req successfully") + + local ty, data = ext.receive(sock) + if not ty then + ngx.log(ngx.ERR, data) + return + end + + assert(ty == constants.RPC_EXTRA_INFO, ty) + local buf = flatbuffers.binaryArray.New(data) + local resp = extra_info_resp.GetRootAsResp(buf, 0) + local res = resp:ResultAsString() + assert(res == action.result, res) + end + + if action.type == "reqbody" then + extra_info_reqbody.Start(builder) + local reqbody_req = extra_info_reqbody.End(builder) + build_extra_info(reqbody_req, extra_info.ReqBody) + local req = extra_info_req.End(builder) + builder:Finish(req) + data = builder:Output() + local ok, err = ext.send(sock, constants.RPC_EXTRA_INFO, data) + if not ok then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, "send extra info req successfully") + + local ty, data = ext.receive(sock) + if not ty then + ngx.log(ngx.ERR, data) + return + end + + assert(ty == constants.RPC_EXTRA_INFO, ty) + local buf = flatbuffers.binaryArray.New(data) + local resp = extra_info_resp.GetRootAsResp(buf, 0) + local res = resp:ResultAsString() + assert(res == action.result, res) + end + + if action.type == "respbody" then + extra_info_respbody.Start(builder) + local respbody_req = extra_info_respbody.End(builder) + build_extra_info(respbody_req, extra_info.RespBody) + local req = extra_info_req.End(builder) + builder:Finish(req) + data = builder:Output() + local ok, err = ext.send(sock, constants.RPC_EXTRA_INFO, data) + if not ok then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, "send extra info req successfully") + + local ty, data = 
ext.receive(sock) + if not ty then + ngx.log(ngx.ERR, data) + return + end + + assert(ty == constants.RPC_EXTRA_INFO, ty) + local buf = flatbuffers.binaryArray.New(data) + local resp = extra_info_resp.GetRootAsResp(buf, 0) + local res = resp:ResultAsString() + assert(res == action.result, res) + end + end + +end + + +function _M.go(case) + local sock = ngx.req.socket(true) + local ty, data = ext.receive(sock) + if not ty then + ngx.log(ngx.ERR, data) + return + end + ngx.log(ngx.WARN, "receive rpc call successfully") + + if ty == constants.RPC_PREPARE_CONF then + if case.inject_error then + ty = constants.RPC_ERROR + err_resp.Start(builder) + err_resp.AddCode(builder, err_code.BAD_REQUEST) + local req = prepare_conf_req.End(builder) + builder:Finish(req) + data = builder:Output() + + else + local buf = flatbuffers.binaryArray.New(data) + local pc = prepare_conf_req.GetRootAsReq(buf, 0) + + if case.with_conf then + local conf = pc:Conf(1) + assert(conf:Name(), "foo") + assert(conf:Value(), "bar") + local conf = pc:Conf(2) + assert(conf:Name(), "cat") + assert(conf:Value(), "dog") + else + assert(pc:ConfLength() == 0) + end + + if case.expect_key_pattern then + local m = ngx.re.find(pc:Key(), case.expect_key_pattern, "jo") + assert(m ~= nil, pc:Key()) + else + assert(pc:Key() ~= "") + end + + prepare_conf_resp.Start(builder) + prepare_conf_resp.AddConfToken(builder, 233) + local req = prepare_conf_req.End(builder) + builder:Finish(req) + data = builder:Output() + end + + elseif case.no_token then + ty = constants.RPC_ERROR + err_resp.Start(builder) + err_resp.AddCode(builder, err_code.CONF_TOKEN_NOT_FOUND) + local req = prepare_conf_req.End(builder) + builder:Finish(req) + data = builder:Output() + + elseif ty == constants.RPC_HTTP_REQ_CALL then + local buf = flatbuffers.binaryArray.New(data) + local call_req = http_req_call_req.GetRootAsReq(buf, 0) + if case.check_input then + assert(call_req:Id() == 0) + assert(call_req:ConfToken() == 233) + 
assert(call_req:SrcIpLength() == 4) + assert(call_req:SrcIp(1) == 127) + assert(call_req:SrcIp(2) == 0) + assert(call_req:SrcIp(3) == 0) + assert(call_req:SrcIp(4) == 1) + assert(call_req:Method() == a6_method.PUT) + assert(call_req:Path() == "/hello") + + assert(call_req:ArgsLength() == 4) + local res = {} + for i = 1, call_req:ArgsLength() do + local entry = call_req:Args(i) + local r = res[entry:Name()] + if r then + res[entry:Name()] = {r, entry:Value()} + else + res[entry:Name()] = entry:Value() or true + end + end + assert(json.encode(res) == '{\"xx\":[\"y\",\"z\"],\"y\":\"\",\"z\":true}') + + assert(call_req:HeadersLength() == 5) + local res = {} + for i = 1, call_req:HeadersLength() do + local entry = call_req:Headers(i) + local r = res[entry:Name()] + if r then + res[entry:Name()] = {r, entry:Value()} + else + res[entry:Name()] = entry:Value() or true + end + end + assert(json.encode(res) == '{\"connection\":\"close\",\"host\":\"localhost\",' .. + '\"x-req\":[\"foo\",\"bar\"],\"x-resp\":\"cat\"}') + elseif case.check_input_ipv6 then + assert(call_req:SrcIpLength() == 16) + for i = 1, 15 do + assert(call_req:SrcIp(i) == 0) + end + assert(call_req:SrcIp(16) == 1) + elseif case.check_input_rewrite_host then + for i = 1, call_req:HeadersLength() do + local entry = call_req:Headers(i) + if entry:Name() == "host" then + assert(entry:Value() == "test.com") + end + end + elseif case.check_input_rewrite_path then + assert(call_req:Path() == "/xxx") + elseif case.check_input_rewrite_args then + assert(call_req:Path() == "/xxx") + assert(call_req:ArgsLength() == 1) + local entry = call_req:Args(1) + assert(entry:Name() == "x") + assert(entry:Value() == "z") + elseif case.get_request_body then + assert(call_req:Method() == a6_method.POST) + else + assert(call_req:Method() == a6_method.GET) + end + + if case.extra_info then + ask_extra_info(sock, case.extra_info) + end + + if case.stop == true then + local len = 3 + http_req_call_stop.StartBodyVector(builder, len) + 
builder:PrependByte(string.byte("t")) + builder:PrependByte(string.byte("a")) + builder:PrependByte(string.byte("c")) + local b = builder:EndVector(len) + + local hdrs = { + {"X-Resp", "foo"}, + {"X-Req", "bar"}, + {"X-Same", "one"}, + {"X-Same", "two"}, + } + local len = #hdrs + local textEntries = {} + for i = 1, len do + local name = builder:CreateString(hdrs[i][1]) + local value = builder:CreateString(hdrs[i][2]) + text_entry.Start(builder) + text_entry.AddName(builder, name) + text_entry.AddValue(builder, value) + local c = text_entry.End(builder) + textEntries[i] = c + end + http_req_call_stop.StartHeadersVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + local vec = builder:EndVector(len) + + http_req_call_stop.Start(builder) + if case.check_default_status ~= true then + http_req_call_stop.AddStatus(builder, 405) + end + http_req_call_stop.AddBody(builder, b) + http_req_call_stop.AddHeaders(builder, vec) + local action = http_req_call_stop.End(builder) + build_action(action, http_req_call_action.Stop) + + elseif case.rewrite == true or case.rewrite_host == true then + local hdrs + if case.rewrite_host then + hdrs = {{"host", "127.0.0.1"}} + else + hdrs = { + {"X-Delete", nil}, + {"X-Change", "bar"}, + {"X-Add", "bar"}, + } + end + + local len = #hdrs + local textEntries = {} + for i = 1, len do + local name = builder:CreateString(hdrs[i][1]) + local value + if hdrs[i][2] then + value = builder:CreateString(hdrs[i][2]) + end + text_entry.Start(builder) + text_entry.AddName(builder, name) + if value then + text_entry.AddValue(builder, value) + end + local c = text_entry.End(builder) + textEntries[i] = c + end + http_req_call_rewrite.StartHeadersVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + local vec = builder:EndVector(len) + + local path = builder:CreateString("/uri") + + http_req_call_rewrite.Start(builder) + http_req_call_rewrite.AddPath(builder, 
path) + http_req_call_rewrite.AddHeaders(builder, vec) + local action = http_req_call_rewrite.End(builder) + build_action(action, http_req_call_action.Rewrite) + + elseif case.rewrite_args == true or case.rewrite_args_only == true then + local path = builder:CreateString("/plugin_proxy_rewrite_args") + + local args = { + {"a", "foo"}, + {"d", nil}, + {"c", "bar"}, + {"a", "bar"}, + } + + local len = #args + local textEntries = {} + for i = 1, len do + local name = builder:CreateString(args[i][1]) + local value + if args[i][2] then + value = builder:CreateString(args[i][2]) + end + text_entry.Start(builder) + text_entry.AddName(builder, name) + if value then + text_entry.AddValue(builder, value) + end + local c = text_entry.End(builder) + textEntries[i] = c + end + http_req_call_rewrite.StartHeadersVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + local vec = builder:EndVector(len) + + http_req_call_rewrite.Start(builder) + if not case.rewrite_args_only then + http_req_call_rewrite.AddPath(builder, path) + end + http_req_call_rewrite.AddArgs(builder, vec) + local action = http_req_call_rewrite.End(builder) + build_action(action, http_req_call_action.Rewrite) + + elseif case.rewrite_bad_path == true then + local path = builder:CreateString("/plugin_proxy_rewrite_args?a=2") + http_req_call_rewrite.Start(builder) + http_req_call_rewrite.AddPath(builder, path) + local action = http_req_call_rewrite.End(builder) + build_action(action, http_req_call_action.Rewrite) + + elseif case.rewrite_resp_header == true or case.rewrite_vital_resp_header == true then + local hdrs = { + {"X-Resp", "foo"}, + {"X-Req", "bar"}, + {"Content-Type", "application/json"}, + {"Content-Encoding", "deflate"}, + } + local len = #hdrs + local textEntries = {} + for i = 1, len do + local name = builder:CreateString(hdrs[i][1]) + local value = builder:CreateString(hdrs[i][2]) + text_entry.Start(builder) + text_entry.AddName(builder, name) + 
text_entry.AddValue(builder, value) + local c = text_entry.End(builder) + textEntries[i] = c + end + http_req_call_rewrite.StartRespHeadersVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + local vec = builder:EndVector(len) + + local path = builder:CreateString("/plugin_proxy_rewrite_resp_header") + + http_req_call_rewrite.Start(builder) + http_req_call_rewrite.AddRespHeaders(builder, vec) + http_req_call_rewrite.AddPath(builder, path) + local action = http_req_call_rewrite.End(builder) + build_action(action, http_req_call_action.Rewrite) + + elseif case.rewrite_same_resp_header == true then + local hdrs = { + {"X-Resp", "foo"}, + {"X-Req", "bar"}, + {"X-Same", "one"}, + {"X-Same", "two"}, + } + local len = #hdrs + local textEntries = {} + for i = 1, len do + local name = builder:CreateString(hdrs[i][1]) + local value = builder:CreateString(hdrs[i][2]) + text_entry.Start(builder) + text_entry.AddName(builder, name) + text_entry.AddValue(builder, value) + local c = text_entry.End(builder) + textEntries[i] = c + end + http_req_call_rewrite.StartRespHeadersVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + local vec = builder:EndVector(len) + + local path = builder:CreateString("/plugin_proxy_rewrite_resp_header") + + http_req_call_rewrite.Start(builder) + http_req_call_rewrite.AddRespHeaders(builder, vec) + http_req_call_rewrite.AddPath(builder, path) + local action = http_req_call_rewrite.End(builder) + build_action(action, http_req_call_action.Rewrite) + + elseif case.rewrite_request_body == true then + local len = 4 + http_req_call_rewrite.StartBodyVector(builder, len) + builder:PrependByte(string.byte("\n")) + builder:PrependByte(string.byte("c")) + builder:PrependByte(string.byte("b")) + builder:PrependByte(string.byte("a")) + local b = builder:EndVector(len) + http_req_call_rewrite.Start(builder) + http_req_call_rewrite.AddBody(builder, b) + local action = 
http_req_call_rewrite.End(builder) + build_action(action, http_req_call_action.Rewrite) + + else + http_req_call_resp.Start(builder) + end + + local req = http_req_call_resp.End(builder) + builder:Finish(req) + data = builder:Output() + + elseif ty == constants.RPC_HTTP_RESP_CALL then + local buf = flatbuffers.binaryArray.New(data) + local call_req = http_resp_call_req.GetRootAsReq(buf, 0) + if case.check_input then + assert(call_req:Id() == 0) + assert(call_req:ConfToken() == 233) + assert(call_req:Status() == 200) + local len = call_req:HeadersLength() + + local headers = {} + for i = 1, len do + local entry = call_req:Headers(i) + local r = headers[entry:Name()] + if r then + headers[entry:Name()] = {r, entry:Value()} + else + headers[entry:Name()] = entry:Value() or true + end + end + assert(json.encode(headers), '{"Connection":"close","Content-Length":"12",' .. + '"Content-Type":"text/plain","Server":"openresty"}') + http_resp_call_resp.Start(builder) + + elseif case.modify_body then + local len = 3 + http_resp_call_resp.StartBodyVector(builder, len) + builder:PrependByte(string.byte("t")) + builder:PrependByte(string.byte("a")) + builder:PrependByte(string.byte("c")) + local b = builder:EndVector(len) + + + http_resp_call_resp.Start(builder) + http_resp_call_resp.AddBody(builder, b) + + elseif case.modify_header then + local len = call_req:HeadersLength() + + local headers = {} + for i = 1, len do + local entry = call_req:Headers(i) + local r = headers[entry:Name()] + if r then + headers[entry:Name()] = {r, entry:Value()} + else + headers[entry:Name()] = entry:Value() or true + end + end + + if case.same_header then + headers["x-same"] = {"one", "two"} + else + local runner = headers["x-runner"] + if runner and runner == "Go-runner" then + headers["x-runner"] = "Test-Runner" + end + + headers["Content-Type"] = "application/json" + end + + local i = 1 + local textEntries = {} + for k, v in pairs(headers) do + local name = builder:CreateString(k) + if type(v) 
== "table" then + for j = 1, #v do + local value = builder:CreateString(v[j]) + text_entry.Start(builder) + text_entry.AddName(builder, name) + text_entry.AddValue(builder, value) + local c = text_entry.End(builder) + textEntries[i] = c + i = i + 1 + end + else + local value = builder:CreateString(v) + text_entry.Start(builder) + text_entry.AddName(builder, name) + text_entry.AddValue(builder, value) + local c = text_entry.End(builder) + textEntries[i] = c + i = i + 1 + end + end + + len = #textEntries + http_resp_call_resp.StartHeadersVector(builder, len) + for i = len, 1, -1 do + builder:PrependUOffsetTRelative(textEntries[i]) + end + local vec = builder:EndVector(len) + + http_resp_call_resp.Start(builder) + http_resp_call_resp.AddHeaders(builder, vec) + + elseif case.modify_status then + local status = call_req:Status() + if status == 200 then + status = 304 + end + http_resp_call_resp.Start(builder) + http_resp_call_resp.AddStatus(builder, status) + + elseif case.extra_info then + ask_extra_info(sock, case.extra_info) + http_resp_call_resp.Start(builder) + else + http_resp_call_resp.Start(builder) + end + + local resp = http_resp_call_resp.End(builder) + builder:Finish(resp) + data = builder:Output() + end + + local ok, err = ext.send(sock, ty, data) + if not ok then + ngx.log(ngx.ERR, err) + return + end + ngx.log(ngx.WARN, "send rpc call response successfully") +end + + +function _M.header_too_short() + local sock = ngx.req.socket() + local ty, data = ext.receive(sock) + if not ty then + ngx.log(ngx.ERR, data) + return + end + ngx.log(ngx.WARN, "receive rpc call successfully") + + local ok, err = sock:send({string.char(2), string.char(1)}) + if not ok then + ngx.log(ngx.ERR, err) + return + end +end + + +function _M.data_too_short() + local sock = ngx.req.socket() + local ty, data = ext.receive(sock) + if not ty then + ngx.log(ngx.ERR, data) + return + end + ngx.log(ngx.WARN, "receive rpc call successfully") + + local ok, err = sock:send({string.char(2), 
string.char(1), string.rep(string.char(0), 3)}) + if not ok then + ngx.log(ngx.ERR, err) + return + end +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/grafana_loki.lua b/CloudronPackages/APISIX/apisix-source/t/lib/grafana_loki.lua new file mode 100644 index 0000000..fc11739 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/grafana_loki.lua @@ -0,0 +1,63 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+--
+
+local cjson = require("cjson")
+local http = require("resty.http")
+
+local _M = {}
+
+
+-- Fetch logs from a Grafana Loki instance via its query_range HTTP API.
+-- from/to: start and end timestamps of the query window.
+-- options: optional table; may override direction, limit, query, url and
+-- headers (defaults target the local test Loki as tenant "tenant_1").
+-- Returns the decoded JSON response table, or nil plus an error string.
+function _M.fetch_logs_from_loki(from, to, options)
+    options = options or {}
+
+    local direction = options.direction or "backward"
+    local limit = options.limit or "10"
+    local query = options.query or [[{job="apisix"} | json]]
+    local url = options.url or "http://127.0.0.1:3100/loki/api/v1/query_range"
+    local headers = options.headers or {
+        ["X-Scope-OrgID"] = "tenant_1"
+    }
+
+    local httpc = http.new()
+    local res, err = httpc:request_uri(url, {
+        query = {
+            start = from,
+            ["end"] = to,
+            direction = direction,
+            limit = limit,
+            query = query,
+        },
+        headers = headers
+    })
+
+    if not res or err then
+        return nil, err
+    end
+
+    if res.status > 300 then
+        return nil, "HTTP status code: " .. res.status .. ", body: " .. res.body
+    end
+
+    -- cjson.decode raises a Lua error on malformed JSON instead of returning
+    -- nil, so guard it with pcall to keep this function's nil+err contract.
+    local ok, data = pcall(cjson.decode, res.body)
+    if not ok or not data then
+        return nil, "failed to decode response body: " .. res.body
+    end
+    return data, nil
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/keycloak.lua b/CloudronPackages/APISIX/apisix-source/t/lib/keycloak.lua
new file mode 100644
index 0000000..51d2155
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/t/lib/keycloak.lua
@@ -0,0 +1,136 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local http = require "resty.http" + +local _M = {} + + +-- Request APISIX and redirect to keycloak, +-- Login keycloak and return the res of APISIX +function _M.login_keycloak(uri, username, password) + local httpc = http.new() + + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + -- Use 500 to indicate error. + return nil, "Initial request was not redirected to ID provider authorization endpoint." + else + -- Extract cookies. Important since OIDC module tracks state with a session cookie. + local cookies = res.headers['Set-Cookie'] + + -- Concatenate cookies into one string as expected when sent in request header. + local cookie_str = _M.concatenate_cookies(cookies) + + -- Call authorization endpoint we were redirected to. + -- Note: This typically returns a login form which is the case here for Keycloak as well. + -- However, how we process the form to perform the login is specific to Keycloak and + -- possibly even the version used. + res, err = httpc:request_uri(res.headers['Location'], {method = "GET"}) + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 200 then + -- Unexpected response. + return nil, res.body + end + + -- Check if response code was ok. + if res.status ~= 200 then + return nil, "unexpected status " .. res.status + end + + -- From the returned form, extract the submit URI and parameters. + local uri, params = res.body:match('.*action="(.*)%?(.*)" method="post">') + + -- Substitute escaped ampersand in parameters. + params = params:gsub("&", "&") + + -- Get all cookies returned. Probably not so important since not part of OIDC specification. + local auth_cookies = res.headers['Set-Cookie'] + + -- Concatenate cookies into one string as expected when sent in request header. 
+ local auth_cookie_str = _M.concatenate_cookies(auth_cookies) + + -- Invoke the submit URI with parameters and cookies, adding username + -- and password in the body. + -- Note: Username and password are specific to the Keycloak Docker image used. + res, err = httpc:request_uri(uri .. "?" .. params, { + method = "POST", + body = "username=" .. username .. "&password=" .. password, + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + ["Cookie"] = auth_cookie_str + } + }) + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, "Login form submission did not return redirect to redirect URI." + end + + -- Extract the redirect URI from the response header. + -- TODO: Consider validating this against the plugin configuration. + local redirect_uri = res.headers['Location'] + + -- Invoke the redirect URI (which contains the authorization code as an URL parameter). + res, err = httpc:request_uri(redirect_uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, "Invoking redirect URI with authorization code" .. + "did not return redirect to original URI." + end + + return res, nil + end +end + + +-- Concatenate cookies into one string as expected when sent in request header. +function _M.concatenate_cookies(cookies) + local cookie_str = "" + if type(cookies) == 'string' then + cookie_str = cookies:match('([^;]*); .*') + else + -- Must be a table. + local len = #cookies + if len > 0 then + cookie_str = cookies[1]:match('([^;]*); .*') + for i = 2, len do + cookie_str = cookie_str .. "; " .. 
cookies[i]:match('([^;]*); .*') + end + end + end + + return cookie_str, nil +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/keycloak_cas.lua b/CloudronPackages/APISIX/apisix-source/t/lib/keycloak_cas.lua new file mode 100644 index 0000000..7e57801 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/keycloak_cas.lua @@ -0,0 +1,215 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local http = require "resty.http" + +local _M = {} + +local default_opts = { + idp_uri = "http://127.0.0.1:8080/realms/test/protocol/cas", + cas_callback_uri = "/cas_callback", + logout_uri = "/logout", +} + +function _M.get_default_opts() + return default_opts +end + +-- Login keycloak and return the login original uri +function _M.login_keycloak(uri, username, password) + local httpc = http.new() + + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + return nil, err + elseif res.status ~= 302 then + return nil, "login was not redirected to keycloak." 
+ else + local cookies = res.headers['Set-Cookie'] + local cookie_str = _M.concatenate_cookies(cookies) + + res, err = httpc:request_uri(res.headers['Location'], {method = "GET"}) + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 200 then + -- Unexpected response. + return nil, res.body + end + + -- From the returned form, extract the submit URI and parameters. + local uri, params = res.body:match('.*action="(.*)%?(.*)" method="post">') + + -- Substitute escaped ampersand in parameters. + params = params:gsub("&", "&") + + local auth_cookies = res.headers['Set-Cookie'] + + -- Concatenate cookies into one string as expected when sent in request header. + local auth_cookie_str = _M.concatenate_cookies(auth_cookies) + + -- Invoke the submit URI with parameters and cookies, adding username + -- and password in the body. + -- Note: Username and password are specific to the Keycloak Docker image used. + res, err = httpc:request_uri(uri .. "?" .. params, { + method = "POST", + body = "username=" .. username .. "&password=" .. password, + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + ["Cookie"] = auth_cookie_str + } + }) + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, "Login form submission did not return redirect to redirect URI." + end + + local keycloak_cookie_str = _M.concatenate_cookies(res.headers['Set-Cookie']) + + -- login callback + local redirect_uri = res.headers['Location'] + res, err = httpc:request_uri(redirect_uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, "login callback: " .. + "did not return redirect to original URI." 
+ end + + cookies = res.headers['Set-Cookie'] + cookie_str = _M.concatenate_cookies(cookies) + + return res, nil, cookie_str, keycloak_cookie_str + end +end + +-- Login keycloak and return the login original uri +function _M.login_keycloak_for_second_sp(uri, keycloak_cookie_str) + local httpc = http.new() + + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + return nil, err + elseif res.status ~= 302 then + return nil, "login was not redirected to keycloak." + end + + local cookies = res.headers['Set-Cookie'] + local cookie_str = _M.concatenate_cookies(cookies) + + res, err = httpc:request_uri(res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = keycloak_cookie_str + } + }) + ngx.log(ngx.INFO, keycloak_cookie_str) + + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, res.body + end + + -- login callback + res, err = httpc:request_uri(res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + -- No response, must be an error. + return nil, err + elseif res.status ~= 302 then + -- Not a redirect which we expect. + return nil, "login callback: " .. + "did not return redirect to original URI." + end + + cookies = res.headers['Set-Cookie'] + cookie_str = _M.concatenate_cookies(cookies) + + return res, nil, cookie_str +end + +function _M.logout_keycloak(uri, cookie_str, keycloak_cookie_str) + local httpc = http.new() + + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + return nil, err + elseif res.status ~= 302 then + return nil, "logout was not redirected to keycloak." + else + -- keycloak logout + res, err = httpc:request_uri(res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = keycloak_cookie_str + } + }) + if not res then + -- No response, must be an error. 
+ return nil, err + elseif res.status ~= 200 then + return nil, "Logout did not return 200." + end + + return res, nil + end +end + +-- Concatenate cookies into one string as expected when sent in request header. +function _M.concatenate_cookies(cookies) + local cookie_str = "" + if type(cookies) == 'string' then + cookie_str = cookies:match('([^;]*); .*') + else + -- Must be a table. + local len = #cookies + if len > 0 then + cookie_str = cookies[1]:match('([^;]*); .*') + for i = 2, len do + cookie_str = cookie_str .. "; " .. cookies[i]:match('([^;]*); .*') + end + end + end + + return cookie_str, nil +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/mock_layer4.lua b/CloudronPackages/APISIX/apisix-source/t/lib/mock_layer4.lua new file mode 100644 index 0000000..cc15bb0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/mock_layer4.lua @@ -0,0 +1,78 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+--
+local core = require("apisix.core")
+local json_decode = require("toolkit.json").decode
+local json_encode = require("toolkit.json").encode
+local ngx = ngx
+local socket = ngx.req.socket
+
+local _M = {}
+
+-- Grab the downstream request socket, logging on failure.
+-- Returns the socket, or nil when it could not be obtained.
+local function acquire_sock()
+    local sock, err = socket()
+    if not sock then
+        core.log.error("failed to get the request socket: ", err)
+        return nil
+    end
+    return sock
+end
+
+
+-- Mock dogstatsd collector: log every line received on the socket until
+-- the peer stops sending.
+function _M.dogstatsd()
+    local sock = acquire_sock()
+    if not sock then
+        return
+    end
+
+    repeat
+        local line, err = sock:receive()
+        if line then
+            core.log.warn("message received: ", line)
+        elseif err and err ~= "no more data" then
+            core.log.error("socket error, returning: ", err)
+        end
+    until not line
+end
+
+
+-- Mock loggly endpoint: split each received line into syslog header and
+-- JSON body, then log the header plus the body re-encoded with ordered keys.
+function _M.loggly()
+    local sock = acquire_sock()
+    if not sock then
+        return
+    end
+
+    while true do
+        local line, err = sock:receive()
+        if not line then
+            if err and err ~= "no more data" then
+                core.log.error("socket error, returning: ", err)
+            end
+            return
+        end
+
+        local captures, match_err = ngx.re.match(line, "(^[ -~]*] )([ -~]*)")
+        if not captures then
+            core.log.error("unknown data received, failed to extract: ", match_err)
+            return
+        end
+        if #captures ~= 2 then
+            core.log.error("failed to match two (header, log body) subgroups", #captures)
+        end
+        -- captures[1] holds the syslog header (<14>1 ...), captures[2] the
+        -- actual log body; decode/re-encode the body to order its keys.
+        local logbody = json_encode(json_decode(captures[2]))
+        core.log.warn("message received: ", captures[1] .. logbody)
+    end
+end
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/pubsub.lua b/CloudronPackages/APISIX/apisix-source/t/lib/pubsub.lua
new file mode 100644
index 0000000..a2729a8
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/t/lib/pubsub.lua
@@ -0,0 +1,128 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local ws_client = require "resty.websocket.client"
+local protoc = require("protoc")
+local pb = require("pb")
+
+local _M = {}
+local mt = { __index = _M }
+
+
+local pb_state
+
+-- (Re)build the protobuf runtime state from the pubsub schema so the
+-- encode/decode calls below can use it.  Returns an error string on failure.
+local function load_proto()
+    pb.state(nil)
+    protoc.reload()
+    pb.option("int64_as_string")
+    local pubsub_protoc = protoc.new()
+    pubsub_protoc:addpath("apisix/include/apisix/model")
+    local ok, err = pcall(pubsub_protoc.loadfile, pubsub_protoc, "pubsub.proto")
+    if not ok then
+        ngx.log(ngx.ERR, "failed to load protocol: "..err)
+        return err
+    end
+    pb_state = pb.state(nil)
+end
+
+
+-- Create a websocket client and connect it to the given endpoint.
+local function connect_ws(endpoint)
+    local ws, err = ws_client:new()
+    if not ws then
+        ngx.log(ngx.ERR, "failed to create websocket client: "..err)
+        return nil, err
+    end
+
+    local ok
+    ok, err = ws:connect(endpoint)
+    if not ok then
+        ngx.log(ngx.ERR, "failed to connect: "..err)
+        return nil, err
+    end
+    return ws
+end
+
+
+-- Receive a single frame from the websocket and decode it as PubSubResp.
+local function recv_resp(ws)
+    local raw_data, _, err = ws:recv_frame()
+    if not raw_data then
+        ngx.log(ngx.ERR, "failed to receive the frame: ", err)
+        return nil, err
+    end
+
+    local resp, decode_err = pb.decode("PubSubResp", raw_data)
+    if not resp then
+        ngx.log(ngx.ERR, "failed to decode the frame: ", decode_err)
+        return nil, decode_err
+    end
+
+    return resp
+end
+
+
+-- Build a pubsub client object connected over websocket to `server`.
+function _M.new_ws(server)
+    local err = load_proto()
+    if err then
+        return nil, err
+    end
+
+    local ws, conn_err = connect_ws(server)
+    if not ws then
+        return nil, conn_err
+    end
+
+    return setmetatable({
+        type = "ws",
+        ws_client = ws,
+    }, mt)
+end
+
+
+-- Send `data` as a binary frame (PubSubReq-encoded unless is_raw is true)
+-- and return the decoded PubSubResp reply.
+function _M.send_recv_ws_binary(self, data, is_raw)
+    pb.state(pb_state)
+
+    local payload = data
+    if not is_raw then
+        payload = pb.encode("PubSubReq", data)
+    end
+
+    local _, err = self.ws_client:send_binary(payload)
+    if err then
+        return nil, err
+    end
+
+    return recv_resp(self.ws_client)
+end
+
+
+-- Send `text` as a text frame and return the decoded PubSubResp reply.
+function _M.send_recv_ws_text(self, text)
+    pb.state(pb_state)
+
+    local _, err = self.ws_client:send_text(text)
+    if err then
+        return nil, err
+    end
+
+    return recv_resp(self.ws_client)
+end
+
+
+-- Politely close the underlying websocket connection.
+function _M.close_ws(self)
+    self.ws_client:send_close()
+end
+
+
+return _M
diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/server.lua b/CloudronPackages/APISIX/apisix-source/t/lib/server.lua
new file mode 100644
index 0000000..3098736
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/t/lib/server.lua
@@ -0,0 +1,787 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+-- +local json_decode = require("toolkit.json").decode +local json_encode = require("toolkit.json").encode + +local rsa_public_key = [[ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw86xcJwNxL2MkWnjIGiw +94QY78Sq89dLqMdV/Ku2GIX9lYkbS0VDGtmxDGJLBOYW4cKTX+pigJyzglLgE+nD +z3VJf2oCqSV74gTyEdi7sw9e1rCyR6dR8VA7LEpIHwmhnDhhjXy1IYSKRdiVHLS5 +sYmaAGckpUo3MLqUrgydGj5tFzvK/R/ELuZBdlZM+XuWxYry05r860E3uL+VdVCO +oU4RJQknlJnTRd7ht8KKcZb6uM14C057i26zX/xnOJpaVflA4EyEo99hKQAdr8Sh +G70MOLYvGCZxl1o8S3q4X67MxcPlfJaXnbog2AOOGRaFar88XiLFWTbXMCLuz7xD +zQIDAQAB +-----END PUBLIC KEY-----]] + +local rsa_private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 
+upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]] + +local _M = {} + + +local function inject_headers() + local hdrs = ngx.req.get_headers() + for k, v in pairs(hdrs) do + if k:sub(1, 5) == "resp-" then + ngx.header[k:sub(6)] = v + end + end +end + + +function _M.hello() + ngx.req.read_body() + local s = "hello world" + ngx.header['Content-Length'] = #s + 1 + ngx.say(s) +end + + +function _M.hello_chunked() + ngx.print("hell") + ngx.flush(true) + ngx.print("o w") + ngx.flush(true) + ngx.say("orld") +end + + +function _M.hello1() + ngx.say("hello1 world") +end + + +-- Fake endpoint, needed for testing authz-keycloak plugin. +function _M.course_foo() + ngx.say("course foo") +end + + +function _M.server_port() + ngx.print(ngx.var.server_port) +end +_M.server_port_route2 = _M.server_port +_M.server_port_hello = _M.server_port +_M.server_port_aa = _M.server_port + + +function _M.limit_conn() + ngx.sleep(0.3) + ngx.say("hello world") +end + + +function _M.plugin_proxy_rewrite() + ngx.say("uri: ", ngx.var.uri) + ngx.say("host: ", ngx.var.host) + ngx.say("scheme: ", ngx.var.scheme) + ngx.log(ngx.WARN, "plugin_proxy_rewrite get method: ", ngx.req.get_method()) +end + + +function _M.plugin_proxy_rewrite_args() + ngx.say("uri: ", ngx.var.uri) + local args = ngx.req.get_uri_args() + + local keys = {} + for k, _ in pairs(args) do + table.insert(keys, k) + end + table.sort(keys) + + for _, key in ipairs(keys) do + if type(args[key]) == "table" then + ngx.say(key, ": ", table.concat(args[key], ',')) + else + ngx.say(key, ": ", args[key]) + end + end +end + + +function _M.specific_status() + local status = ngx.var.http_x_test_upstream_status + if 
status ~= nil then + ngx.status = status + ngx.say("upstream status: ", status) + end +end + + +function _M.status() + ngx.log(ngx.WARN, "client request host: ", ngx.var.http_host) + ngx.say("ok") +end + + +function _M.ewma() + if ngx.var.server_port == "1981" + or ngx.var.server_port == "1982" then + ngx.sleep(0.2) + else + ngx.sleep(0.1) + end + ngx.print(ngx.var.server_port) +end + + +local builtin_hdr_ignore_list = { + ["x-forwarded-for"] = true, + ["x-forwarded-proto"] = true, + ["x-forwarded-host"] = true, + ["x-forwarded-port"] = true, +} + +function _M.uri() + ngx.say("uri: ", ngx.var.uri) + local headers = ngx.req.get_headers() + + local keys = {} + for k in pairs(headers) do + if not builtin_hdr_ignore_list[k] then + table.insert(keys, k) + end + end + table.sort(keys) + + for _, key in ipairs(keys) do + ngx.say(key, ": ", headers[key]) + end +end +_M.uri_plugin_proxy_rewrite = _M.uri +_M.uri_plugin_proxy_rewrite_args = _M.uri + + +function _M.old_uri() + ngx.say("uri: ", ngx.var.uri) + local headers = ngx.req.get_headers() + + local keys = {} + for k in pairs(headers) do + table.insert(keys, k) + end + table.sort(keys) + + for _, key in ipairs(keys) do + ngx.say(key, ": ", headers[key]) + end +end + + +function _M.opentracing() + ngx.say("opentracing") +end + + +function _M.with_header() + --split into multiple chunk + ngx.say("hello") + ngx.say("world") + ngx.say("!") +end + + +function _M.mock_zipkin() + ngx.req.read_body() + local data = ngx.req.get_body_data() + ngx.log(ngx.NOTICE, data) + + local spans = json_decode(data) + local ver = ngx.req.get_uri_args()['span_version'] + if ver == "1" then + if #spans ~= 5 then + ngx.log(ngx.ERR, "wrong number of spans: ", #spans) + ngx.exit(400) + end + else + if #spans ~= 3 then + -- request/proxy/response + ngx.log(ngx.ERR, "wrong number of spans: ", #spans) + ngx.exit(400) + end + end + + for _, span in pairs(spans) do + local prefix = string.sub(span.name, 1, 6) + if prefix ~= 'apisix' then + 
ngx.log(ngx.ERR, "wrong prefix of name", prefix) + ngx.exit(400) + end + if not span.traceId then + ngx.log(ngx.ERR, "missing trace id") + ngx.exit(400) + end + + if not span.localEndpoint then + ngx.log(ngx.ERR, "missing local endpoint") + ngx.exit(400) + end + + if span.localEndpoint.serviceName ~= 'APISIX' + and span.localEndpoint.serviceName ~= 'apisix' then + ngx.log(ngx.ERR, "wrong serviceName: ", span.localEndpoint.serviceName) + ngx.exit(400) + end + + if span.localEndpoint.port ~= 1984 then + ngx.log(ngx.ERR, "wrong port: ", span.localEndpoint.port) + ngx.exit(400) + end + + local server_addr = ngx.req.get_uri_args()['server_addr'] + if server_addr then + if span.localEndpoint.ipv4 ~= server_addr then + ngx.log(ngx.ERR, "server_addr mismatched") + ngx.exit(400) + end + end + + end +end + + +function _M.wolf_rbac_login_rest() + ngx.req.read_body() + local data = ngx.req.get_body_data() + local args = json_decode(data) + if not args.username then + ngx.say(json_encode({ok=false, reason="ERR_USERNAME_MISSING"})) + ngx.exit(0) + end + if not args.password then + ngx.say(json_encode({ok=false, reason="ERR_PASSWORD_MISSING"})) + ngx.exit(0) + end + if args.username ~= "admin" then + ngx.say(json_encode({ok=false, reason="ERR_USER_NOT_FOUND"})) + ngx.exit(0) + end + if args.password ~= "123456" then + ngx.say(json_encode({ok=false, reason="ERR_PASSWORD_ERROR"})) + ngx.exit(0) + end + + ngx.say(json_encode({ok=true, data={token="wolf-rbac-token", + userInfo={nickname="administrator",username="admin", id="100"}}})) +end + + +function _M.wolf_rbac_access_check() + local headers = ngx.req.get_headers() + local token = headers['x-rbac-token'] + if token ~= 'wolf-rbac-token' then + ngx.say(json_encode({ok=false, reason="ERR_TOKEN_INVALID"})) + ngx.exit(0) + end + + local args = ngx.req.get_uri_args() + local resName = args.resName + if resName == '/hello' or resName == '/wolf/rbac/custom/headers' then + ngx.say(json_encode({ok=true, + data={ 
userInfo={nickname="administrator", + username="admin", id="100"} }})) + elseif resName == '/hello/500' then + ngx.status = 500 + ngx.say(json_encode({ok=false, reason="ERR_SERVER_ERROR"})) + elseif resName == '/hello/401' then + ngx.status = 401 + ngx.say(json_encode({ok=false, reason="ERR_TOKEN_INVALID"})) + else + ngx.status = 403 + ngx.say(json_encode({ok=false, reason="ERR_ACCESS_DENIED"})) + end +end + + +function _M.wolf_rbac_user_info() + local headers = ngx.req.get_headers() + local token = headers['x-rbac-token'] + if token ~= 'wolf-rbac-token' then + ngx.say(json_encode({ok=false, reason="ERR_TOKEN_INVALID"})) + ngx.exit(0) + end + + ngx.say(json_encode({ok=true, + data={ userInfo={nickname="administrator", username="admin", id="100"} }})) +end + + +function _M.wolf_rbac_change_pwd() + ngx.req.read_body() + local data = ngx.req.get_body_data() + local args = json_decode(data) + if args.oldPassword ~= "123456" then + ngx.say(json_encode({ok=false, reason="ERR_OLD_PASSWORD_INCORRECT"})) + ngx.exit(0) + end + + ngx.say(json_encode({ok=true, data={ }})) +end + + +function _M.wolf_rbac_custom_headers() + local headers = ngx.req.get_headers() + ngx.say('id:' .. headers['X-UserId'] .. ',username:' .. headers['X-Username'] + .. ',nickname:' .. 
headers['X-Nickname']) +end + + +function _M.websocket_handshake() + local websocket = require "resty.websocket.server" + local wb, err = websocket:new() + if not wb then + ngx.log(ngx.ERR, "failed to new websocket: ", err) + return ngx.exit(400) + end + + local bytes, err = wb:send_text("hello") + if not bytes then + ngx.log(ngx.ERR, "failed to send text: ", err) + return ngx.exit(444) + end +end +_M.websocket_handshake_route = _M.websocket_handshake + + +function _M.api_breaker() + ngx.exit(tonumber(ngx.var.arg_code)) +end + + +function _M.mysleep() + ngx.sleep(tonumber(ngx.var.arg_seconds)) + if ngx.var.arg_abort then + ngx.exit(ngx.ERROR) + else + ngx.say(ngx.var.arg_seconds) + end +end + + +local function print_uri() + ngx.say(ngx.var.uri) +end +for i = 1, 100 do + _M["print_uri_" .. i] = print_uri +end + +function _M.print_uri_detailed() + ngx.say("ngx.var.uri: ", ngx.var.uri) + ngx.say("ngx.var.request_uri: ", ngx.var.request_uri) +end + +function _M.headers() + local args = ngx.req.get_uri_args() + for name, val in pairs(args) do + ngx.header[name] = nil + ngx.header[name] = val + end + + ngx.say("/headers") +end + + +function _M.echo() + ngx.req.read_body() + local hdrs = ngx.req.get_headers() + for k, v in pairs(hdrs) do + ngx.header[k] = v + end + ngx.print(ngx.req.get_body_data() or "") +end + + +function _M.log() + ngx.req.read_body() + local body = ngx.req.get_body_data() + local ct = ngx.var.content_type + if ct ~= "text/plain" then + body = json_decode(body) + body = json_encode(body) + end + ngx.log(ngx.WARN, "request log: ", body or "nil") +end + + +function _M.server_error() + error("500 Internal Server Error") +end + + +function _M.log_request() + ngx.log(ngx.WARN, "uri: ", ngx.var.uri) + local headers = ngx.req.get_headers() + + local keys = {} + for k in pairs(headers) do + table.insert(keys, k) + end + table.sort(keys) + + for _, key in ipairs(keys) do + ngx.log(ngx.WARN, key, ": ", headers[key]) + end +end + + +function 
_M.v3_auth_authenticate() + ngx.log(ngx.WARN, "etcd auth failed!") +end + + +function _M._well_known_openid_configuration() + local t = require("lib.test_admin") + local openid_data = t.read_file("t/plugin/openid-connect/configuration.json") + ngx.say(openid_data) +end + +function _M.google_logging_token() + local args = ngx.req.get_uri_args() + local args_token_type = args.token_type or "Bearer" + ngx.req.read_body() + local data = ngx.decode_args(ngx.req.get_body_data()) + local jwt = require("resty.jwt") + local access_scopes = "https://apisix.apache.org/logs:admin" + local verify = jwt:verify(rsa_public_key, data["assertion"]) + if not verify.verified then + ngx.status = 401 + ngx.say(json_encode({ error = "identity authentication failed" })) + return + end + + local scopes_valid = type(verify.payload.scope) == "string" and + verify.payload.scope:find(access_scopes) + if not scopes_valid then + ngx.status = 403 + ngx.say(json_encode({ error = "no access to this scopes" })) + return + end + + local expire_time = (verify.payload.exp or ngx.time()) - ngx.time() + if expire_time <= 0 then + expire_time = 0 + end + + local jwt_token = jwt:sign(rsa_private_key, { + header = { typ = "JWT", alg = "RS256" }, + payload = { exp = verify.payload.exp, scope = access_scopes } + }) + + ngx.say(json_encode({ + access_token = jwt_token, + expires_in = expire_time, + token_type = args_token_type + })) +end + +function _M.google_logging_entries() + local args = ngx.req.get_uri_args() + local args_token_type = args.token_type or "Bearer" + ngx.req.read_body() + local data = ngx.req.get_body_data() + local jwt = require("resty.jwt") + local access_scopes = "https://apisix.apache.org/logs:admin" + + local headers = ngx.req.get_headers() + local token = headers["Authorization"] + if not token then + ngx.status = 401 + ngx.say(json_encode({ error = "authentication header not exists" })) + return + end + + token = string.sub(token, #args_token_type + 2) + local verify = 
jwt:verify(rsa_public_key, token) + if not verify.verified then + ngx.status = 401 + ngx.say(json_encode({ error = "identity authentication failed" })) + return + end + + local scopes_valid = type(verify.payload.scope) == "string" and + verify.payload.scope:find(access_scopes) + if not scopes_valid then + ngx.status = 403 + ngx.say(json_encode({ error = "no access to this scopes" })) + return + end + + local expire_time = (verify.payload.exp or ngx.time()) - ngx.time() + if expire_time <= 0 then + ngx.status = 403 + ngx.say(json_encode({ error = "token has expired" })) + return + end + + ngx.say(data) +end + +function _M.google_secret_token() + local args = ngx.req.get_uri_args() + local args_token_type = args.token_type or "Bearer" + ngx.req.read_body() + local data = ngx.decode_args(ngx.req.get_body_data()) + local jwt = require("resty.jwt") + local access_scopes = "https://www.googleapis.com/auth/cloud" + local verify = jwt:verify(rsa_public_key, data["assertion"]) + if not verify.verified then + ngx.status = 401 + ngx.say(json_encode({ error = "identity authentication failed" })) + return + end + + local scopes_valid = type(verify.payload.scope) == "string" and + verify.payload.scope:find(access_scopes) + if not scopes_valid then + ngx.status = 403 + ngx.say(json_encode({ error = "no access to this scope" })) + return + end + + local expire_time = (verify.payload.exp or ngx.time()) - ngx.time() + if expire_time <= 0 then + expire_time = 0 + end + + local jwt_token = jwt:sign(rsa_private_key, { + header = { typ = "JWT", alg = "RS256" }, + payload = { exp = verify.payload.exp, scope = access_scopes } + }) + + ngx.say(json_encode({ + access_token = jwt_token, + expires_in = expire_time, + token_type = args_token_type + })) +end + +function _M.google_secret_apisix_jack() + local args = ngx.req.get_uri_args() + local args_token_type = args.token_type or "Bearer" + local jwt = require("resty.jwt") + local access_scopes = "https://www.googleapis.com/auth/cloud" + + 
local headers = ngx.req.get_headers() + local token = headers["Authorization"] + if not token then + ngx.status = 401 + ngx.say(json_encode({ error = "authentication header not exists" })) + return + end + + token = string.sub(token, #args_token_type + 2) + local verify = jwt:verify(rsa_public_key, token) + if not verify.verified then + ngx.status = 401 + ngx.say(json_encode({ error = "identity authentication failed" })) + return + end + + local scopes_valid = type(verify.payload.scope) == "string" and + verify.payload.scope:find(access_scopes) + if not scopes_valid then + ngx.status = 403 + ngx.say(json_encode({ error = "no access to this scope" })) + return + end + + local expire_time = (verify.payload.exp or ngx.time()) - ngx.time() + if expire_time <= 0 then + ngx.status = 403 + ngx.say(json_encode({ error = "token has expired" })) + return + end + + local response = { + name = "projects/647037004838/secrets/apisix/versions/1", + payload = { + data = "eyJrZXkiOiJ2YWx1ZSJ9", + dataCrc32c = "2296192492" + } + } + + ngx.status = 200 + ngx.say(json_encode(response)) +end + +function _M.google_secret_apisix_error_jack() + local args = ngx.req.get_uri_args() + local args_token_type = args.token_type or "Bearer" + local jwt = require("resty.jwt") + local access_scopes = "https://www.googleapis.com/auth/root/cloud" + + local headers = ngx.req.get_headers() + local token = headers["Authorization"] + if not token then + ngx.status = 401 + ngx.say(json_encode({ error = "authentication header not exists" })) + return + end + + token = string.sub(token, #args_token_type + 2) + local verify = jwt:verify(rsa_public_key, token) + if not verify.verified then + ngx.status = 401 + ngx.say(json_encode({ error = "identity authentication failed" })) + return + end + + local scopes_valid = type(verify.payload.scope) == "string" and + verify.payload.scope:find(access_scopes) + if not scopes_valid then + ngx.status = 403 + ngx.say(json_encode({ error = "no access to this scope" })) + 
return + end + + local expire_time = (verify.payload.exp or ngx.time()) - ngx.time() + if expire_time <= 0 then + ngx.status = 403 + ngx.say(json_encode({ error = "token has expired" })) + return + end + + local response = { + name = "projects/647037004838/secrets/apisix_error/versions/1", + payload = { + data = "eyJrZXkiOiJ2YWx1ZSJ9", + dataCrc32c = "2296192492" + } + } + + ngx.status = 200 + ngx.say(json_encode(response)) +end + +function _M.google_secret_apisix_mysql() + local args = ngx.req.get_uri_args() + local args_token_type = args.token_type or "Bearer" + local jwt = require("resty.jwt") + local access_scopes = "https://www.googleapis.com/auth/cloud" + + local headers = ngx.req.get_headers() + local token = headers["Authorization"] + if not token then + ngx.status = 401 + ngx.say(json_encode({ error = "authentication header not exists" })) + return + end + + token = string.sub(token, #args_token_type + 2) + local verify = jwt:verify(rsa_public_key, token) + if not verify.verified then + ngx.status = 401 + ngx.say(json_encode({ error = "identity authentication failed" })) + return + end + + local scopes_valid = type(verify.payload.scope) == "string" and + verify.payload.scope:find(access_scopes) + if not scopes_valid then + ngx.status = 403 + ngx.say(json_encode({ error = "no access to this scope" })) + return + end + + local expire_time = (verify.payload.exp or ngx.time()) - ngx.time() + if expire_time <= 0 then + ngx.status = 403 + ngx.say(json_encode({ error = "token has expired" })) + return + end + + local response = { + name = "projects/647037004838/secrets/apisix/versions/1", + payload = { + data = "c2VjcmV0", + dataCrc32c = "0xB03C4D4D" + } + } + + ngx.status = 200 + ngx.say(json_encode(response)) +end + +function _M.plugin_proxy_rewrite_resp_header() + ngx.req.read_body() + local s = "plugin_proxy_rewrite_resp_header" + ngx.header['Content-Length'] = #s + 1 + ngx.say(s) +end + +-- Please add your fake upstream above +function _M.go() + local action 
= string.sub(ngx.var.uri, 2) + action = string.gsub(action, "[/\\.-]", "_") + if not action or not _M[action] then + ngx.log(ngx.WARN, "undefined path in test server, uri: ", ngx.var.request_uri) + return ngx.exit(404) + end + + inject_headers() + return _M[action]() +end + + +function _M.clickhouse_logger_server() + ngx.req.read_body() + local data = ngx.req.get_body_data() + local headers = ngx.req.get_headers() + ngx.log(ngx.WARN, "clickhouse body: ", data) + for k, v in pairs(headers) do + ngx.log(ngx.WARN, "clickhouse headers: " .. k .. ":" .. v) + end + ngx.say("ok") +end + + +function _M.mock_compressed_upstream_response() + local s = "compressed_response" + ngx.header['Content-Encoding'] = 'gzip' + ngx.say(s) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/test_admin.lua b/CloudronPackages/APISIX/apisix-source/t/lib/test_admin.lua new file mode 100644 index 0000000..18caf81 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/test_admin.lua @@ -0,0 +1,272 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local http = require("resty.http") +local json = require("toolkit.json") +local core = require("apisix.core") +local aes = require "resty.aes" +local ngx_encode_base64 = ngx.encode_base64 +local str_find = core.string.find +local dir_names = {} + + +local _M = {} + + +local function com_tab(pattern, data, deep) + deep = deep or 1 + + for k, v in pairs(pattern) do + dir_names[deep] = k + + if v == ngx.null then + v = nil + end + + if type(v) == "table" and data[k] then + local ok, err = com_tab(v, data[k], deep + 1) + if not ok then + return false, err + end + + elseif v ~= data[k] then + return false, "path: " .. table.concat(dir_names, "->", 1, deep) + .. " expect: " .. tostring(v) .. " got: " + .. tostring(data[k]) + end + end + + return true +end + + +local methods = { + [ngx.HTTP_GET ] = "GET", + [ngx.HTTP_HEAD ] = "HEAD", + [ngx.HTTP_PUT ] = "PUT", + [ngx.HTTP_POST ] = "POST", + [ngx.HTTP_DELETE ] = "DELETE", + [ngx.HTTP_OPTIONS] = "OPTIONS", + [ngx.HTTP_PATCH] = "PATCH", + [ngx.HTTP_TRACE] = "TRACE", +} + + +function _M.test_ipv6(uri) + local sock = ngx.socket.tcp() + local ok, err = sock:connect("[::1]", 1984) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local req = "GET " .. uri .. " HTTP/1.0\r\nHost: localhost\r\n" + .. 
"Connection: close\r\n\r\n" + -- req = "OK" + -- ngx.log(ngx.WARN, "req: ", req) + + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send request: ", err) + return + end + + ngx.say("request sent: ", bytes) + + while true do + local line, err, part = sock:receive() + if line then + if line ~= "" then + ngx.say("received: ", line) + else + ngx.say("received:") + end + + else + ngx.say("failed to receive a line: ", err, " [", part, "]") + break + end + end + + ok, err = sock:close() + ngx.say("close: ", ok, " ", err) +end + + +function _M.comp_tab(left_tab, right_tab) + local err + dir_names = {} + + local _ + if type(left_tab) == "string" then + left_tab, _, err = json.decode(left_tab) + if not left_tab then + return false, "failed to decode expected data: " .. err + end + end + if type(right_tab) == "string" then + right_tab, _, err = json.decode(right_tab) + if not right_tab then + return false, "failed to decode expected data: " .. err + end + end + + local ok, err = com_tab(left_tab, right_tab) + if not ok then + return false, err + end + + return true +end + + +local function set_yaml(fn, data) + local profile = os.getenv("APISIX_PROFILE") + if profile then + fn = fn .. "-" .. profile .. ".yaml" + else + fn = fn .. ".yaml" + end + + local f = assert(io.open(os.getenv("TEST_NGINX_HTML_DIR") .. "/../conf/" .. 
fn, 'w')) + assert(f:write(data)) + f:close() +end + + +function _M.set_config_yaml(data) + set_yaml("config", data) +end + + +function _M.set_apisix_yaml(data) + set_yaml("apisix", data) +end + + +function _M.test(uri, method, body, pattern, headers) + if not headers then + headers = {} + end + if not headers["Content-Type"] then + headers["Content-Type"] = "application/x-www-form-urlencoded" + end + + if type(body) == "table" then + -- {} will be encoded as '[]' whether decode_array_with_array_mt or not + body = json.encode(body) + end + + if type(pattern) == "table" then + pattern = json.encode(pattern) + end + + if type(method) == "number" then + method = methods[method] + end + + local httpc = http.new() + -- https://github.com/ledgetech/lua-resty-http + uri = ngx.var.scheme .. "://" .. ngx.var.server_addr + .. ":" .. ngx.var.server_port .. uri + local res, err = httpc:request_uri(uri, + { + method = method, + body = body, + keepalive = false, + headers = headers, + } + ) + if not res then + ngx.log(ngx.ERR, "failed http: ", err) + return nil, err + end + + if res.status >= 300 then + return res.status, res.body, res.headers + end + + if pattern == nil then + return res.status, "passed", res.body, res.headers + end + + local res_data = json.decode(res.body) + local ok, err = _M.comp_tab(pattern, res_data) + if not ok then + return 500, "failed, " .. err, res_data + end + + return 200, "passed", res_data, res.headers +end + + +function _M.read_file(path) + local f = assert(io.open(path, "rb")) + local cert = f:read("*all") + f:close() + return cert +end + + +function _M.req_self_with_http(uri, method, body, headers) + if type(body) == "table" then + body = json.encode(body) + end + + if type(method) == "number" then + method = methods[method] + end + headers = headers or {} + + local httpc = http.new() + -- https://github.com/ledgetech/lua-resty-http + uri = ngx.var.scheme .. "://" .. ngx.var.server_addr + .. ":" .. ngx.var.server_port .. 
uri + headers["Content-Type"] = "application/x-www-form-urlencoded" + local res, err = httpc:request_uri(uri, + { + method = method, + body = body, + keepalive = false, + headers = headers, + } + ) + + return res, err +end + + +function _M.aes_encrypt(origin) + local iv = "1234567890123456" + local aes_128_cbc_with_iv = assert(aes:new(iv, nil, aes.cipher(128, "cbc"), {iv=iv})) + + if aes_128_cbc_with_iv ~= nil and str_find(origin, "---") then + local encrypted = aes_128_cbc_with_iv:encrypt(origin) + if encrypted == nil then + core.log.error("failed to encrypt key[", origin, "] ") + return origin + end + + return ngx_encode_base64(encrypted) + end + + return origin +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/lib/test_inspect.lua b/CloudronPackages/APISIX/apisix-source/t/lib/test_inspect.lua new file mode 100644 index 0000000..62de599 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/lib/test_inspect.lua @@ -0,0 +1,62 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +-- +-- Don't edit existing code, because the hooks are identified by line number. +-- Instead, append new code to this file. 
+-- +local _M = {} + +function _M.run1() + local var1 = "hello" + local var2 = "world" + return var1 .. var2 +end + +local upvar1 = 2 +local upvar2 = "yes" +function _M.run2() + return upvar1 +end + +function _M.run3() + return upvar1 .. upvar2 +end + +local str = string.rep("a", 8192) .. "llzz" + +local sk = require("socket") + +function _M.hot1() + local t1 = sk.gettime() + for i=1,100000 do + string.find(str, "ll", 1, true) + end + local t2 = sk.gettime() + return t2 - t1 +end + +function _M.hot2() + local t1 = sk.gettime() + for i=1,100000 do + string.find(str, "ll", 1, true) + end + local t2 = sk.gettime() + return t2 - t1 +end + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/misc/patch.t b/CloudronPackages/APISIX/apisix-source/t/misc/patch.t new file mode 100644 index 0000000..e5c6223 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/misc/patch.t @@ -0,0 +1,218 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +$ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: flatten send args +--- extra_init_by_lua +local sock = ngx.socket.tcp() +getmetatable(sock.sock).__index.send = function (_, data) + ngx.log(ngx.WARN, data) + return #data +end +sock:send({1, "a", {1, "b", true}}) +sock:send(1, "a", {1, "b", false}) +--- config + location /t { + return 200; + } +--- grep_error_log eval +qr/send\(\): \S+/ +--- grep_error_log_out +send(): 1a1btrue +send(): 1a1bfalse + + + +=== TEST 2: sslhandshake options +--- extra_init_by_lua +local sock = ngx.socket.tcp() +sock:settimeout(1) +local ok, err = sock:connect("0.0.0.0", 12379) +if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return +end + +local sess, err = sock:sslhandshake(true, "test.com", true, true) +if not sess then + ngx.log(ngx.ERR, "failed to do SSL handshake: ", err) +end + +local sock = ngx.socket.tcp() +local ok, err = sock:connect("0.0.0.0", 12379) +if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return +end +local sess, err = sock:sslhandshake(true, "test.com", nil, true) +if not sess then + ngx.log(ngx.ERR, "failed to do SSL handshake: ", err) +end + +sock:setkeepalive() +--- config + location /t { + return 200; + } +--- grep_error_log eval +qr/failed to do SSL handshake/ +--- grep_error_log_out +failed to do SSL handshake +--- error_log +reused_session is not supported yet +send_status_req is not supported yet + + + +=== TEST 3: unix socket +--- http_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + } +--- extra_init_worker_by_lua +local sock = ngx.socket.tcp() +sock:settimeout(1) +local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") +if not ok then + ngx.log(ngx.ERR, "failed to connect: 
", err) + return +end + +local ok, err = sock:receive() +if not ok then + ngx.log(ngx.ERR, "failed to read: ", err) + return +end +--- config + location /t { + return 200; + } +--- error_log +failed to read: timeout + + + +=== TEST 4: resolve host by ourselves +--- yaml_config +apisix: + node_listen: 1984 + enable_resolv_search_opt: true +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local httpc = http.new() + local res, err = httpc:request_uri("http://apisix", {headers={Host="apisix.apache.org"}}) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say("ok") + } + } +--- response_body +ok + + + +=== TEST 5: resolve host by ourselves (in stream sub-system) +--- yaml_config +apisix: + node_listen: 1984 + enable_resolv_search_opt: true +--- stream_enable +--- stream_server_config + content_by_lua_block { + local sock = ngx.req.socket(true) + -- drain the buffer + local _, err = sock:receive(1) + if err ~= nil then + ngx.log(ngx.ERR, err) + return ngx.exit(-1) + end + local http = require("resty.http") + local httpc = http.new() + local res, err = httpc:request_uri("http://apisix", {headers={Host="apisix.apache.org"}}) + if not res then + ngx.log(ngx.ERR, err) + return ngx.exit(-1) + end + sock:send("ok") + } +--- stream_request eval +m +--- stream_response: ok + + + +=== TEST 6: resolve host by ourselves (UDP) +--- yaml_config +apisix: + node_listen: 1984 + enable_resolv_search_opt: true +--- config + location /t { + content_by_lua_block { + local sock = ngx.socket.udp() + local res, err = sock:setpeername("apisix", 80) + if not res then + ngx.log(ngx.ERR, err) + end + } + } + + + +=== TEST 7: ensure our patch works with unix socket +--- stream_server_config + content_by_lua_block { + } +--- stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + content_by_lua_block { + } + } +--- config + location /t { + content_by_lua_block { + local sock = ngx.socket.udp() + local res, err = 
sock:setpeername("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not res then + ngx.log(ngx.ERR, err) + end + } + } diff --git a/CloudronPackages/APISIX/apisix-source/t/misc/pre-function.t b/CloudronPackages/APISIX/apisix-source/t/misc/pre-function.t new file mode 100644 index 0000000..316c93c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/misc/pre-function.t @@ -0,0 +1,325 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +$ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: invalid pre_function +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "_meta": { + "pre_function": "not a function" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to load _meta.pre_function in plugin limit-count: [string \"meta pre_function\"]:1: unexpected symbol near 'not'"} + + + +=== TEST 2: attempt setting pre_function in _meta with a typo in `pre_function` +# this is to test the case where user (or CP) would attempt configuring pre_function +# using incorrect field name, this validation is achieved by setting `additionalProperties = false` +# in schema_def.lua +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "_meta": { + "prefunction": "not a function" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check 
the configuration of plugin limit-count err: property \"_meta\" validation failed: additional properties forbidden, found prefunction"} + + + +=== TEST 3: pre_function with error in code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "_meta": { + "pre_function": "return function() print(invalid.index) end" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 4: sending request will execute erroneous code and print error log +--- request +GET /hello +--- error_log +pre_function execution for plugin limit-count failed: [string "meta pre_function"]:1: attempt to index global 'invalid' (a nil value), + + + +=== TEST 5: test pre_function sanity: correct function +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "_meta": { + "pre_function": "return function(conf, ctx) ngx.log(ngx.WARN, 'hello ', ngx.req.get_headers()[\"User-Agent\"]) end" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 6: request +--- request +GET /hello +--- more_headers +User-Agent: test-nginx +--- error_log +hello test-nginx + + + +=== TEST 7: pre_function is executed in all 
phases +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "example-plugin": { + "i": 11, + "_meta": { + "pre_function": "return function(conf, ctx) ngx.log(ngx.WARN, 'hello: ', ngx.get_phase()) end" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 8: request +--- request +GET /hello +--- error_log +hello: access +hello: header_filter +hello: body_filter +hello: log + + + +=== TEST 9: test pre-function with proxy-rewrite, (rewrite phase) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri", + "headers": { + "x-api": "$example_var_name" + }, + "_meta": { + "pre_function": "return function(conf, ctx) local core = require \"apisix.core\" core.ctx.register_var(\"example_var_name\", function(ctx) return \"example_var_value\" end) end" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: hit route(header supports nginx variables) +--- request +GET /hello +--- response_body +uri: /uri +host: localhost +x-api: example_var_value +x-real-ip: 127.0.0.1 diff --git a/CloudronPackages/APISIX/apisix-source/t/misc/timers.t b/CloudronPackages/APISIX/apisix-source/t/misc/timers.t new file mode 100644 index 0000000..f572aa6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/misc/timers.t @@ -0,0 +1,54 @@ +# +# Licensed to the Apache Software Foundation (ASF) 
under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: always start a new timer even the previous one is blocked +--- config + location /t { + content_by_lua_block { + local timers = require("apisix.timers") + timers.register_timer("t", function() + ngx.log(ngx.WARN, "fire") + end) + timers.register_timer("c", function() + ngx.sleep(5) + end) + + ngx.sleep(2.1) + } + } +--- grep_error_log eval +qr/fire/ +--- grep_error_log_out +fire +fire diff --git a/CloudronPackages/APISIX/apisix-source/t/node/chash-balance.t b/CloudronPackages/APISIX/apisix-source/t/node/chash-balance.t new file mode 100644 index 0000000..05c6c81 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/chash-balance.t @@ -0,0 +1,656 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +worker_connections(1024); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(two upstream node) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "remote_addr", + "type": "chash", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit routes +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1980"}] + + + +=== TEST 3: set route(three upstream node) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "remote_addr", + "type": "chash", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1, + "127.0.0.1:1982": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit routes +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1982"}] + + + +=== TEST 5: set route(three upstream node) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "remote_addr", + "type": "chash", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 0, + "127.0.0.1:1982": 0 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit routes +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1980"}] + + + +=== TEST 7 set route(three upstream node with querystring) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "query_string", + "type": "chash", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1, + "127.0.0.1:1982": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit routes with querystring +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port?var=2&var2=" + + local t = {} + local ports_count = {} + for i = 1, 180 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri..i, {method = "GET"}) + if not res then + ngx.log(ngx.ERR, err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.count > b.count + end + table.sort(ports_arr, cmd) + + if (ports_arr[1].count - ports_arr[3].count) / ports_arr[2].count > 0.2 then + ngx.say(require("toolkit.json").encode(ports_arr)) + else + ngx.say('ok') + end + } + } +--- request +GET /t +--- wait: 5 +--- response_body +ok + + + +=== TEST 9: set route(three upstream node with arg_xxx) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "arg_device_id", + "type": "chash", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1, + "127.0.0.1:1982": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: hit routes with args +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port?device_id=" + + local t = {} + local ports_count = {} + for i = 1, 180 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri..i, {method = "GET"}) + if not res then + ngx.log(ngx.ERR, err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.count > b.count + end + table.sort(ports_arr, cmd) + + if (ports_arr[1].count - ports_arr[3].count) / ports_arr[2].count > 0.2 then + ngx.say(require("toolkit.json").encode(ports_arr)) + else + ngx.say('ok') + end + } + } +--- request +GET /t +--- wait: 5 +--- response_body +ok + + + +=== TEST 11: set route(weight 0) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "arg_device_id", + "type": "chash", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1981": 0 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit routes +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port?device_id=1" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + + ngx.status = res.status + ngx.say(res.body) + } + } +--- request +GET /t +--- error_code_like: ^(?:50\d)$ +--- error_log +failed to find valid upstream server, no valid upstream node + + + +=== TEST 13: set route(ensure retry can try every node) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "arg_device_id", + "type": "chash", + "nodes": { + "127.0.0.1:1979": 1000, + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: hit routes +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port?device_id=1" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +200 +--- error_log +Connection refused + + + +=== TEST 15: set routes with very big weights +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "arg_device_id", + "type": "chash", + "nodes": { + "127.0.0.1:1980": 1000000000, + "127.0.0.1:1981": 2000000000, + "127.0.0.1:1982": 1000000000 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port?device_id=1" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + + -- a `size too large` error will be thrown if we don't reduce the weight + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +200 + + + +=== TEST 17: set routes with very big weights, some nodes have zero weight +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "arg_device_id", + "type": "chash", + "nodes": { + "127.0.0.1:1980": 1000000000, + "127.0.0.1:1981": 0, + "127.0.0.1:1982": 4000000000 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/server_port?device_id=1" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + + -- a `size too large` error will be thrown if we don't reduce the weight + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +200 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/chash-hashon.t b/CloudronPackages/APISIX/apisix-source/t/node/chash-hashon.t new file mode 100644 index 0000000..7329332 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/chash-hashon.t @@ -0,0 +1,742 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: add two consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + } + } + }]], + [[{ + "value": { + "username": "jack", + "plugins": { + "key-auth": { + "key": "re62sf0vRJqOBjvJJ6RUcA==" + } + } + } + }]] + ) + + if code ~= 200 then + ngx.say("create consumer jack failed") + return + end + ngx.say(code .. " " ..body) + + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "tom", + "plugins": { + "key-auth": { + "key": "auth-tom" + } + } + }]], + [[{ + "value": { + "username": "tom", + "plugins": { + "key-auth": { + "key": "RAL/niDfEUpx+ynsoqWDuA==" + } + } + } + }]] + ) + ngx.say(code .. 
" " ..body) + } + } +--- request +GET /t +--- response_body +200 passed +200 passed + + + +=== TEST 2: add key auth plugin, chash hash_on consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "type": "chash", + "hash_on": "consumer" + }, + "uri": "/server_port" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: hit routes, hash_on one consumer +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/server_port" + + local request_headers = {} + request_headers["apikey"] = "auth-jack" + + local ports_count = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", headers = request_headers}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":4,"port":"1981"}] +--- grep_error_log eval +qr/hash_on: consumer|chash_key: "jack"|chash_key: "tom"/ +--- grep_error_log_out +hash_on: consumer +chash_key: "jack" +hash_on: consumer +chash_key: "jack" +hash_on: consumer +chash_key: "jack" +hash_on: consumer +chash_key: "jack" + + + +=== TEST 4: hit routes, hash_on two consumer +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" 
.. ngx.var.server_port + .. "/server_port" + + local request_headers = {} + local ports_count = {} + for i = 1, 4 do + if i%2 == 0 then + request_headers["apikey"] = "auth-tom" + else + request_headers["apikey"] = "auth-jack" + end + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", headers = request_headers}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":2,"port":"1981"},{"count":2,"port":"1980"}] +--- grep_error_log eval +qr/hash_on: consumer|chash_key: "jack"|chash_key: "tom"/ +--- grep_error_log_out +hash_on: consumer +chash_key: "jack" +hash_on: consumer +chash_key: "tom" +hash_on: consumer +chash_key: "jack" +hash_on: consumer +chash_key: "tom" + + + +=== TEST 5: set route(two upstream node, type chash), hash_on header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "custom_header", + "type": "chash", + "hash_on": "header", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit routes, hash_on custom header +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local request_headers = {} + request_headers["custom_header"] = "custom-one" + + local ports_count = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", headers = request_headers}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":4,"port":"1980"}] +--- grep_error_log eval +qr/hash_on: header|chash_key: "custom-one"/ +--- grep_error_log_out +hash_on: header +chash_key: "custom-one" +hash_on: header +chash_key: "custom-one" +hash_on: header +chash_key: "custom-one" +hash_on: header +chash_key: "custom-one" + + + +=== TEST 7: hit routes, hash_on custom header miss, use default +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local request_headers = {} + request_headers["miss-custom-header"] = "custom-one" + + local ports_count = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", headers = request_headers}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":4,"port":"1980"}] +--- grep_error_log eval +qr/chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1/ +--- grep_error_log_out +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 + + + +=== TEST 8: set route(two upstream node, type chash), hash_on cookie +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "custom-cookie", + "type": "chash", + "hash_on": "cookie", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit routes, hash_on custom cookie +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local request_headers = {} + request_headers["Cookie"] = "custom-cookie=cuscookie" + + local ports_count = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", headers = request_headers}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":4,"port":"1981"}] +--- grep_error_log eval +qr/hash_on: cookie|chash_key: "cuscookie"/ +--- grep_error_log_out +hash_on: cookie +chash_key: "cuscookie" +hash_on: cookie +chash_key: "cuscookie" +hash_on: cookie +chash_key: "cuscookie" +hash_on: cookie +chash_key: "cuscookie" + + + +=== TEST 10: hit routes, hash_on custom cookie miss, use default +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local request_headers = {} + request_headers["Cookie"] = "miss-custom-cookie=cuscookie" + + local ports_count = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", headers = request_headers}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":4,"port":"1980"}] +--- grep_error_log eval +qr/chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1/ +--- grep_error_log_out +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 + + + +=== TEST 11: set route(key contains uppercase letters and hyphen) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "X-Sessionid", + "type": "chash", + "hash_on": "header", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit routes with header +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 6 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["X-Sessionid"] = "chash_val_" .. i + } + }) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":3,"port":"1981"},{"count":3,"port":"1980"}] +--- error_log +chash_key: "chash_val_1" +chash_key: "chash_val_2" +chash_key: "chash_val_3" +chash_key: "chash_val_4" +chash_key: "chash_val_5" +chash_key: "chash_val_6" + + + +=== TEST 13: set route(two upstream nodes, type chash), hash_on vars_combinations +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "$http_custom_header-$http_custom_header_second", + "type": "chash", + "hash_on": "vars_combinations", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: hit routes, hash_on custom header combinations +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local request_headers = {} + request_headers["custom_header"] = "custom-one" + request_headers["custom_header_second"] = "custom-two" + + local ports_count = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", headers = request_headers}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":4,"port":"1980"}] +--- grep_error_log eval +qr/hash_on: vars_combinations|chash_key: "custom-one-custom-two"/ +--- grep_error_log_out +hash_on: vars_combinations +chash_key: "custom-one-custom-two" +hash_on: vars_combinations +chash_key: "custom-one-custom-two" +hash_on: vars_combinations +chash_key: "custom-one-custom-two" +hash_on: vars_combinations +chash_key: "custom-one-custom-two" + + + +=== TEST 15: hit routes, hash_on custom header combinations +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + } + } +--- request +GET /t +--- response_body +[{"count":2,"port":"1980"}] +--- grep_error_log eval +qr/chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1/ +--- grep_error_log_out +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 +chash_key fetch is nil, use default chash_key remote_addr: 127.0.0.1 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/client-mtls-openresty.t b/CloudronPackages/APISIX/apisix-source/t/node/client-mtls-openresty.t new file mode 100644 index 0000000..050394d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/client-mtls-openresty.t @@ -0,0 +1,272 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan('no_plan'); +} else { + plan(skip_all => "for vanilla OpenResty only"); +} + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set verification +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + }, + uri = "/hello" + } + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + client = { + ca = ssl_ca_cert, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 2: hit +--- exec +curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://localhost:1994/hello +--- response_body +hello world + + + +=== TEST 3: no client certificate +--- exec +curl -k https://localhost:1994/hello +--- response_body eval +qr/400 Bad Request/ +--- error_log +client certificate was not present + + + +=== TEST 4: wrong client certificate +--- exec +curl --cert t/certs/apisix.crt --key t/certs/apisix.key -k https://localhost:1994/hello +--- response_body eval +qr/400 Bad Request/ +--- error_log eval +qr/client certificate verification is not passed: FAILED:self[- ]signed certificate/ + + + +=== TEST 5: hit with different host which doesn't require mTLS +--- exec 
+curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://localhost:1994/hello -H "Host: test.com" +--- response_body +hello world + + + +=== TEST 6: set verification (2 ssl objects) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + }, + uri = "/hello" + } + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + client = { + ca = ssl_ca_cert, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + return + end + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + } + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 7: hit without mTLS verify, with Host requires mTLS verification +--- exec +curl -k https://localhost:1994/hello -H "Host: test.com" +--- response_body eval +qr/400 Bad Request/ +--- error_log +client certificate was not present + + + +=== TEST 8: set verification (2 ssl objects, both have mTLS) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_ca_cert2 = t.read_file("t/certs/apisix.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + type = "roundrobin", + nodes = { 
+ ["127.0.0.1:1980"] = 1, + }, + }, + uri = "/hello" + } + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + client = { + ca = ssl_ca_cert, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + return + end + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + client = { + ca = ssl_ca_cert2, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 9: hit with mTLS verify, with Host requires different mTLS verification +--- exec +curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://localhost:1994/hello -H "Host: test.com" +--- response_body eval +qr/400 Bad Request/ +--- error_log +client certificate verified with SNI localhost, but the host is test.com diff --git a/CloudronPackages/APISIX/apisix-source/t/node/client-mtls.t b/CloudronPackages/APISIX/apisix-source/t/node/client-mtls.t new file mode 100644 index 0000000..78715b9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/client-mtls.t @@ -0,0 +1,655 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: bad client certificate +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + client = { + ca = ("test.com"):rep(128), + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to validate client_cert: failed to parse cert: PEM_read_bio_X509_AUX() failed"} + + + +=== TEST 2: missing client certificate +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + 
local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + client = { + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"client\" validation failed: property \"ca\" is required"} + + + +=== TEST 3: set verification +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1994"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + }, + plugins = { + ["proxy-rewrite"] = { + uri = "/hello" + } + }, + uri = "/mtls" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + }, + uri = "/hello" + } + assert(t.test('/apisix/admin/routes/2', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + client = { + ca = ssl_ca_cert, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 4: hit +--- request +GET /mtls +--- more_headers +Host: localhost +--- response_body +hello world + + + +=== TEST 5: no client certificate +--- config + location /t { + content_by_lua_block { 
+ local t = require("lib.test_admin") + local json = require("toolkit.json") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1994"] = 1, + }, + }, + plugins = { + ["proxy-rewrite"] = { + uri = "/hello" + } + }, + uri = "/mtls2" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 6: hit +--- request +GET /mtls2 +--- more_headers +Host: localhost +--- error_code: 502 +--- error_log +peer did not return a certificate + + + +=== TEST 7: wrong client certificate +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1994"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + }, + plugins = { + ["proxy-rewrite"] = { + uri = "/hello" + } + }, + uri = "/mtls3" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 8: hit +--- request +GET /mtls3 +--- more_headers +Host: localhost +--- error_code: 502 +--- error_log +certificate verify failed + + + +=== TEST 9: set verification +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 
1, + }, + }, + uri = "/hello" + } + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 10: hit with different host which doesn't require mTLS +--- exec +curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://localhost:1994/hello -H "Host: x.com" +--- response_body +hello world + + + +=== TEST 11: set verification (2 ssl objects) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + }, + uri = "/hello" + } + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + client = { + ca = ssl_ca_cert, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + return + end + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + } + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 12: hit without mTLS verify, with Host requires mTLS verification +--- exec +curl -k https://localhost:1994/hello -H "Host: test.com" +--- response_body eval +qr/400 Bad Request/ +--- error_log +client certificate verified with SNI localhost, but the host 
is test.com + + + +=== TEST 13: set verification (2 ssl objects, both have mTLS) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_ca_cert2 = t.read_file("t/certs/apisix.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + }, + uri = "/hello" + } + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + client = { + ca = ssl_ca_cert, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + return + end + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + client = { + ca = ssl_ca_cert2, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 14: hit with mTLS verify, with Host requires different mTLS verification +--- exec +curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://localhost:1994/hello -H "Host: test.com" +--- response_body eval +qr/400 Bad Request/ +--- error_log +client certificate verified with SNI localhost, but the host is test.com + + + +=== TEST 15: request localhost and save tls session to reuse +--- max_size: 1048576 +--- exec +echo "GET /hello HTTP/1.1\r\nHost: localhost\r\n" | \ + timeout 1 openssl s_client -ign_eof -connect 127.0.0.1:1994 \ + -servername localhost -cert t/certs/mtls_client.crt -key t/certs/mtls_client.key \ + -sess_out session.dat || true +--- response_body eval +qr/200 OK/ + + + +=== TEST 16: request 
test.com with saved tls session +--- max_size: 1048576 +--- exec +echo "GET /hello HTTP/1.1\r\nHost: test.com\r\n" | \ + openssl s_client -ign_eof -connect 127.0.0.1:1994 -servername test.com \ + -sess_in session.dat +--- response_body eval +qr/400 Bad Request/ +--- error_log +sni in client hello mismatch hostname of ssl session, sni: test.com, hostname: localhost + + + +=== TEST 17: set verification (2 ssl objects, both have mTLS) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_ca_cert2 = t.read_file("t/certs/apisix.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + }, + uri = "/*" + } + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + client = { + ca = ssl_ca_cert, + depth = 2, + skip_mtls_uri_regex = { + "/hello[0-9]+", + } + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + return + end + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + client = { + ca = ssl_ca_cert2, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 18: skip the mtls, although no client cert provided +--- exec +curl -k https://localhost:1994/hello1 +--- response_body eval +qr/hello1 world/ + + + +=== TEST 19: skip the mtls, although with wrong client cert +--- exec +curl -k --cert t/certs/test2.crt --key t/certs/test2.key -k https://localhost:1994/hello1 +--- response_body eval 
+qr/hello1 world/ + + + +=== TEST 20: mtls failed, returns 400 +--- exec +curl -k https://localhost:1994/hello +--- response_body eval +qr/400 Bad Request/ +--- error_log +client certificate was not present + + + +=== TEST 21: mtls failed, wrong client cert +--- exec +curl --cert t/certs/test2.crt --key t/certs/test2.key -k https://localhost:1994/hello +--- response_body eval +qr/400 Bad Request/ +--- error_log +client certificate verification is not passed: FAILED + + + +=== TEST 22: mtls failed, at handshake phase +--- exec +curl -k -v --resolve "test.com:1994:127.0.0.1" https://test.com:1994/hello +--- error_log +peer did not return a certificate diff --git a/CloudronPackages/APISIX/apisix-source/t/node/consumer-group.t b/CloudronPackages/APISIX/apisix-source/t/node/consumer-group.t new file mode 100644 index 0000000..5f863ad --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/consumer-group.t @@ -0,0 +1,312 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: consumer group usage +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, err = t('/apisix/admin/consumer_groups/bar', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "hello" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "group_id": "bar", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "basic-auth": {} + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local headers = { + ["Authorization"] = "Basic Zm9vOmJhcg==" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + ngx.say(res.body) + + local code, err = t('/apisix/admin/consumer_groups/bar', + ngx.HTTP_PATCH, + [[{ + "plugins": { + "response-rewrite": { + "body": "world" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.1) + + local res, err = httpc:request_uri(uri, {headers = headers}) + ngx.say(res.body) + } + } +--- response_body +hello +world + + + +=== TEST 2: validated plugins configuration via incremental sync (malformed data) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local core = require("apisix.core") + + assert(core.etcd.set("/consumer_groups/bar", + {id = "bar", plugins = { ["uri-blocker"] = { block_rules = 1 }}} + )) + -- wait for sync + ngx.sleep(0.6) + + assert(core.etcd.delete("/consumer_groups/bar")) + } + } +--- error_log +property "block_rules" validation failed + + + +=== TEST 3: don't override the plugin in the consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, err = t('/apisix/admin/consumer_groups/bar', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "hello" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "group_id": "bar", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + }, + "response-rewrite": { + "body": "world" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "basic-auth": {} + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end 
+ ngx.sleep(0.1) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local headers = { + ["Authorization"] = "Basic Zm9vOmJhcg==" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + ngx.say(res.body) + } + } +--- response_body +world + + + +=== TEST 4: check consumer_group_id var +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, err = t('/apisix/admin/consumer_groups/bar', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "phase": "access", + "functions" : ["return function(_, ctx) ngx.say(ctx.var.consumer_group_id); ngx.exit(200); end"] + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "group_id": "bar", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "basic-auth": {} + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local headers = { + ["Authorization"] = "Basic Zm9vOmJhcg==" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + ngx.print(res.body) + } + } +--- response_body +bar diff --git a/CloudronPackages/APISIX/apisix-source/t/node/consumer-plugin.t b/CloudronPackages/APISIX/apisix-source/t/node/consumer-plugin.t new file mode 100644 index 0000000..b5e6d7e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/consumer-plugin.t @@ -0,0 +1,464 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + }, + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: enable key auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: invalid consumer +--- request +GET /hello +--- more_headers +apikey: 123 +--- error_code: 401 +--- response_body +{"message":"Invalid API key in request"} + + + +=== TEST 4: valid consumer +--- request +GET /hello +--- more_headers +apikey: auth-one +--- response_body +hello world + + + +=== TEST 5: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-one +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 6: use the new configuration after the consumer's configuration is updated +--- config + location /t { + content_by_lua_block { + local function test() + local json_encode = require("toolkit.json").encode + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + + local status_count = {} + for i = 1, 5 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + apikey = "auth-one", + } + } + ) + if not res then + ngx.say(err) + return + end + + local status = tostring(res.status) + status_count[status] = (status_count[status] or 0) + 1 + end + ngx.say(json_encode(status_count)) + end + + test() + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "limit-count": { + "count": 4, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + }, + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + + ngx.sleep(0.1) + + test() + } + } +--- request +GET /t +--- response_body +{"200":2,"503":3} +{"200":4,"503":1} + + + +=== TEST 7: consumer with multiple auth plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "John_Doe", + "desc": "new consumer", + "plugins": { + "key-auth": { + "key": "consumer-plugin-John_Doe" + }, + "hmac-auth": { + "key_id": "my-access-key", + "secret_key": "my-secret-key", + "clock_skew": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: bind to routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.log(ngx.ERR, "failed to bind route 1") + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": {} + }, + 
"upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/status" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit consumer, key-auth +--- request +GET /hello +--- more_headers +apikey: consumer-plugin-John_Doe +--- response_body +hello world +--- error_log +find consumer John_Doe + + + +=== TEST 10: hit consumer, hmac-auth +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /status", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + core.log.info("signing_string:", signing_string) + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/status', + ngx.HTTP_GET, + nil, + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- error_log +find consumer John_Doe + + + +=== TEST 11: the plugins bound on the service should use the latest configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "header": "Authorization" + }, + "proxy-rewrite": { + "uri": "/hello1" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": [ + "GET" + ], + "uri": "/hello", + "service_id": "1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local headers = { + ["Authorization"] = "auth-jack" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.print(res.body) + + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "header": "Authorization" + }, + "proxy-rewrite": { + "uri": "/server_port" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + local res, err = httpc:request_uri(uri, {headers = headers}) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say(res.body) + } + } +--- request +GET /t +--- response_body +hello1 world +1980 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/consumer-plugin2.t b/CloudronPackages/APISIX/apisix-source/t/node/consumer-plugin2.t new file mode 100644 index 0000000..6c79ad8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/consumer-plugin2.t @@ -0,0 +1,470 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +log_level('info'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->response_body) { + $block->set_value("response_body", "passed\n"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + + +our $debug_config = t::APISIX::read_file("conf/debug.yaml"); +$debug_config =~ s/basic:\n enable: false/basic:\n enable: true/; +$debug_config =~ s/hook_conf:\n enable: false/hook_conf:\n enable: true/; + +run_tests; + +__DATA__ + +=== TEST 1: configure non-auth plugins in the consumer and run it's rewrite phase +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "proxy-rewrite": { + "uri": "/uri/plugin_proxy_rewrite", + "headers": { + "X-Api-Engine": "APISIX", + "X-CONSUMER-ID": "1" + } + } + } + }]] + ) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit routes +--- request +GET /hello +--- more_headers +apikey: auth-jack +--- response_body +uri: /uri/plugin_proxy_rewrite +apikey: auth-jack +host: localhost +x-api-engine: APISIX +x-consumer-id: 1 +x-consumer-username: jack +x-real-ip: 127.0.0.1 + + + +=== TEST 3: trace plugins info for debug +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local ngx_re = require("ngx.re") + local http = require 
"resty.http" + local httpc = http.new() + local headers = {} + headers["apikey"] = "auth-jack" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = headers, + }) + local debug_header = res.headers["Apisix-Plugins"] + local arr = ngx_re.split(debug_header, ", ") + local hash = {} + for i, v in ipairs(arr) do + hash[v] = true + end + ngx.status = res.status + ngx.say(json.encode(hash)) + } + } +--- response_body +{"key-auth":true,"proxy-rewrite":true} + + + +=== TEST 4: configure non-auth plugins in the route and run it's rewrite phase +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + } + } + }]] + ) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "proxy-rewrite": { + "uri": "/uri/plugin_proxy_rewrite", + "headers": { + "X-Api-Engine": "APISIX", + "X-CONSUMER-ID": "1" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit routes +--- request +GET /hello +--- more_headers +apikey: auth-jack +--- response_body +uri: /uri/plugin_proxy_rewrite +apikey: auth-jack +host: localhost +x-api-engine: APISIX +x-consumer-id: 1 +x-consumer-username: jack +x-real-ip: 127.0.0.1 + + + +=== TEST 6: trace plugins info for debug +--- debug_config eval: $::debug_config +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local ngx_re = require("ngx.re") + local http = require "resty.http" + local httpc = http.new() + local headers = {} + headers["apikey"] = "auth-jack" + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = headers, + }) + local debug_header = res.headers["Apisix-Plugins"] + local arr = ngx_re.split(debug_header, ", ") + local hash = {} + for i, v in ipairs(arr) do + hash[v] = true + end + ngx.status = res.status + ngx.say(json.encode(hash)) + } + } +--- response_body +{"key-auth":true,"proxy-rewrite":true} + + + +=== TEST 7: configure non-auth plugins in the consumer and run it's rewrite phase +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "ip-restriction": { + "blacklist": [ + "127.0.0.0/24" + ] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit routes and ip-restriction work well +--- request +GET /hello +--- more_headers +apikey: auth-jack +--- error_code: 403 +--- response_body +{"message":"Your IP address is not allowed"} + + + +=== TEST 9: use the latest consumer modifiedIndex as lrucache key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "whitelist": 
["1.1.1.1"] + }, + "basic-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugin_config_id": "1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local headers = { + ["Authorization"] = "Basic Zm9vOmJhcg==" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + ngx.print(res.body) + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "whitelist": ["1.1.1.1", "127.0.0.1"] + }, + "basic-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local res, err = httpc:request_uri(uri, {headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bala" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local headers = { + ["Authorization"] = "Basic Zm9vOmJhbGE=" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +{"message":"Your IP address is not allowed"} +hello world +hello world + + + +=== TEST 10: consumer should work if the etcd connection failed during starting +--- extra_init_by_lua +local etcd_apisix = require("apisix.core.etcd") +etcd_apisix.get_etcd_syncer = function () + return nil, "", "ouch" +end +--- config + location /t { + content_by_lua_block 
{ + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local headers = { + ["Authorization"] = "Basic Zm9vOmJhbGE=" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +hello world +--- error_log +failed to fetch data from etcd diff --git a/CloudronPackages/APISIX/apisix-source/t/node/consumer-plugin3.t b/CloudronPackages/APISIX/apisix-source/t/node/consumer-plugin3.t new file mode 100644 index 0000000..345bccd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/consumer-plugin3.t @@ -0,0 +1,159 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: add consumer with csrf plugin (data encryption enabled) +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "key-a" + }, + "csrf": { + "key": "userkey", + "expires": 1000000000 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + + -- verify csrf key is decrypted in admin API + local code, message, res = t('/apisix/admin/consumers/jack', + ngx.HTTP_GET + ) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + local consumer = json.decode(res) + ngx.say(consumer.value.plugins["csrf"].key) + + -- verify csrf key is encrypted in etcd + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumers/jack')) + ngx.say(res.body.node.value.plugins["csrf"].key) + } + } +--- request +GET /t +--- response_body +userkey +mt39FazQccyMqt4ctoRV7w== +--- no_error_log +[error] + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: invalid request - no csrf token +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- request +POST /hello +--- 
more_headers +apikey: key-a +--- error_code: 401 +--- response_body +{"error_msg":"no csrf token in headers"} + + + +=== TEST 4: valid request - with csrf token +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- request +POST /hello +--- more_headers +apikey: key-a +apisix-csrf-token: eyJyYW5kb20iOjAuNDI5ODYzMTk3MTYxMzksInNpZ24iOiI0ODRlMDY4NTkxMWQ5NmJhMDc5YzQ1ZGI0OTE2NmZkYjQ0ODhjODVkNWQ0NmE1Y2FhM2UwMmFhZDliNjE5OTQ2IiwiZXhwaXJlcyI6MjY0MzExOTYyNH0= +Cookie: apisix-csrf-token=eyJyYW5kb20iOjAuNDI5ODYzMTk3MTYxMzksInNpZ24iOiI0ODRlMDY4NTkxMWQ5NmJhMDc5YzQ1ZGI0OTE2NmZkYjQ0ODhjODVkNWQ0NmE1Y2FhM2UwMmFhZDliNjE5OTQ2IiwiZXhwaXJlcyI6MjY0MzExOTYyNH0= +--- response_body +hello world +--- no_error_log +[error] diff --git a/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-basic-auth.t b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-basic-auth.t new file mode 100644 index 0000000..c2e55ac --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-basic-auth.t @@ -0,0 +1,137 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: enable basic-auth on the route /hello +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create a consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: create a credential with basic-auth plugin enabled for the consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": {"username": "foo", "password": "bar"} + } + }]], + [[{ + "value":{ + "id":"34010989-ce4e-4d61-9493-b54cca8edb31", + "plugins":{ + "basic-auth":{"username":"foo","password":"+kOEVUuRc5rC5ZwvvAMLwg=="} + } + }, + "key":"/apisix/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: access with invalid basic-auth (invalid password) +--- request +GET /hello +--- more_headers +Authorization: Basic Zm9vOmZvbwo= +--- error_code: 401 +--- response_body +{"message":"Invalid user authorization"} + + + +=== TEST 5: access with valid basic-auth +--- request +GET /hello 
+--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-incremental-effective.t b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-incremental-effective.t new file mode 100644 index 0000000..ae619df --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-incremental-effective.t @@ -0,0 +1,125 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: test continuous watch etcd changes without APISIX reload +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- enable key-auth on /hello + t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + ngx.sleep(0.2) -- On some machines, changes may not be instantly watched, so sleep makes the test more robust. 
+ + -- request /hello without key-auth should response status 401 + local code, body = t('/hello', ngx.HTTP_GET) + assert(code == 401) + + -- add a consumer jack + t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"jack" + }]], + [[{ + "key": "/apisix/consumers/jack", + "value": + { + "username":"jack" + } + }]] + ) + + -- create first credential for consumer jack + t('/apisix/admin/consumers/jack/credentials/the-first-one', + ngx.HTTP_PUT, + [[{ + "plugins":{"key-auth":{"key":"p7a3k6r4t9"}} + }]], + [[{ + "value":{ + "id":"the-first-one", + "plugins":{"key-auth":{"key":"p7a3k6r4t9"}} + }, + "key":"/apisix/consumers/jack/credentials/the-first-one" + }]] + ) + ngx.sleep(0.2) + + -- request /hello with credential a + local headers = {} + headers["apikey"] = "p7a3k6r4t9" + code, body = t('/hello', ngx.HTTP_GET, "", nil, headers) + assert(code == 200) + + -- create second credential for consumer jack + t('/apisix/admin/consumers/jack/credentials/the-second-one', + ngx.HTTP_PUT, + [[{ + "plugins":{"key-auth":{"key":"v8p3q6r7t9"}} + }]], + [[{ + "value":{ + "id":"the-second-one", + "plugins":{"key-auth":{"key":"v8p3q6r7t9"}} + }, + "key":"/apisix/consumers/jack/credentials/the-second-one" + }]] + ) + ngx.sleep(0.2) + + -- request /hello with credential b + headers["apikey"] = "v8p3q6r7t9" + code, body = t('/hello', ngx.HTTP_GET, "", nil, headers) + assert(code == 200) + + -- delete the first credential + code, body = t('/apisix/admin/consumers/jack/credentials/the-first-one', ngx.HTTP_DELETE) + assert(code == 200) + ngx.sleep(0.2) + + -- request /hello with credential a + headers["apikey"] = "p7a3k6r4t9" + code, body = t('/hello', ngx.HTTP_GET, "", nil, headers) + assert(code == 401) + } + } +--- request +GET /t diff --git a/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-jwt-auth.t b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-jwt-auth.t new file mode 100644 index 0000000..f95498d --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-jwt-auth.t @@ -0,0 +1,137 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: enable jwt-auth on the route /hello +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create a consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: create a credential with jwt-auth plugin enabled for the consumer +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {"key": "user-key", "secret": "my-secret-key"} + } + }]], + [[{ + "value":{ + "id":"34010989-ce4e-4d61-9493-b54cca8edb31", + "plugins":{ + "jwt-auth": {"key": "user-key", "secret": "kK0lkbzXrE7aiTiyK/Z0Sw=="} + } + }, + "key":"/apisix/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: access with invalid JWT token +--- request +GET /hello +--- more_headers +Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJqd3QtdmF1bHQta2V5IiwiZXhwIjoxNjk1MTM4NjM1fQ.Au2liSZ8eQXUJR3SJESwNlIfqZdNyRyxIJK03L4dk_g +--- error_code: 401 +--- response_body +{"message":"Invalid user key in JWT token"} + + + +=== TEST 5: access with valid JWT token in header +--- request +GET /hello +--- more_headers +Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-key-auth.t b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-key-auth.t new file mode 100644 index 0000000..558616d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-key-auth.t @@ -0,0 +1,137 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: enable key-auth on the route /hello +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: create a credential with key-auth plugin enabled for the consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {"key": "p7a3k6r4t9"} + } + }]], + [[{ + "value":{ + "id":"34010989-ce4e-4d61-9493-b54cca8edb31", + "plugins":{ + "key-auth": {"key": "fsFPtg7BtXMXkvSnS9e1zw=="} + } + }, + "key":"/apisix/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31" + }]] + ) + + ngx.status = code + 
ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: request with an invalid key: should be not OK +--- request +GET /hello +--- more_headers +apikey: 123 +--- error_code: 401 +--- response_body +{"message":"Invalid API key in request"} + + + +=== TEST 5: request with the valid key: should be OK +--- request +GET /hello +--- more_headers +apikey: p7a3k6r4t9 +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-multi-credentials.t b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-multi-credentials.t new file mode 100644 index 0000000..6b60bb3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-multi-credentials.t @@ -0,0 +1,236 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: enable key-auth plugin on /hello +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- basic-auth on route 1 + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create a consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: create the first credential with the key-auth plugin enabled for the consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/the-first-one', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {"key": "p7a3k6r4t9"} + } + }]], + [[{ + "value":{ + "id":"the-first-one", + "plugins":{ + "key-auth": {"key": "fsFPtg7BtXMXkvSnS9e1zw=="} + } + }, + "key":"/apisix/consumers/jack/credentials/the-first-one" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: create the second credential with the key-auth plugin enabled for the consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/the-second-one', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {"key": "v8p3q6r7t9"} + } + }]], + [[{ + "value":{ + 
"id":"the-second-one", + "plugins":{ + "key-auth": {"key": "QwGua2GjZjOiq+Mj3Mef2g=="} + } + }, + "key":"/apisix/consumers/jack/credentials/the-second-one" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: request /hello with the key of the first credential: should be OK +--- request +GET /hello +--- more_headers +apikey: p7a3k6r4t9 +--- response_body +hello world + + + +=== TEST 6: request /hello with the key of second credential: should be OK +--- request +GET /hello +--- more_headers +apikey: v8p3q6r7t9 +--- response_body +hello world + + + +=== TEST 7: delete the first credential +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/the-first-one', ngx.HTTP_DELETE) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: request /hello with the key of the first credential: should be not OK +--- request +GET /hello +--- more_headers +apikey: p7a3k6r4t9 +--- error_code: 401 + + + +=== TEST 9: request /hello with the key of the second credential: should be OK +--- request +GET /hello +--- more_headers +apikey: v8p3q6r7t9 +--- response_body +hello world + + + +=== TEST 10: delete the second credential +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/the-second-one', ngx.HTTP_DELETE) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: request /hello with the key of the second credential: should be not OK +--- request +GET /hello +--- more_headers +apikey: v8p3q6r7t9 +--- error_code: 401 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-set-request-header.t 
b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-set-request-header.t new file mode 100644 index 0000000..51148d0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-set-request-header.t @@ -0,0 +1,245 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: enable key-auth on the route /echo +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: create a credential with key-auth plugin enabled and 'custom_id' label for the consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {"key": "p7a3k6r4t9"} + }, + "labels": { + "custom_id": "271fc4a264bb" + } + }]], + [[{ + "value":{ + "id":"34010989-ce4e-4d61-9493-b54cca8edb31", + "plugins":{ + "key-auth": {"key": "fsFPtg7BtXMXkvSnS9e1zw=="} + }, + "labels": { + "custom_id": "271fc4a264bb" + } + }, + "key":"/apisix/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: request the route: 'x-consumer-username' and 'x-credential-identifier' is in response headers and 'x-consumer-custom-id' is not +--- request +GET /echo HTTP/1.1 +--- more_headers +apikey: p7a3k6r4t9 +--- 
response_headers +x-consumer-username: jack +x-credential-identifier: 34010989-ce4e-4d61-9493-b54cca8edb31 +!x-consumer-custom-id + + + +=== TEST 5: update the consumer add label "custom_id" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "labels": { + "custom_id": "495aec6a" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: request the route: the value of 'x-consumer-custom-id' come from the consumer but not the credential or downstream +--- request +GET /echo HTTP/1.1 +--- more_headers +apikey: p7a3k6r4t9 +x-consumer-custom-id: 271fc4a264bb +--- response_headers +x-consumer-username: jack +x-credential-identifier: 34010989-ce4e-4d61-9493-b54cca8edb31 +x-consumer-custom-id: 495aec6a + + + +=== TEST 7: delete the credential +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31', ngx.HTTP_DELETE) + + assert(code == 200) + ngx.status = code + } + } +--- request +GET /t +--- response_body + + + +=== TEST 8: update the consumer to enable a key-auth plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "p7a3k6r4t9" + } + } + }]], + [[{ + "value": { + "username": "jack", + "plugins": { + "key-auth": { + "key": "fsFPtg7BtXMXkvSnS9e1zw==" + } + } + }, + "key": "/apisix/consumers/jack" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: request the route with headers x-credential-identifier and x-consumer-custom-id: these headers will be 
removed +--- request +GET /echo HTTP/1.1 +--- more_headers +apikey: p7a3k6r4t9 +x-credential-identifier: 34010989-ce4e-4d61-9493-b54cca8edb31 +x-consumer-custom-id: 271fc4a264bb +--- response_headers +x-consumer-username: jack +!x-credential-identifier +!x-consumer-custom-id diff --git a/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-work-with-other-plugin.t b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-work-with-other-plugin.t new file mode 100644 index 0000000..14bfc13 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/credential-plugin-work-with-other-plugin.t @@ -0,0 +1,171 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: enable key-auth on /hello +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- basic-auth on route 1 + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create a consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: create a credential with the key-auth plugin enabled for the consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {"key": "p7a3k6r4t9"} + } + }]], + [[{ + "value":{ + "id":"34010989-ce4e-4d61-9493-b54cca8edb31", + "plugins":{ + "key-auth": {"key": "fsFPtg7BtXMXkvSnS9e1zw=="} + } + }, + "key":"/apisix/consumers/jack/credentials/34010989-ce4e-4d61-9493-b54cca8edb31" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: request the route /hello multi times: should be OK +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: p7a3k6r4t9 +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 5: enable plugin `limit-count` for the consumer +--- config + location 
/t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: request the route /hello multi times: should be not OK, exceed the limit-count +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: p7a3k6r4t9 +--- error_code eval +[200, 200, 503, 503] diff --git a/CloudronPackages/APISIX/apisix-source/t/node/data_encrypt.t b/CloudronPackages/APISIX/apisix-source/t/node/data_encrypt.t new file mode 100644 index 0000000..0dece7d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/data_encrypt.t @@ -0,0 +1,571 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +# the sensitive data is encrypted in etcd, and it is normal to read it from the admin API +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/consumers/foo', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["basic-auth"].password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumers/foo')) + ngx.say(res.body.node.value.plugins["basic-auth"].password) + + } + } +--- response_body +bar +77+NmbYqNfN+oLm0aX5akg== + + + +=== TEST 2: enable basic auth plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: verify +--- yaml_config +apisix: + 
data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- request +GET /hello +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_body +hello world + + + +=== TEST 4: multiple auth plugins work well +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + }, + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + local code, message, res = t('/apisix/admin/consumers/foo', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 5: enable multiple auth plugins on route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": {}, + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: verify +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- request +GET /hello +--- more_headers +apikey: auth-one +Authorization: Basic Zm9vOmJhcg== +--- response_body +hello world + + + +=== TEST 7: disable data_encryption +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: false + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = 
require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumers/foo')) + ngx.say(res.body.node.value.plugins["basic-auth"].password) + + } + } +--- response_body +bar + + + +=== TEST 8: etcd store unencrypted password, enable data_encryption, decryption fails, use original password +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local core = require("apisix.core") + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res, err = core.etcd.set("/consumers/foo2", core.json.decode([[{ + "username":"foo2", + "plugins":{ + "basic-auth":{ + "username":"foo2", + "password":"bar" + } + } + }]])) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/consumers/foo2', + ngx.HTTP_GET + ) + res = core.json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["basic-auth"].password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumers/foo2')) + ngx.say(res.body.node.value.plugins["basic-auth"].password) + } + } +--- response_body +bar +bar +--- error_log +failed to decrypt the conf of plugin [basic-auth] key [password], err: decrypt ssl key failed + + + +=== TEST 9: etcd stores both encrypted and unencrypted data +# enable data_encryption, decryption of 
encrypted data succeeds +# decryption of unencrypted data fails, make sure it works well +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local core = require("apisix.core") + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res, err = core.etcd.set("/consumers/foo2", core.json.decode([[{ + "username":"foo2", + "plugins":{ + "basic-auth":{ + "username":"foo2", + "password":"bar" + }, + "key-auth": { + "key": "vU/ZHVJw7b0XscDJ1Fhtig==" + } + } + }]])) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/consumers/foo2', + ngx.HTTP_GET + ) + res = core.json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["basic-auth"].password) + ngx.say(res.value.plugins["key-auth"].key) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumers/foo2')) + ngx.say(res.body.node.value.plugins["basic-auth"].password) + ngx.say(res.body.node.value.plugins["key-auth"].key) + } + } +--- response_body +bar +auth-two +bar +vU/ZHVJw7b0XscDJ1Fhtig== +--- error_log +failed to decrypt the conf of plugin [basic-auth] key [password], err: decrypt ssl key failed + + + +=== TEST 10: verify, use the foo2 consumer +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- request +GET /hello +--- more_headers +apikey: auth-two +Authorization: Basic Zm9vMjpiYXI= +--- response_body +hello world + + + +=== TEST 11: keyring rotate, encrypt with edd1c9f0985e76a2 +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = 
require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: keyring rotate, decrypt with qeddd145sfvddff3 would fail, but encrypt with edd1c9f0985e76a2 would success +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - qeddd145sfvddff3 + - edd1c9f0985e76a2 +--- request +GET /hello +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_body +hello world + + + +=== TEST 13: search consumer list +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + -- dletet exist consumers + t('/apisix/admin/consumers/foo', ngx.HTTP_DELETE) + t('/apisix/admin/consumers/foo2', ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "test", + "plugins": { + "basic-auth": { + "username": "test", + "password": "test" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + 
ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/consumers', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + local pwds = {} + table.insert(pwds, res.list[1].value.plugins["basic-auth"].password) + table.insert(pwds, res.list[2].value.plugins["basic-auth"].password) + + ngx.say(json.encode(pwds)) + } + } +--- response_body +["bar","test"] diff --git a/CloudronPackages/APISIX/apisix-source/t/node/data_encrypt2.t b/CloudronPackages/APISIX/apisix-source/t/node/data_encrypt2.t new file mode 100644 index 0000000..d947622 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/data_encrypt2.t @@ -0,0 +1,742 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: data encryption work well with plugins that not the auth plugins +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "abc123", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "batch_max_size":1, + "inactive_timeout":1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + ngx.sleep(0.5) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["clickhouse-logger"].password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["clickhouse-logger"].password) + } + } +--- response_body +abc123 +7ipXoKyiZZUAgf3WWNPI5A== + + + +=== TEST 2: verify +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +clickhouse body: INSERT INTO t FORMAT JSONEachRow +clickhouse headers: x-clickhouse-key:abc123 +clickhouse headers: x-clickhouse-user:default 
+clickhouse headers: x-clickhouse-database:default +--- wait: 5 + + + +=== TEST 3: POST and get list +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes', + ngx.HTTP_POST, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "abc123", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "batch_max_size":1, + "inactive_timeout":1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes', + ngx.HTTP_GET + ) + res = json.decode(res) + + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.list[1].value.plugins["clickhouse-logger"].password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local id = res.list[1].value.id + local key = "/routes/" .. 
id + local res = assert(etcd.get(key)) + + ngx.say(res.body.node.value.plugins["clickhouse-logger"].password) + } + } +--- response_body +abc123 +7ipXoKyiZZUAgf3WWNPI5A== + + + +=== TEST 4: PATCH +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "abc123", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "batch_max_size":1, + "inactive_timeout":1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + ngx.sleep(0.1) + + local code, body = t('/apisix/admin/routes/1/plugins', + ngx.HTTP_PATCH, + [[{ + "clickhouse-logger": { + "user": "default", + "password": "def456", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "batch_max_size":1, + "inactive_timeout":1 + } + }]] + ) + + ngx.sleep(0.1) + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["clickhouse-logger"].password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["clickhouse-logger"].password) + } + } +--- response_body +def456 +3hlZu5mwUbqROm+cy0Vi9A== + + + +=== TEST 5: data encryption work well with services +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + 
location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "abc123", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "batch_max_size":1, + "inactive_timeout":1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.1) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": "1", + "uri": "/opentracing" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/services/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + ngx.say(res.value.plugins["clickhouse-logger"].password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/services/1')) + ngx.say(res.body.node.value.plugins["clickhouse-logger"].password) + } + } +--- response_body +abc123 +7ipXoKyiZZUAgf3WWNPI5A== + + + +=== TEST 6: verify +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +clickhouse body: INSERT INTO t FORMAT JSONEachRow +clickhouse headers: x-clickhouse-key:abc123 +clickhouse headers: x-clickhouse-user:default +clickhouse headers: x-clickhouse-database:default +--- wait: 5 + + + +=== TEST 7: data encryption work well with plugin_configs +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + 
location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "abc123", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "batch_max_size":1, + "inactive_timeout":1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.1) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugin_config_id": 1, + "uri": "/opentracing", + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + ngx.say(res.value.plugins["clickhouse-logger"].password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/plugin_configs/1')) + ngx.say(res.body.node.value.plugins["clickhouse-logger"].password) + } + } +--- response_body +abc123 +7ipXoKyiZZUAgf3WWNPI5A== + + + +=== TEST 8: verify +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +clickhouse body: INSERT INTO t FORMAT JSONEachRow +clickhouse headers: x-clickhouse-key:abc123 +clickhouse headers: x-clickhouse-user:default +clickhouse headers: x-clickhouse-database:default +--- wait: 5 + + + +=== TEST 9: data encryption work well with global rule +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 
+--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "test", + "plugins": { + "basic-auth": { + "username": "test", + "password": "test" + } + }, + "desc": "test description" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + return + end + -- sleep for data sync + ngx.sleep(0.5) + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/consumers/test', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + ngx.say(res.value.plugins["basic-auth"].password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumers/test')) + ngx.say(res.body.node.value.plugins["basic-auth"].password) + + -- hit the route with authorization + local code, body = t('/hello', + ngx.HTTP_PUT, + nil, + nil, + {Authorization = "Basic dGVzdDp0ZXN0"} + ) + if code ~= 200 then + ngx.status = code + return + end + + -- delete global rule + t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + ngx.say(body) + } + } +--- request +GET /t +--- response_body +test +9QKrmTT3TkWGvjlIoe5XXw== +passed + + + +=== TEST 10: data encryption work well with consumer groups +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { 
+ local json = require("toolkit.json") + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/consumer_groups/company_a', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + + local code, body = t('/apisix/admin/consumers/foobar', + ngx.HTTP_PUT, + [[{ + "username": "foobar", + "plugins": { + "key-auth": { + "key": "auth-two" + } + }, + "group_id": "company_a" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, key is decrypted + local code, message, res = t('/apisix/admin/consumers/foobar', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["key-auth"].key) + + -- get plugin conf from etcd, key is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumers/foobar')) + ngx.say(res.body.node.value.plugins["key-auth"].key) + } + } +--- response_body +auth-two +vU/ZHVJw7b0XscDJ1Fhtig== + + + +=== TEST 11: verify data encryption +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local t = require("lib.test_admin").test + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "key-auth": {} + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.1) + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local ress = {} + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["apikey"] = "auth-two" + } + }) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503] + + + +=== TEST 12: verify whether print warning log when disable data_encryption +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + if code > 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- reponse_body +passed +--- no_error_log +failed to get schema for plugin diff --git a/CloudronPackages/APISIX/apisix-source/t/node/ewma.t b/CloudronPackages/APISIX/apisix-source/t/node/ewma.t new file mode 100644 index 0000000..776a651 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/ewma.t @@ -0,0 +1,360 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +#no_long_string(); +no_root_location(); +log_level('info'); +worker_connections(256); +run_tests; + +__DATA__ + +=== TEST 1: add upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 100, + "127.0.0.1:1981": 100 + }, + "type": "ewma" + }, + "uri": "/ewma" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: about latency +--- timeout: 5 +--- config + location /t { + content_by_lua_block { + --node: "127.0.0.1:1980": latency is 0.001 + --node: "127.0.0.1:1981": latency is 0.005 + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/ewma" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + httpc:set_timeout(1000) + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":1,"port":"1981"},{"count":11,"port":"1980"}] +--- error_code: 200 + + + +=== TEST 3: about frequency +--- timeout: 30 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/ewma" + + --node: "127.0.0.1:1980": latency is 0.001 + --node: "127.0.0.1:1981": latency is 0.005 + local ports_count = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + end + + --remove the 1981 node, + --add the 1982 node + --keep two nodes for triggering ewma logic in server_picker function of balancer phase + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 100, + "127.0.0.1:1982": 100 + }, + "type": "ewma" + }, + "uri": "/ewma" + }]] + ) + + if code ~= 200 then + ngx.say("update route failed") + return + end + + ngx.sleep(11) + --keep the node 1980 hot + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + end + + --recover the 1981 node + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 100, + "127.0.0.1:1981": 100 + }, + "type": "ewma" + }, + "uri": "/ewma" + }]] + ) + + if code ~= 200 then + ngx.say("update route failed") + return + end + + --should select the 1981 node,because it is idle + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + ngx.say(require("toolkit.json").encode({port = res.body, count = 1})) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +{"count":1,"port":"1981"} +--- error_code: 200 + + + +=== TEST 4: about filter tried servers +--- timeout: 10 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + --remove the 1981 node, + --add the 9527 node (invalid node) + --keep two nodes for triggering ewma logic in server_picker function of balancer phase + local code, body = t('/apisix/admin/routes/1', + 
ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:9527": 1 + }, + "type": "ewma", + "timeout": { + "connect": 0.1, + "send": 0.5, + "read": 0.5 + } + }, + "uri": "/ewma" + }]] + ) + + if code ~= 200 then + ngx.say("update route failed") + return + end + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/ewma" + + --should always select the 1980 node, because 0 is invalid + local t = {} + local ports_count = {} + for i = 1, 12 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + httpc:set_timeout(2000) + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1980"}] +--- error_code: 200 +--- error_log +Connection refused) while connecting to upstream + + + +=== TEST 5: about all endpoints have been retried +--- timeout: 10 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + --add the 9527 node (invalid node) + --add the 9528 node (invalid node) + --keep two nodes for triggering ewma logic in server_picker function of balancer phase + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:9527": 1, + "127.0.0.1:9528": 1 + }, + "type": "ewma", + "timeout": { + "connect": 0.1, + "send": 0.5, + "read": 0.5 + } + }, + "uri": "/ewma" + }]] + ) + + if code ~= 200 then + ngx.say("update 
route failed") + return + end + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/ewma" + + --should always return 502, because both 9527 and 9528 are invalid + local t = {} + local ports_count = {} + for i = 1, 12 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + httpc:set_timeout(2000) + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + ports_count[res.status] = (ports_count[res.status] or 0) + 1 + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":502}] +--- error_code: 200 +--- error_log +Connection refused) while connecting to upstream diff --git a/CloudronPackages/APISIX/apisix-source/t/node/filter_func.t b/CloudronPackages/APISIX/apisix-source/t/node/filter_func.t new file mode 100644 index 0000000..41886cb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/filter_func.t @@ -0,0 +1,75 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route with filter_func +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "filter_func": "function(vars) + return vars['arg_a1'] == 'a1' and vars['arg_a2'] == 'a2' + end", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- request +GET /hello?a1=a1&a2=a2 +--- response_body +hello world + + + +=== TEST 3: miss route +--- request +GET /hello?a1=xxxx&a2=xxxx +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} diff --git a/CloudronPackages/APISIX/apisix-source/t/node/global-rule.t b/CloudronPackages/APISIX/apisix-source/t/node/global-rule.t new file mode 100644 index 0000000..0e61d68 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/global-rule.t @@ -0,0 +1,419 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: /not_found +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 5: /not_found +--- request +GET /hello +--- error_code: 503 + + + +=== TEST 6: global rule for internal api (should limit) +--- yaml_config +plugins: + - limit-count + - node-status +--- request +GET /apisix/status +--- 
error_code: 503 + + + +=== TEST 7: update global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "X-VERSION":"1.0" + } + }, + "uri-blocker": { + "block_rules": ["select.+(from|limit)", "(?:(union(.*?)select))"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: set one more global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "X-TEST":"test" + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit global rules +--- request +GET /hello?name=;union%20select%20 +--- error_code: 403 +--- response_headers +X-VERSION: 1.0 +X-TEST: test + + + +=== TEST 10: hit global rules by internal api (only check uri-blocker) +--- yaml_config +plugins: + - response-rewrite + - uri-blocker + - node-status +--- request +GET /apisix/status?name=;union%20select%20 +--- error_code: 403 +--- response_headers +X-VERSION: 1.0 +X-TEST: test + + + +=== TEST 11: delete global rules +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/global_rules/2', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + + local code, body = t('/not_found', ngx.HTTP_GET) + ngx.say(code) + local code, body = t('/not_found', ngx.HTTP_GET) + ngx.say(code) + } + } +--- request +GET /t +--- response_body 
+passed +404 +404 + + + +=== TEST 12: empty global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "changed\n" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: hit global rules +--- request +GET /hello +--- response_body +changed + + + +=== TEST 14: global rule works with the consumer, after deleting the global rule, ensure no stale plugins remaining +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "test", + "plugins": { + "basic-auth": { + "username": "test", + "password": "test" + } + }, + "desc": "test description" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": {} + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + -- sleep for data sync + ngx.sleep(0.5) + + -- hit the route without authorization, should be 401 + local code, body = t('/hello', + ngx.HTTP_PUT + ) + + if code ~= 401 then + ngx.status = 400 + return + end + + -- hit the route with authorization + local 
code, body = t('/hello', + ngx.HTTP_PUT, + nil, + nil, + {Authorization = "Basic dGVzdDp0ZXN0"} + ) + + if code ~= 200 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE, + [[{ + "plugins": { + "basic-auth": {} + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.sleep(0.5) + -- hit the route with authorization, should be 200 + local code, body = t('/hello', + ngx.HTTP_PUT + ) + + if code ~= 200 then + ngx.status = code + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy-mtls.t b/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy-mtls.t new file mode 100644 index 0000000..bb5efcc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy-mtls.t @@ -0,0 +1,102 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: Unary API grpcs proxy test with mTLS +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpcs + tls: + client_cert: "-----BEGIN CERTIFICATE-----\nMIIDUzCCAjugAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE8wDQYJKoZIhvcNAQEN\nBQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG\nWmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN\nMjIxMjAxMTAxOTU3WhcNNDIwODE4MTAxOTU3WjBOMQswCQYDVQQGEwJjbjESMBAG\nA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGjAYBgNVBAMMEWNsaWVu\ndC5hcGlzaXguZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzypq\nkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5oIHkQLfeaaLcd4ycFcZw\nFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6WxcOza4VmfcrKqj27oodr\noqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv+e6HaAuw8MvcsEo+MQwu\ncTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E0s+uYKzN0Cyef2C6VtBJ\nKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT/FpZSXm4iSy0a5qTYhkF\nrFdV1YuYYZL5YGl9aQIDAQABox8wHTAbBgNVHREEFDASghBhZG1pbi5hcGlzaXgu\nZGV2MA0GCSqGSIb3DQEBDQUAA4IBAQBepRpwWdckZ6QdL5EuufYwU7p5SIqkVL/+\nN4/l5YSjPoAZf/M6XkZu/PsLI9/kPZN/PX4oxjZSDH14dU9ON3JjxtSrebizcT8V\naQ13TeW9KSv/i5oT6qBmj+V+RF2YCUhyzXdYokOfsSVtSlA1qMdm+cv0vkjYcImV\nl3L9nVHRPq15dY9sbmWEtFBWvOzqNSuQYax+iYG+XEuL9SPaYlwKRC6eS/dbXa1T\nPPWDQad2X/WmhxPzEHvjSl2bsZF1u0GEdKyhXWMOLCLiYIJo15G7bMz8cTUvkDN3\n6WaWBd6bd2g13Ho/OOceARpkR/ND8PU78Y8cq+zHoOSq
H+1aly5H\n-----END CERTIFICATE-----\n" + client_key: "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEAzypqkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5\noIHkQLfeaaLcd4ycFcZwFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6W\nxcOza4VmfcrKqj27oodroqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv\n+e6HaAuw8MvcsEo+MQwucTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E\n0s+uYKzN0Cyef2C6VtBJKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT\n/FpZSXm4iSy0a5qTYhkFrFdV1YuYYZL5YGl9aQIDAQABAoIBAD7tUG//lnZnsj/4\nJXONaORaFj5ROrOpFPuRemS+egzqFCuuaXpC2lV6RHnr+XHq6SKII1WfagTb+lt/\nvs760jfmGQSxf1mAUidtqcP+sKc/Pr1mgi/SUTawz8AYEFWD6PHmlqBSLTYml+La\nckd+0pGtk49wEnYSb9n+cv640hra9AYpm9LXUFaypiFEu+xJhtyKKWkmiVGrt/X9\n3aG6MuYeZplW8Xq1L6jcHsieTOB3T+UBfG3O0bELBgTVexOQYI9O4Ejl9/n5/8WP\nAbIw7PaAYc7fBkwOGh7/qYUdHnrm5o9MiRT6dPxrVSf0PZVACmA+JoNjCPv0Typf\n3MMkHoECgYEA9+3LYzdP8j9iv1fP5hn5K6XZAobCD1mnzv3my0KmoSMC26XuS71f\nvyBhjL7zMxGEComvVTF9SaNMfMYTU4CwOJQxLAuT69PEzW6oVEeBoscE5hwhjj6o\n/lr5jMbt807J9HnldSpwllfj7JeiTuqRcCu/cwqKQQ1aB3YBZ7h5pZkCgYEA1ejo\nKrR1hN2FMhp4pj0nZ5+Ry2lyIVbN4kIcoteaPhyQ0AQ0zNoi27EBRnleRwVDYECi\nXAFrgJU+laKsg1iPjvinHibrB9G2p1uv3BEh6lPl9wPFlENTOjPkqjR6eVVZGP8e\nVzxYxIo2x/QLDUeOpxySdG4pdhEHGfvmdGmr2FECgYBeknedzhCR4HnjcTSdmlTA\nwI+p9gt6XYG0ZIewCymSl89UR9RBUeh++HQdgw0z8r+CYYjfH3SiLUdU5R2kIZeW\nzXiAS55OO8Z7cnWFSI17sRz+RcbLAr3l4IAGoi9MO0awGftcGSc/QiFwM1s3bSSz\nPAzYbjHUpKot5Gae0PCeKQKBgQCHfkfRBQ2LY2WDHxFc+0+Ca6jF17zbMUioEIhi\n/X5N6XowyPlI6MM7tRrBsQ7unX7X8Rjmfl/ByschsTDk4avNO+NfTfeBtGymBYWX\nN6Lr8sivdkwoZZzKOSSWSzdos48ELlThnO/9Ti706Lg3aSQK5iY+aakJiC+fXdfT\n1TtsgQKBgQDRYvtK/Cpaq0W6wO3I4R75lHGa7zjEr4HA0Kk/FlwS0YveuTh5xqBj\nwQz2YyuQQfJfJs7kbWOITBT3vuBJ8F+pktL2Xq5p7/ooIXOGS8Ib4/JAS1C/wb+t\nuJHGva12bZ4uizxdL2Q0/n9ziYTiMc/MMh/56o4Je8RMdOMT5lTsRQ==\n-----END RSA PRIVATE KEY-----\n" + nodes: + "127.0.0.1:10053": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- 
response_body +{ + "message": "Hello apisix" +} + + + +=== TEST 2: Bidirectional API grpcs proxy test with mTLS +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHelloBidirectionalStream + methods: [ + POST + ] + upstream: + scheme: grpcs + tls: + client_cert: "-----BEGIN CERTIFICATE-----\nMIIDUzCCAjugAwIBAgIURw+Rc5FSNUQWdJD+quORtr9KaE8wDQYJKoZIhvcNAQEN\nBQAwWDELMAkGA1UEBhMCY24xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG\nWmh1SGFpMRYwFAYDVQQDDA1jYS5hcGlzaXguZGV2MQwwCgYDVQQLDANvcHMwHhcN\nMjIxMjAxMTAxOTU3WhcNNDIwODE4MTAxOTU3WjBOMQswCQYDVQQGEwJjbjESMBAG\nA1UECAwJR3VhbmdEb25nMQ8wDQYDVQQHDAZaaHVIYWkxGjAYBgNVBAMMEWNsaWVu\ndC5hcGlzaXguZGV2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzypq\nkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5oIHkQLfeaaLcd4ycFcZw\nFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6WxcOza4VmfcrKqj27oodr\noqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv+e6HaAuw8MvcsEo+MQwu\ncTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E0s+uYKzN0Cyef2C6VtBJ\nKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT/FpZSXm4iSy0a5qTYhkF\nrFdV1YuYYZL5YGl9aQIDAQABox8wHTAbBgNVHREEFDASghBhZG1pbi5hcGlzaXgu\nZGV2MA0GCSqGSIb3DQEBDQUAA4IBAQBepRpwWdckZ6QdL5EuufYwU7p5SIqkVL/+\nN4/l5YSjPoAZf/M6XkZu/PsLI9/kPZN/PX4oxjZSDH14dU9ON3JjxtSrebizcT8V\naQ13TeW9KSv/i5oT6qBmj+V+RF2YCUhyzXdYokOfsSVtSlA1qMdm+cv0vkjYcImV\nl3L9nVHRPq15dY9sbmWEtFBWvOzqNSuQYax+iYG+XEuL9SPaYlwKRC6eS/dbXa1T\nPPWDQad2X/WmhxPzEHvjSl2bsZF1u0GEdKyhXWMOLCLiYIJo15G7bMz8cTUvkDN3\n6WaWBd6bd2g13Ho/OOceARpkR/ND8PU78Y8cq+zHoOSqH+1aly5H\n-----END CERTIFICATE-----\n" + client_key: "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEpAIBAAKCAQEAzypqkrsJ8MaqpS0kr2SboE9aRKOJzd6mY3AZLq3tFpio5cK5\noIHkQLfeaaLcd4ycFcZwFTpxc+Eth6I0X9on+j4tEibc5IpDnRSAQlzHZzlrOG6W\nxcOza4VmfcrKqj27oodroqXv05r/5yIoRrEN9ZXfA8n2OnjhkP+C3Q68L6dBtPpv\n+e6HaAuw8MvcsEo+MQwucTZyWqWT2UzKVzToW29dHRW+yZGuYNWRh15X09VSvx+E\n0s+uYKzN0Cyef2C6VtBJKmJ3NtypAiPqw7Ebfov2Ym/zzU9pyWPi3P1mYPMKQqUT\n/FpZSXm4iSy0a5qTYhkFrFdV1YuYYZL5YGl9aQIDAQABAoIBAD7tUG//lnZnsj/4\nJXONaORaFj5ROrOpFPuRemS+egzqFCuuaXpC2lV6RHnr+XHq6SKII1WfagTb+lt/\nvs760jfmGQSxf1mAUidtqcP+sKc/Pr1mgi/SUTawz8AYEFWD6PHmlqBSLTYml+La\nckd+0pGtk49wEnYSb9n+cv640hra9AYpm9LXUFaypiFEu+xJhtyKKWkmiVGrt/X9\n3aG6MuYeZplW8Xq1L6jcHsieTOB3T+UBfG3O0bELBgTVexOQYI9O4Ejl9/n5/8WP\nAbIw7PaAYc7fBkwOGh7/qYUdHnrm5o9MiRT6dPxrVSf0PZVACmA+JoNjCPv0Typf\n3MMkHoECgYEA9+3LYzdP8j9iv1fP5hn5K6XZAobCD1mnzv3my0KmoSMC26XuS71f\nvyBhjL7zMxGEComvVTF9SaNMfMYTU4CwOJQxLAuT69PEzW6oVEeBoscE5hwhjj6o\n/lr5jMbt807J9HnldSpwllfj7JeiTuqRcCu/cwqKQQ1aB3YBZ7h5pZkCgYEA1ejo\nKrR1hN2FMhp4pj0nZ5+Ry2lyIVbN4kIcoteaPhyQ0AQ0zNoi27EBRnleRwVDYECi\nXAFrgJU+laKsg1iPjvinHibrB9G2p1uv3BEh6lPl9wPFlENTOjPkqjR6eVVZGP8e\nVzxYxIo2x/QLDUeOpxySdG4pdhEHGfvmdGmr2FECgYBeknedzhCR4HnjcTSdmlTA\nwI+p9gt6XYG0ZIewCymSl89UR9RBUeh++HQdgw0z8r+CYYjfH3SiLUdU5R2kIZeW\nzXiAS55OO8Z7cnWFSI17sRz+RcbLAr3l4IAGoi9MO0awGftcGSc/QiFwM1s3bSSz\nPAzYbjHUpKot5Gae0PCeKQKBgQCHfkfRBQ2LY2WDHxFc+0+Ca6jF17zbMUioEIhi\n/X5N6XowyPlI6MM7tRrBsQ7unX7X8Rjmfl/ByschsTDk4avNO+NfTfeBtGymBYWX\nN6Lr8sivdkwoZZzKOSSWSzdos48ELlThnO/9Ti706Lg3aSQK5iY+aakJiC+fXdfT\n1TtsgQKBgQDRYvtK/Cpaq0W6wO3I4R75lHGa7zjEr4HA0Kk/FlwS0YveuTh5xqBj\nwQz2YyuQQfJfJs7kbWOITBT3vuBJ8F+pktL2Xq5p7/ooIXOGS8Ib4/JAS1C/wb+t\nuJHGva12bZ4uizxdL2Q0/n9ziYTiMc/MMh/56o4Je8RMdOMT5lTsRQ==\n-----END RSA PRIVATE KEY-----\n" + nodes: + "127.0.0.1:10053": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHelloBidirectionalStream +--- response_body +{ + "message": "Hello apisix" +} +{ + "message": 
"stream ended" +} diff --git a/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy-stream.t b/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy-stream.t new file mode 100644 index 0000000..2e3da18 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy-stream.t @@ -0,0 +1,134 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: Test server side streaming method through gRPC proxy +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHelloServerStream + methods: [ + POST + ] + upstream: + scheme: grpc + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHelloServerStream +--- response_body +{ + "message": "Hello apisix" +} +{ + "message": "Hello apisix" +} +{ + "message": "Hello apisix" +} +{ + "message": "Hello apisix" +} +{ + "message": "Hello apisix" +} + + + +=== TEST 2: Test client side streaming method through gRPC proxy +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHelloClientStream + methods: [ + POST + ] + upstream: + scheme: grpc + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"} {"name":"apisix"} {"name":"apisix"} {"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHelloClientStream +--- response_body +{ + "message": "Hello apisix!Hello apisix!Hello apisix!Hello apisix!" 
+} + + + +=== TEST 3: Test bidirectional streaming method through gRPC proxy +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHelloBidirectionalStream + methods: [ + POST + ] + upstream: + scheme: grpc + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"} {"name":"apisix"} {"name":"apisix"} {"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHelloBidirectionalStream +--- response_body +{ + "message": "Hello apisix" +} +{ + "message": "Hello apisix" +} +{ + "message": "Hello apisix" +} +{ + "message": "Hello apisix" +} +{ + "message": "stream ended" +} diff --git a/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy-unary.t b/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy-unary.t new file mode 100644 index 0000000..62870dc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy-unary.t @@ -0,0 +1,142 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: Unary API gRPC proxy +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpc + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} + + + +=== TEST 2: Unary API gRPC proxy test [the old way] +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpc + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} + + + +=== TEST 3: Unary API grpcs proxy test +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpcs + nodes: + "127.0.0.1:10052": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} + + + +=== TEST 4: Unary API gRPC proxy with tls +--- http2 +--- apisix_yaml +ssls: + - + id: 1 + cert: "-----BEGIN 
CERTIFICATE-----\nMIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV\nBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G\nA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa\nGA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n\nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM\nCHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe\ncvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb\nVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR\n2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr\nabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2\nWjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/\nEvm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1\n/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh\n/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj\ncTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ\ntSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl\nc3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC\ntC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY\n1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl\nPYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob\nrJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy\nhme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1\n7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y\nIJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve\nU/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM=\n-----END CERTIFICATE-----\n" + key: "-----BEGIN RSA PRIVATE 
KEY-----\nMIIG5AIBAAKCAYEAyCM0rqJecvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5\njhZB3W6BkWUWR4oNFLLSqcVbVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfo\neLj0efMiOepOSZflj9Ob4yKR2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5s\nmPtW1Oc/BV5terhscJdOgmRrabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt\n6iMWEGeQU6mwPENgvj1olji2WjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiy\nVt1TmtMWn1ztk6FfLRqwJWR/Evm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1o\nnpRVeXhrBajbCRDRBMwaNw/1/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2\nfzaqpIfyUbPST4GdqNG9NyIh/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI\n1cGrGwyXbrieNp63AgMBAAECggGBAJM8g0duoHmIYoAJzbmKe4ew0C5fZtFUQNmu\nO2xJITUiLT3ga4LCkRYsdBnY+nkK8PCnViAb10KtIT+bKipoLsNWI9Xcq4Cg4G3t\n11XQMgPPgxYXA6m8t+73ldhxrcKqgvI6xVZmWlKDPn+CY/Wqj5PA476B5wEmYbNC\nGIcd1FLl3E9Qm4g4b/sVXOHARF6iSvTR+6ol4nfWKlaXSlx2gNkHuG8RVpyDsp9c\nz9zUqAdZ3QyFQhKcWWEcL6u9DLBpB/gUjyB3qWhDMe7jcCBZR1ALyRyEjmDwZzv2\njlv8qlLFfn9R29UI0pbuL1eRAz97scFOFme1s9oSU9a12YHfEd2wJOM9bqiKju8y\nDZzePhEYuTZ8qxwiPJGy7XvRYTGHAs8+iDlG4vVpA0qD++1FTpv06cg/fOdnwshE\nOJlEC0ozMvnM2rZ2oYejdG3aAnUHmSNa5tkJwXnmj/EMw1TEXf+H6+xknAkw05nh\nzsxXrbuFUe7VRfgB5ElMA/V4NsScgQKBwQDmMRtnS32UZjw4A8DsHOKFzugfWzJ8\nGc+3sTgs+4dNIAvo0sjibQ3xl01h0BB2Pr1KtkgBYB8LJW/FuYdCRS/KlXH7PHgX\n84gYWImhNhcNOL3coO8NXvd6+m+a/Z7xghbQtaraui6cDWPiCNd/sdLMZQ/7LopM\nRbM32nrgBKMOJpMok1Z6zsPzT83SjkcSxjVzgULNYEp03uf1PWmHuvjO1yELwX9/\ngoACViF+jst12RUEiEQIYwr4y637GQBy+9cCgcEA3pN9W5OjSPDVsTcVERig8++O\nBFURiUa7nXRHzKp2wT6jlMVcu8Pb2fjclxRyaMGYKZBRuXDlc/RNO3uTytGYNdC2\nIptU5N4M7iZHXj190xtDxRnYQWWo/PR6EcJj3f/tc3Itm1rX0JfuI3JzJQgDb9Z2\ns/9/ub8RRvmQV9LM/utgyOwNdf5dyVoPcTY2739X4ZzXNH+CybfNa+LWpiJIVEs2\ntxXbgZrhmlaWzwA525nZ0UlKdfktdcXeqke9eBghAoHARVTHFy6CjV7ZhlmDEtqE\nU58FBOS36O7xRDdpXwsHLnCXhbFu9du41mom0W4UdzjgVI9gUqG71+SXrKr7lTc3\ndMHcSbplxXkBJawND/Q1rzLG5JvIRHO1AGJLmRgIdl8jNgtxgV2QSkoyKlNVbM2H\nWy6ZSKM03lIj74+rcKuU3N87dX4jDuwV0sPXjzJxL7NpR/fHwgndgyPcI14y2cGz\nzMC44EyQdTw+B/YfMnoZx83xaaMNMqV6GYNnTHi0TO2TAoHBAKmdrh9WkE2qsr59\nIoHHygh7Wzez+Ewr6hfgoEK4+QzlBlX+XV/9rxIaE0jS3Sk1txadk5oFDebimuSk\nlQkv1pXUOq
h+xSAwk5v88dBAfh2dnnSa8HFN3oz+ZfQYtnBcc4DR1y2X+fVNgr3i\nnxruU2gsAIPFRnmvwKPc1YIH9A6kIzqaoNt1f9VM243D6fNzkO4uztWEApBkkJgR\n4s/yOjp6ovS9JG1NMXWjXQPcwTq3sQVLnAHxZRJmOvx69UmK4QKBwFYXXjeXiU3d\nbcrPfe6qNGjfzK+BkhWznuFUMbuxyZWDYQD5yb6ukUosrj7pmZv3BxKcKCvmONU+\nCHgIXB+hG+R9S2mCcH1qBQoP/RSm+TUzS/Bl2UeuhnFZh2jSZQy3OwryUi6nhF0u\nLDzMI/6aO1ggsI23Ri0Y9ZtqVKczTkxzdQKR9xvoNBUufjimRlS80sJCEB3Qm20S\nwzarryret/7GFW1/3cz+hTj9/d45i25zArr3Pocfpur5mfz3fJO8jg==\n-----END RSA PRIVATE KEY-----\n" + sni: test.com +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpc + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -insecure -d '{"name":"apisix"}' test.com:1994 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} diff --git a/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy.t b/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy.t new file mode 100644 index 0000000..ba25c66 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/grpc-proxy.t @@ -0,0 +1,287 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +# As the test framework doesn't support sending grpc request, this +# test file is only for grpc irrelative configuration check. +# To avoid confusion, we configure a closed port so if th configuration works, +# the result will be `connect refused`. +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + if (!$block->request) { + $block->set_value("request", "POST /hello"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: with upstream_id +--- apisix_yaml +upstreams: + - id: 1 + type: roundrobin + scheme: grpc + nodes: + "127.0.0.1:9088": 1 +routes: + - id: 1 + methods: + - POST + uri: "/hello" + upstream_id: 1 +#END +--- error_code: 502 +--- error_log +proxy request to 127.0.0.1:9088 + + + +=== TEST 2: with consumer +--- apisix_yaml +consumers: + - username: jack + plugins: + key-auth: + key: user-key +#END +routes: + - id: 1 + methods: + - POST + uri: "/hello" + plugins: + key-auth: + consumer-restriction: + whitelist: + - jack + upstream: + scheme: grpc + type: roundrobin + nodes: + "127.0.0.1:9088": 1 +#END +--- more_headers +apikey: user-key +--- error_code: 502 +--- error_log +Connection refused + + + +=== TEST 3: with upstream_id (old way) +--- apisix_yaml +upstreams: + - id: 1 + type: roundrobin + scheme: grpc + nodes: + "127.0.0.1:9088": 1 +routes: + - id: 1 + methods: + - POST + uri: "/hello" + upstream_id: 1 +#END +--- error_code: 502 +--- error_log +proxy request to 127.0.0.1:9088 + + + +=== TEST 4: with consumer (old way) +--- apisix_yaml +consumers: + - username: jack + plugins: + key-auth: + key: user-key +#END +routes: + - id: 1 + methods: + - POST + uri: "/hello" + plugins: + key-auth: + consumer-restriction: + whitelist: + - 
jack + upstream: + type: roundrobin + scheme: grpc + nodes: + "127.0.0.1:9088": 1 +#END +--- more_headers +apikey: user-key +--- error_code: 502 +--- error_log +Connection refused + + + +=== TEST 5: use 443 as the grpcs' default port +--- apisix_yaml +routes: + - + uri: /hello + upstream: + scheme: grpcs + nodes: + "127.0.0.1": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 502 +--- error_log +connect() failed (111: Connection refused) while connecting to upstream + + + +=== TEST 6: use 80 as the grpc's default port +--- apisix_yaml +routes: + - + uri: /hello + upstream: + scheme: grpc + nodes: + "127.0.0.1": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 502 +--- error_log +connect() failed (111: Connection refused) while connecting to upstream + + + +=== TEST 7: set authority header +--- log_level: debug +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpc + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} +--- grep_error_log eval +qr/grpc header: "(:authority|host): [^"]+"/ +--- grep_error_log_out eval +qr/grpc header: "(:authority|host): 127.0.0.1:1984"/ + + + +=== TEST 8: set authority header to node header +--- log_level: debug +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpc + pass_host: node + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} +--- grep_error_log eval +qr/grpc header: "(:authority|host): 
[^"]+"/ +--- grep_error_log_out eval +qr/grpc header: "(:authority|host): 127.0.0.1:10051"/ + + + +=== TEST 9: set authority header to specific value +--- log_level: debug +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + upstream: + scheme: grpc + pass_host: rewrite + upstream_host: hello.world + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} +--- grep_error_log eval +qr/grpc header: "(:authority|host): [^"]+"/ +--- grep_error_log_out eval +qr/grpc header: "(:authority|host): hello.world"/ diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-discovery.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-discovery.t new file mode 100644 index 0000000..8a9b0e9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-discovery.t @@ -0,0 +1,201 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if ($block->apisix_yaml) { + my $upstream = <<_EOC_; +upstreams: + - service_name: mock + discovery_type: mock + type: roundrobin + id: 1 + checks: + active: + http_path: "/status" + host: 127.0.0.1 + port: 1988 + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 1 +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $upstream); + } + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1}, + {host = "0.0.0.0", port = 1980, weight = 1}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + + ngx.sleep(1.5) + + ngx.say(res.status) + } + } +--- grep_error_log eval +qr/unhealthy TCP increment \(1\/2\) for '127.0.0.1\([^)]+\)'/ +--- grep_error_log_out +unhealthy TCP increment (1/2) for '127.0.0.1(127.0.0.1:1988)' +unhealthy TCP increment (1/2) for '127.0.0.1(0.0.0.0:1988)' + + + +=== TEST 2: create new checker when nodes change +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1}, + {host = "0.0.0.0", port = 1980, weight = 1}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.sleep(0.5) + + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1}, + {host = "127.0.0.2", port = 1980, weight = 1}, + {host = "127.0.0.3", port = 1980, weight = 1}, + } + end + } + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + } + } +--- grep_error_log eval +qr/(create new checker|try to release checker): table/ +--- grep_error_log_out +create new checker: table +try to release checker: table +create new checker: table + + + +=== TEST 3: don't create new checker when nodes don't change +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1}, + {host = "0.0.0.0", port = 1980, weight = 1}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.sleep(0.5) + + discovery.mock = { + nodes = function() + return { + {host = "0.0.0.0", port = 1980, weight = 1}, + {host = "127.0.0.1", port = 1980, weight = 1}, + } + end + } + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + } + } +--- grep_error_log eval +qr/(create new checker|try to release checker): table/ +--- grep_error_log_out +create new checker: table diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-https.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-https.t new file mode 100644 index 0000000..b1f7b7b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-https.t @@ -0,0 +1,341 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use t::APISIX 'no_plan';
+
+no_root_location();
+repeat_each(1);
+log_level('info');
+# NOTE(review): a duplicate no_root_location() call was removed here (already invoked above).
+no_shuffle();
+
+add_block_preprocessor(sub {
+    my ($block) = @_;
+
+    if (!$block->http_config) {
+        my $http_config = <<'_EOC_';  # default mock upstreams: 8765/8767 healthy, 8766/8768 return 500 on /healthz
+server {
+    listen 8765 ssl;
+    ssl_certificate ../../certs/mtls_server.crt;
+    ssl_certificate_key ../../certs/mtls_server.key;
+    ssl_client_certificate ../../certs/mtls_ca.crt;
+
+    location /ping {
+        return 200 '8765';
+    }
+
+    location /healthz {
+        return 200 'ok';
+    }
+}
+
+server {
+    listen 8766 ssl;
+    ssl_certificate ../../certs/mtls_server.crt;
+    ssl_certificate_key ../../certs/mtls_server.key;
+    ssl_client_certificate ../../certs/mtls_ca.crt;
+
+    location /ping {
+        return 200 '8766';
+    }
+
+    location /healthz {
+        return 500;
+    }
+}
+
+
+server {
+    listen 8767 ssl;
+    ssl_certificate ../../certs/mtls_server.crt;
+    ssl_certificate_key ../../certs/mtls_server.key;
+    ssl_client_certificate ../../certs/mtls_ca.crt;
+
+    location /ping {
+        return 200 '8766'; # NOTE(review): body looks copy-pasted from the 8766 server; tests only assert /healthz status -- confirm
+    }
+
+    location /healthz {
+        return 200 'ok';
+    }
+}
+
+server {
+    listen 8768 ssl;
+    ssl_certificate ../../certs/mtls_server.crt;
+    ssl_certificate_key ../../certs/mtls_server.key;
+    ssl_client_certificate ../../certs/mtls_ca.crt;
+
+    location /ping {
+        return 200 '8766';
+    }
+
+    location /healthz {
+        return 500;
+    }
+}
+
+_EOC_
+        $block->set_value("http_config", $http_config);
+    }
+
+    if (!$block->request) {
+        $block->set_value("request", "GET /t");
+    }
+
+});
+
+run_tests;
+
+__DATA__
+
+=== TEST 1: https health check (two health nodes)
+--- config
+    location /t {
+        lua_ssl_trusted_certificate ../../certs/mtls_ca.crt;
+        content_by_lua_block {
+            local t = require("lib.test_admin")
+            local core = require("apisix.core")
+            local cert = t.read_file("t/certs/mtls_client.crt")
+            local key = t.read_file("t/certs/mtls_client.key")
+            local data = {
+                uri = "/ping",
+                upstream = {
+                    scheme = "https",
+                    nodes = {
+                        ["127.0.0.1:8765"] = 1,
+                        ["127.0.0.1:8767"] = 1
+                    },
+                    tls = {
+                        client_cert = cert,
+                        client_key = key
+                    },
+                    retries = 2,
+                    checks = {
+                        active = {
+                            type = "https",
+                            http_path = "/healthz",
+                            https_verify_certificate = false,
+                            healthy = {
+                                interval = 1,
+                                successes = 1
+                            },
+                            unhealthy = {
+                                interval = 1,
+                                http_failures = 1
+                            },
+                        }
+                    }
+                }
+            }
+            local code, body = t.test('/apisix/admin/routes/1',
+                ngx.HTTP_PUT, core.json.encode(data))
+            if code >= 300 then
+                ngx.status = code
+                ngx.say(body)
+                return
+            end
+
+            local http = require("resty.http")
+            local httpc = http.new()
+            local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/ping"
+            local _, _ = httpc:request_uri(uri, {method = "GET", keepalive = false})
+            ngx.sleep(0.5)
+
+            local healthcheck_uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/v1/healthcheck/routes/1"
+            local httpc = http.new()
+            local res, _ = httpc:request_uri(healthcheck_uri, {method = "GET", keepalive = false})
+            local json_data = core.json.decode(res.body)
+            assert(json_data.type == "https")
+            assert(#json_data.nodes == 2)
+
+            local function check_node_health(port, status)
+                for _, node in ipairs(json_data.nodes) do
+                    if node.port == port and node.status == status then
+                        return true
+                    end
+                end
+                return false
+            end
+
+            assert(check_node_health(8765, "healthy"), "Port 8765 is not healthy")
+            assert(check_node_health(8767, "healthy"), "Port 8767 is not healthy")
+        }
+    }
+--- request
+GET /t
+--- error_code: 200
+
+
+
+=== TEST 2: https health check (one healthy node, one unhealthy node)
+--- config
+    location /t {
+        lua_ssl_trusted_certificate ../../certs/mtls_ca.crt;
+        content_by_lua_block {
+            local t = require("lib.test_admin")
+            local core = require("apisix.core")
+            local cert = t.read_file("t/certs/mtls_client.crt")
+            local key = t.read_file("t/certs/mtls_client.key")
+            local data = {
+                uri = "/ping",
+                upstream = {
+                    scheme = "https",
+                    nodes = {
+                        ["127.0.0.1:8765"] = 1,
+                        ["127.0.0.1:8766"] = 1
+                    },
+                    tls = {
+                        client_cert = cert,
+                        client_key = key
+                    },
+                    retries = 2,
+                    checks = {
+                        active = {
+                            type = "https",
+                            http_path = "/healthz",
+                            https_verify_certificate = false,
+                            healthy = {
+                                interval = 1,
+                                successes = 1
+                            },
+                            unhealthy = {
+                                interval = 1,
+                                http_failures = 1
+                            },
+                        }
+                    }
+                }
+            }
+            local code, body = t.test('/apisix/admin/routes/1',
+                ngx.HTTP_PUT, core.json.encode(data))
+            if code >= 300 then
+                ngx.status = code
+                ngx.say(body)
+                return
+            end
+
+            local http = require("resty.http")
+            local httpc = http.new()
+            local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/ping"
+            local _, _ = httpc:request_uri(uri, {method = "GET", keepalive = false})
+            ngx.sleep(1.5)
+
+            local healthcheck_uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/v1/healthcheck/routes/1"
+            local httpc = http.new()
+            local res, _ = httpc:request_uri(healthcheck_uri, {method = "GET", keepalive = false})
+            local json_data = core.json.decode(res.body)
+            assert(json_data.type == "https")
+            assert(#json_data.nodes == 2)
+
+            local function check_node_health(port, status)
+                for _, node in ipairs(json_data.nodes) do
+                    if node.port == port and node.status == status then
+                        return true
+                    end
+                end
+                return false
+            end
+
+            assert(check_node_health(8765, "healthy"), "Port 8765 is not healthy")
+            assert(check_node_health(8766, "unhealthy"), "Port 8766 is not unhealthy")
+        }
+    }
+--- request
+GET /t
+--- grep_error_log eval
+qr/\([^)]+\) unhealthy .* for '.*'/
+--- grep_error_log_out
+(upstream#/apisix/routes/1) unhealthy HTTP increment (1/1) for '127.0.0.1(127.0.0.1:8766)'
+
+
+
+=== TEST 3: https health check (two unhealthy nodes)
+--- config
+    location /t {
+        lua_ssl_trusted_certificate ../../certs/mtls_ca.crt;
+        content_by_lua_block {
+            local t = require("lib.test_admin")
+            local core = require("apisix.core")
+            local cert = t.read_file("t/certs/mtls_client.crt")
+            local key = t.read_file("t/certs/mtls_client.key")
+            local data = {
+                uri = "/ping",
+                upstream = {
+                    scheme = "https",
+                    nodes = {
+                        ["127.0.0.1:8766"] = 1,
+                        ["127.0.0.1:8768"] = 1
+                    },
+                    tls = {
+                        client_cert = cert,
+                        client_key = key
+                    },
+                    retries = 2,
+                    checks = {
+                        active = {
+                            type = "https",
+                            http_path = "/healthz",
+                            https_verify_certificate = false,
+                            healthy = {
+                                interval = 1,
+                                successes = 1
+                            },
+                            unhealthy = {
+                                interval = 1,
+                                http_failures = 1
+                            },
+                        }
+                    }
+                }
+            }
+            local code, body = t.test('/apisix/admin/routes/1',
+                ngx.HTTP_PUT, core.json.encode(data))
+            if code >= 300 then
+                ngx.status = code
+                ngx.say(body)
+                return
+            end
+
+            local http = require("resty.http")
+            local httpc = http.new()
+            local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/ping"
+            local _, _ = httpc:request_uri(uri, {method = "GET", keepalive = false})
+            ngx.sleep(1.5)
+
+            local healthcheck_uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/v1/healthcheck/routes/1"
+            local httpc = http.new()
+            local res, _ = httpc:request_uri(healthcheck_uri, {method = "GET", keepalive = false})
+            local json_data = core.json.decode(res.body)
+            assert(json_data.type == "https")
+            assert(#json_data.nodes == 2)
+
+            local function check_node_health(port, status)
+                for _, node in ipairs(json_data.nodes) do
+                    if node.port == port and node.status == status then
+                        return true
+                    end
+                end
+                return false
+            end
+
+            assert(check_node_health(8766, "unhealthy"), "Port 8766 is not unhealthy")
+            assert(check_node_health(8768, "unhealthy"), "Port 8768 is not unhealthy")
+        }
+    }
+--- request
+GET /t
+--- error_code: 200
diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-ipv6.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-ipv6.t
new file mode 100644
index 0000000..dc33dec
--- /dev/null
+++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-ipv6.t
@@ -0,0 +1,148 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +worker_connections(256); + +add_block_preprocessor(sub { + my $block = shift; + $block->set_value("listen_ipv6", 1); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(two upstream node: one healthy + one unhealthy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1970": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out + + + +=== TEST 2: hit routes (two upstream node: one healthy + one unhealthy) +--- config + location /t { + content_by_lua_block { + ngx.sleep(3) -- wait for new workers replacement to complete + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.log(ngx.ERR, "It works") + end + + ngx.sleep(2.5) + + local ports_count = {} + for i = 1, 12 do + ngx.log(ngx.ERR, "req ", i) + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + ngx.log(ngx.ERR, "req ", i, " ", res.body) + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1980"}] +--- grep_error_log eval +qr/unhealthy .* for '.*'/ +--- grep_error_log_out +unhealthy TCP increment (1/2) for 'foo.com(127.0.0.1:1970)' +unhealthy TCP increment (2/2) for 'foo.com(127.0.0.1:1970)' +--- timeout: 10 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-leak-bugfix.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-leak-bugfix.t new file mode 100644 index 0000000..1caf5d3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-leak-bugfix.t @@ -0,0 +1,112 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: ensure the old check is cleared after configuration updated +--- extra_init_worker_by_lua + local healthcheck = require("resty.healthcheck") + local new = healthcheck.new + healthcheck.new = function(...) + local obj = new(...) + local clear = obj.delayed_clear + obj.delayed_clear = function(...) + ngx.log(ngx.WARN, "clear checker") + return clear(...) + end + return obj + end + +--- extra_init_by_lua + local utils = require("apisix.core.utils") + local count = 0 + utils.dns_parse = function (domain) -- mock: DNS parser + count = count + 1 + if domain == "test1.com" then + return {address = "127.0.0." .. count} + end + if domain == "test2.com" then + return {address = "127.0.0." .. count+100} + end + + error("unknown domain: " .. 
domain) + end + +--- config +location /t { + content_by_lua_block { + local cfg = [[{ + "upstream": { + "nodes": { + "test1.com:1980": 1, + "test2.com:1980": 1 + }, + "type": "roundrobin", + "checks":{ + "active":{ + "healthy":{ + "http_statuses":[ + 200, + 302 + ], + "interval":1, + "successes":2 + }, + "http_path":"/hello", + "timeout":1, + "type":"http", + "unhealthy":{ + "http_failures":5, + "http_statuses":[ + 429, + 404, + 500, + 501, + 502, + 503, + 504, + 505 + ], + "interval":1, + "tcp_failures":2, + "timeouts":3 + } + } + } + }, + "uri": "/hello" + }]] + local t = require("lib.test_admin").test + assert(t('/apisix/admin/routes/1', ngx.HTTP_PUT, cfg) < 300) + t('/hello', ngx.HTTP_GET) + assert(t('/apisix/admin/routes/1', ngx.HTTP_PUT, cfg) < 300) + ngx.sleep(1) + } +} + +--- request +GET /t +--- error_log +clear checker diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-multiple-worker.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-multiple-worker.t new file mode 100644 index 0000000..fa6076e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-multiple-worker.t @@ -0,0 +1,141 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +workers(2); +worker_connections(256); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(two upstream node: one healthy + one unhealthy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1970": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out + + + +=== TEST 2: hit routes (two upstream node: one healthy + one unhealthy) +--- config + location /t { + content_by_lua_block { + ngx.sleep(3) -- wait for new workers replacement to complete + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(2.5) + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1980"}] +--- grep_error_log eval +qr/unhealthy TCP increment/ +--- grep_error_log_out +unhealthy TCP increment +unhealthy TCP increment +--- timeout: 20 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-passive-resty-events.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-passive-resty-events.t new file mode 100644 index 0000000..d90cbec --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-passive-resty-events.t @@ -0,0 +1,382 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +BEGIN { + if ($ENV{TEST_EVENTS_MODULE} ne "lua-resty-events") { + $SkipReason = "Only for lua-resty-events events module"; + } +} + +use Test::Nginx::Socket::Lua $SkipReason ? (skip_all => $SkipReason) : (); +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +worker_connections(256); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + },]] .. [[ + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [502], + "http_failures": 1, + "tcp_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit routes (two healthy nodes) +--- config + location /t { + content_by_lua_block { + ngx.sleep(3) -- wait for sync + + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/server_port" + + local httpc = http.new() + -- Since a failed request attempt triggers a passive health check to report + -- a non-health condition, a request is first triggered manually here to + -- trigger a passive health check to refresh the monitoring state of the build + -- + -- The reason for this is to avoid delays in event synchronization timing due + -- to non-deterministic asynchronous connections when using lua-resty-events + -- as an events module. + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + ngx.sleep(1) -- Wait for health check unhealthy events sync + + local ports_count = {} + for i = 1, 6 do + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + local status = tostring(res.status) + ports_count[status] = (ports_count[status] or 0) + 1 + end + + ngx.say(json_sort.encode(ports_count)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +{"200":5,"502":1} +--- error_log +(upstream#/apisix/routes/1) unhealthy HTTP increment (1/1) +--- timeout: 10 + + + +=== TEST 3: set route(only passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [502], + "http_failures": 1, + "tcp_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: object matches none of the required: 
[\"active\"] or [\"active\",\"passive\"]"} + + + +=== TEST 4: set route(only active + active & passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello_", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + },]] .. [[ + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [502], + "http_failures": 1, + "tcp_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: only one route should have passive healthcheck +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + local ports_count = {} + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello_") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + + -- only /hello_ has passive healthcheck + local res, err = httpc:request_uri(uri .. 
"/hello") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +502 +502 +--- grep_error_log eval +qr/enabled healthcheck passive/ +--- grep_error_log_out +enabled healthcheck passive + + + +=== TEST 6: make sure passive healthcheck works (conf is not corrupted by the default value) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + + -- The first time request to /hello_ + -- Ensure that the event that triggers the healthchecker to perform + -- add_target has been sent and processed correctly + -- + -- Due to the implementation of lua-resty-events, it relies on the kernel and + -- the Nginx event loop to process socket connections. + -- When lua-resty-healthcheck handles passive healthchecks and uses lua-resty-events + -- as the events module, the synchronization of the first event usually occurs + -- before the start of the passive healthcheck. So when the execution finishes and + -- healthchecker tries to record the healthcheck status, it will not be able to find + -- an existing target (because the synchronization event has not finished yet), which + -- will lead to some anomalies that deviate from the original test case, so compatibility + -- operations are performed here. + local res, err = httpc:request_uri(uri .. "/hello_") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + + ngx.sleep(1) -- Wait for health check unhealthy events sync + + -- The second time request to /hello_ + local res, err = httpc:request_uri(uri .. 
"/hello_") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +502 +502 +502 +--- grep_error_log eval +qr/\[healthcheck\] \([^)]+\) unhealthy HTTP increment/ +--- grep_error_log_out +[healthcheck] (upstream#/apisix/routes/2) unhealthy HTTP increment diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-passive.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-passive.t new file mode 100644 index 0000000..7404ff0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-passive.t @@ -0,0 +1,344 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +BEGIN { + if ($ENV{TEST_EVENTS_MODULE} ne "lua-resty-worker-events") { + $SkipReason = "Only for lua-resty-worker-events events module"; + } +} + +use Test::Nginx::Socket::Lua $SkipReason ? 
(skip_all => $SkipReason) : (); +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +worker_connections(256); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + },]] .. [[ + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [502], + "http_failures": 1, + "tcp_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit routes (two healthy nodes) +--- config + location /t { + content_by_lua_block { + ngx.sleep(1) -- wait for sync + + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/server_port" + + local ports_count = {} + for i = 1, 6 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + local status = tostring(res.status) + ports_count[status] = (ports_count[status] or 0) + 1 + end + + ngx.say(json_sort.encode(ports_count)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +{"200":5,"502":1} +--- error_log +(upstream#/apisix/routes/1) unhealthy HTTP increment (1/1) + + + +=== TEST 3: set route(only passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [502], + "http_failures": 1, + "tcp_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"checks\" validation failed: object matches none of the required: [\"active\"] or [\"active\",\"passive\"]"} + + + +=== TEST 4: set route(only active + active & passive) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code 
>= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello_", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 0, + "127.0.0.1:1": 1 + }, + "retries": 0, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 100, + "successes": 1 + }, + "unhealthy": { + "interval": 100, + "http_failures": 2 + } + },]] .. [[ + "passive": { + "healthy": { + "http_statuses": [200, 201], + "successes": 3 + }, + "unhealthy": { + "http_statuses": [502], + "http_failures": 1, + "tcp_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: only one route should have passive healthcheck +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + local ports_count = {} + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello_") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + + -- only /hello_ has passive healthcheck + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +502 +502 +--- grep_error_log eval +qr/enabled healthcheck passive/ +--- grep_error_log_out +enabled healthcheck passive + + + +=== TEST 6: make sure passive healthcheck works (conf is not corrupted by the default value) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json_sort = require("toolkit.json") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port + + local ports_count = {} + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + + local res, err = httpc:request_uri(uri .. "/hello_") + if not res then + ngx.say(err) + return + end + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +502 +502 +--- grep_error_log eval +qr/\[healthcheck\] \([^)]+\) unhealthy HTTP increment/ +--- grep_error_log_out +[healthcheck] (upstream#/apisix/routes/2) unhealthy HTTP increment diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-stop-checker.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-stop-checker.t new file mode 100644 index 0000000..54ed617 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck-stop-checker.t @@ -0,0 +1,253 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +worker_connections(256); + +# the healthcheck stop test requires exiting worker to keep watching etcd for a while, +# which is not the case when using gRPC. +my $yaml_config = <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + prefix: "/apisix" + host: + - "http://127.0.0.1:2379" + use_grpc: false + admin: + admin_key: null +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(two healthy upstream nodes) +--- request +PUT /apisix/admin/routes/1 +{"uri":"/server_port","upstream":{"type":"roundrobin","nodes":{"127.0.0.1:1980":1,"127.0.0.1:1981":1},"checks":{"active":{"http_path":"/status","host":"foo.com","healthy":{"interval":1,"successes":1},"unhealthy":{"interval":1,"http_failures":2}}}}} +--- error_code_like: ^20\d$ + + + +=== TEST 2: update + delete +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, status, body = t('/apisix/admin/routes/1', + "PUT", + [[{"uri":"/server_port","upstream":{"type":"roundrobin","nodes":{"127.0.0.1:1980":1,"127.0.0.1:1981":1},"checks":{"active":{"http_path":"/status","healthy":{"interval":1,"successes":1},"unhealthy":{"interval":1,"http_failures":2}}}}}]] + ) + + if code < 300 then + code = 200 + end + ngx.say("1 code: ", code) + + ngx.sleep(0.2) + local code, body = t('/server_port', "GET") + ngx.say("2 code: ", code) + + ngx.sleep(0.2) + code = t('/apisix/admin/routes/1', "DELETE") + ngx.say("3 code: ", code) + + ngx.sleep(0.2) + local code, body = t('/server_port', "GET") + ngx.say("4 code: ", 
code) + } + } +--- request +GET /t +--- response_body +1 code: 200 +2 code: 200 +3 code: 200 +4 code: 404 +--- grep_error_log eval +qr/create new checker: table: 0x|try to release checker: table: 0x/ +--- grep_error_log_out +create new checker: table: 0x +try to release checker: table: 0x + + + +=== TEST 3: set route(two healthy upstream nodes) +--- request +PUT /apisix/admin/routes/1 +{"uri":"/server_port","upstream":{"type":"roundrobin","nodes":{"127.0.0.1:1980":1,"127.0.0.1:1981":1},"checks":{"active":{"http_path":"/status","host":"foo.com","healthy":{"interval":1,"successes":1},"unhealthy":{"interval":1,"http_failures":2}}}}} +--- error_code: 201 + + + +=== TEST 4: update +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/server_port', "GET") + ngx.say("1 code: ", code) + + local code, status, body = t('/apisix/admin/routes/1', + "PUT", + [[{"uri":"/server_port","upstream":{"type":"roundrobin","nodes":{"127.0.0.1:1980":1,"127.0.0.1:1981":1},"checks":{"active":{"http_path":"/status","healthy":{"interval":1,"successes":1},"unhealthy":{"interval":1,"http_failures":2}}}}}]] + ) + + if code < 300 then + code = 200 + end + ngx.say("2 code: ", code) + + ngx.sleep(0.2) + local code, body = t('/server_port', "GET") + ngx.say("3 code: ", code) + } + } +--- request +GET /t +--- response_body +1 code: 200 +2 code: 200 +3 code: 200 +--- grep_error_log eval +qr/create new checker: table: 0x|try to release checker: table: 0x/ +--- grep_error_log_out +create new checker: table: 0x +try to release checker: table: 0x +create new checker: table: 0x + + + +=== TEST 5: update + delete for /upstreams +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, _, body = t('/apisix/admin/upstreams/stopchecker', + "PUT", + 
[[{"type":"roundrobin","nodes":{"127.0.0.1:1980":1,"127.0.0.1:1981":1},"checks":{"active":{"http_path":"/status","healthy":{"interval":1,"successes":1},"unhealthy":{"interval":1,"http_failures":2}}}}]] + ) + + if code > 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, _, body = t('/apisix/admin/routes/1', + "PUT", + [[{"uri":"/server_port","upstream_id":"stopchecker"}]] + ) + + if code > 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.2) + code, _, body = t('/server_port', "GET") + + if code > 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.5) + + -- update + code, _, body = t('/apisix/admin/upstreams/stopchecker', + "PUT", + [[{"type":"roundrobin","nodes":{"127.0.0.1:1980":1,"127.0.0.1:1981":1},"checks":{"active":{"http_path":"/void","healthy":{"interval":1,"successes":1},"unhealthy":{"interval":1,"http_failures":1}}}}]] + ) + + if code > 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.2) + code, _, body = t('/server_port', "GET") + + if code > 300 then + ngx.status = code + ngx.say(body) + return + end + + -- delete + code, _, body = t('/apisix/admin/routes/1', "DELETE") + + if code > 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) -- wait for routes delete event synced + + code, _, body = t('/apisix/admin/upstreams/stopchecker', "DELETE") + + if code > 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say("ok") + } + } +--- request +GET /t +--- response_body +ok +--- grep_error_log eval +qr/create new checker: table: 0x|try to release checker: table: 0x/ +--- grep_error_log_out +create new checker: table: 0x +try to release checker: table: 0x +create new checker: table: 0x +try to release checker: table: 0x diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck.t new file mode 100644 index 0000000..546b06d --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck.t @@ -0,0 +1,916 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +worker_connections(256); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(two healthy upstream nodes) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out + + + +=== TEST 2: hit routes (two healthy nodes) +--- config + location /t { + content_by_lua_block { + ngx.sleep(3) -- wait for sync + + local http = require "resty.http" + local httpc = 
http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/server_port" + + -- hit route before start test loop + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + local ports_count = {} + for i = 1, 12 do + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":6,"port":"1981"},{"count":6,"port":"1980"}] +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out +--- timeout: 10 + + + +=== TEST 3: set route(two upstream node: one healthy + one unhealthy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1970": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out + + + +=== TEST 4: hit routes (two upstream node: one healthy + one unhealthy) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port + .. "/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(2.5) + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1980"}] +--- grep_error_log eval +qr/\([^)]+\) unhealthy .* for '.*'/ +--- grep_error_log_out +(upstream#/apisix/routes/1) unhealthy TCP increment (1/2) for 'foo.com(127.0.0.1:1970)' +(upstream#/apisix/routes/1) unhealthy TCP increment (2/2) for 'foo.com(127.0.0.1:1970)' +--- timeout: 10 + + + +=== TEST 5: chash route (two healthy nodes) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "chash", + "nodes": { + "127.0.0.1:1981": 1, + "127.0.0.1:1980": 1 + }, + "key": "remote_addr", + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out + + + +=== TEST 6: hit routes (two healthy nodes) +--- config + location /t { + content_by_lua_block { + ngx.sleep(2) 
-- wait for sync + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/server_port" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1980"}] +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out +--- timeout: 6 + + + +=== TEST 7: chash route (upstream nodes: 1 healthy + 8 unhealthy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "chash", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1970": 1, + "127.0.0.1:1971": 1, + "127.0.0.1:1972": 1, + "127.0.0.1:1973": 1, + "127.0.0.1:1974": 1, + "127.0.0.1:1975": 1, + "127.0.0.1:1976": 1, + "127.0.0.1:1977": 1 + }, + "key": "remote_addr", + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out + + + +=== TEST 8: hit routes (upstream nodes: 1 healthy + 8 unhealthy) +--- config + location /t { + content_by_lua_block { + local http = require 
"resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(2.5) + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1980"}] +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out eval +qr/Connection refused\) while connecting to upstream/ +--- timeout: 10 + + + +=== TEST 9: chash route (upstream nodes: 2 unhealthy) +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{"uri":"/server_port","upstream":{"type":"chash","nodes":{"127.0.0.1:1960":1,"127.0.0.1:1961":1},"key":"remote_addr","retries":3,"checks":{"active":{"http_path":"/status","host":"foo.com","healthy":{"interval":999,"successes":3},"unhealthy":{"interval":999,"http_failures":3}},"passive":{"healthy":{"http_statuses":[200,201],"successes":3},"unhealthy":{"http_statuses":[500],"http_failures":3,"tcp_failures":3}}}}}]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out + + + +=== TEST 10: hit routes (passive + retries) +--- config + location /t { + content_by_lua_block { + 
local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/server_port" + + local ports_count = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, + {method = "GET", keepalive = false} + ) + ngx.say("res: ", res.status, " err: ", err) + end + } + } +--- request +GET /t +--- response_body +res: 502 err: nil +res: 502 err: nil +--- grep_error_log eval +qr{\[error\].*while connecting to upstream.*} +--- grep_error_log_out eval +qr{.*http://127.0.0.1:1960/server_port.* +.*http://127.0.0.1:1961/server_port.* +.*http://127.0.0.1:1961/server_port.* +.*http://127.0.0.1:1960/server_port.* +.*http://127.0.0.1:1961/server_port.* +.*http://127.0.0.1:1961/server_port.*} +--- timeout: 10 + + + +=== TEST 11: add new routh with healthcheck attribute +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for i = 1, 3 do + t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + ngx.sleep(0.1) + + local code, body = t('/server_port', ngx.HTTP_GET) + ngx.say("code: ", code, " body: ", body) + + code, body = t('/apisix/admin/routes/' .. 
i, ngx.HTTP_DELETE) + ngx.say("delete code: ", code) + + ngx.sleep(0.1) + end + } + } +--- request +GET /t +--- response_body +code: 200 body: passed +delete code: 200 +code: 200 body: passed +delete code: 200 +code: 200 body: passed +delete code: 200 + + + +=== TEST 12: add route (test health check config `host` valid) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1988": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: test health check config `host` valid +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + + ngx.sleep(2) + + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +200 +--- grep_error_log eval +qr/^.*?\[warn\].*/ +--- grep_error_log_out eval +qr/unhealthy TCP increment.*foo.com/ + + + +=== TEST 14: add route (test health check customized `port`) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "port": 1988, + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: test health check customized `port` +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + + ngx.sleep(2) + + ngx.say(res.status) + } + } +--- request +GET /t +--- response_body +200 +--- grep_error_log eval +qr/^.*?\[warn\].*/ +--- grep_error_log_out eval +qr/unhealthy TCP increment.*foo.com.*127.0.0.1:1988/ +--- timeout: 5 + + + +=== TEST 16: add route (test health check customized `port` out of minimum range) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "port": 0, + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/expected 0 to be at least 1/ +--- error_code chomp +400 + + + +=== TEST 17: add route (test health check customized `port` out of maximum range) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "port": 65536, + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/expected 65536 to be at most 65535/ +--- error_code chomp +400 + + + +=== TEST 18: set route + upstream (two 
upstream node: one healthy + one unhealthy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1970": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out + + + +=== TEST 19: hit routes, ensure the checker is bound to the upstream +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(2.5) + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":12,"port":"1980"}] +--- grep_error_log eval +qr/\([^)]+\) unhealthy .* for '.*'/ +--- grep_error_log_out +(upstream#/apisix/upstreams/1) unhealthy TCP increment (1/2) for 'foo.com(127.0.0.1:1970)' +(upstream#/apisix/upstreams/1) unhealthy TCP increment (2/2) for 'foo.com(127.0.0.1:1970)' +--- timeout: 10 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck2.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck2.t new file mode 100644 index 0000000..d63e80e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck2.t @@ -0,0 +1,362 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +worker_connections(256); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: can't use service_name with nodes +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +upstreams: + - service_name: abaaba + discovery_type: eureka + nodes: + "127.0.0.1:80": 1 + type: roundrobin + id: 1 +#END +--- error_log +value should match only one schema, but matches both schemas 1 and 2 +--- request +GET /hello +--- error_code: 502 + + + +=== TEST 2: route + service +--- apisix_yaml +services: + - id: 1 + upstream: + type: roundrobin + nodes: + "127.0.0.1:1980": 1 + "127.0.0.1:1970": 1 + checks: + active: + http_path: /status + host: foo.com + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 2 +routes: + - service_id: 1 + uri: /server_port +#END +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(2.5) + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- response_body +[{"count":12,"port":"1980"}] +--- grep_error_log eval +qr/\([^)]+\) unhealthy .* for '.*'/ +--- grep_error_log_out +(upstream#/services/1) unhealthy TCP increment (1/2) for 'foo.com(127.0.0.1:1970)' +(upstream#/services/1) unhealthy TCP increment (2/2) for 'foo.com(127.0.0.1:1970)' +--- timeout: 10 + + + +=== TEST 3: route override service +--- apisix_yaml +services: + - id: 1 + upstream: + type: roundrobin + nodes: + "127.0.0.2:1980": 1 + "127.0.0.2:1970": 1 + checks: + active: + http_path: /status + host: foo.com + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 2 +routes: + - service_id: 1 + uri: /server_port + upstream: + type: roundrobin + nodes: + "127.0.0.1:1980": 1 + "127.0.0.1:1970": 1 + checks: + active: + http_path: /status + host: foo.com + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 2 +#END +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(2.5) + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- response_body +[{"count":12,"port":"1980"}] +--- grep_error_log eval +qr/\([^)]+\) unhealthy .* for '.*'/ +--- grep_error_log_out +(upstream#/routes/arr_1) unhealthy TCP increment (1/2) for 'foo.com(127.0.0.1:1970)' +(upstream#/routes/arr_1) unhealthy TCP increment (2/2) for 'foo.com(127.0.0.1:1970)' +--- timeout: 10 + + + +=== TEST 4: pass the configured host (pass_host == "pass") +--- apisix_yaml +routes: + - id: 1 + uri: /server_port + upstream: + type: roundrobin + nodes: + "localhost:1980": 1 + "127.0.0.1:1981": 1 + checks: + active: + http_path: /status + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 2 +#END +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(1) + } + } +--- no_error_log +client request host: localhost +--- error_log +client request host: 127.0.0.1 + + + +=== TEST 5: pass the configured host (pass_host == "node") +--- apisix_yaml +routes: + - id: 1 + uri: /server_port + upstream: + type: roundrobin + pass_host: node + nodes: + "localhost:1980": 1 + "127.0.0.1:1981": 1 + checks: + active: + http_path: /status + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 2 +#END +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(1) + } + } +--- error_log +client request host: localhost +client request host: 127.0.0.1 + + + +=== TEST 6: pass the configured host (pass_host == "rewrite") +--- apisix_yaml +routes: + - id: 1 + uri: /server_port + upstream: + type: roundrobin + pass_host: rewrite + upstream_host: foo.com + nodes: + "localhost:1980": 1 + "127.0.0.1:1981": 1 + checks: + active: + http_path: /status + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 2 +#END +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(1) + } + } +--- no_error_log +client request host: localhost +client request host: 127.0.0.1 +--- error_log +client request host: foo.com diff --git a/CloudronPackages/APISIX/apisix-source/t/node/healthcheck3.t b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck3.t new file mode 100644 index 0000000..a1209af --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/healthcheck3.t @@ -0,0 +1,122 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +worker_connections(256); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(two healthy upstream nodes) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "checks": { + "active": { + "http_path": "/status", + "host": "foo.com", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 2 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/^.*?\[error\](?!.*process exiting).*/ +--- grep_error_log_out + + + +=== TEST 2: In case of concurrency only one request can create a checker +--- config + location /t { + content_by_lua_block { + local healthcheck = require("resty.healthcheck") + local test = healthcheck.new + healthcheck.new = function(...) + ngx.sleep(1) + return test(...) + end + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local send_request = function() + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.log(ngx.ERR, err) + return + end + end + + local t = {} + + for i = 1, 10 do + local th = assert(ngx.thread.spawn(send_request)) + table.insert(t, th) + end + + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + + ngx.exit(200) + } + } +--- request +GET /t +--- grep_error_log eval +qr/create new checker/ +--- grep_error_log_out +create new checker diff --git a/CloudronPackages/APISIX/apisix-source/t/node/hosts.t b/CloudronPackages/APISIX/apisix-source/t/node/hosts.t new file mode 100644 index 0000000..bda287e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/hosts.t @@ -0,0 +1,97 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "hosts": ["foo.com", "*.bar.com"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: /not_found +--- request +GET /hello +--- more_headers +Host: not_found.com +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: hit routes +--- request +GET /hello +--- more_headers +Host: foo.com +--- response_body +hello world + + + +=== TEST 5: hit routes +--- request +GET /hello +--- more_headers +Host: www.bar.com +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/http_host.t b/CloudronPackages/APISIX/apisix-source/t/node/http_host.t new file mode 100644 index 0000000..fb4d0fc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/http_host.t @@ -0,0 +1,68 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/uri", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit routes +--- request +GET /uri +--- more_headers +Host: foo.com:1984 +--- response_body +uri: /uri +host: foo.com:1984 +x-real-ip: 127.0.0.1 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/https-proxy.t b/CloudronPackages/APISIX/apisix-source/t/node/https-proxy.t new file mode 100644 index 0000000..efe2090 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/https-proxy.t @@ -0,0 +1,163 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: add route to HTTPS upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "127.0.0.1:1983": 1 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit the upstream +--- request +GET /hello +--- more_headers +host: www.sni.com +--- error_log +Receive SNI: www.sni.com + + + +=== TEST 3: use 443 as the default port +--- apisix_yaml +routes: + - + uri: /hello + upstream: + scheme: https + nodes: + "127.0.0.1": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 502 +--- error_log +upstream: "https://127.0.0.1:443/hello" + + + +=== TEST 4: use 80 as the http's default port +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1": 1 + type: roundrobin +#END +--- request +GET /hello +--- error_code: 502 +--- error_log +upstream: "http://127.0.0.1:80/hello" + + + +=== TEST 5: rewrite SNI +--- log_level: debug +--- apisix_yaml +routes: + - + uri: /uri + upstream: + scheme: https + nodes: + "127.0.0.1:1983": 1 + type: roundrobin + pass_host: "rewrite" + upstream_host: 
"www.test.com" +#END +--- request +GET /uri +--- more_headers +host: www.sni.com +--- error_log +Receive SNI: www.test.com +--- response_body +uri: /uri +host: www.test.com +x-real-ip: 127.0.0.1 + + + +=== TEST 6: node's SNI +--- log_level: debug +--- apisix_yaml +routes: + - + uri: /uri + upstream: + scheme: https + nodes: + "localhost:1983": 1 + type: roundrobin + pass_host: "node" +#END +--- request +GET /uri +--- more_headers +host: www.sni.com +--- error_log +Receive SNI: localhost +--- response_body +uri: /uri +host: localhost:1983 +x-real-ip: 127.0.0.1 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/invalid-port.t b/CloudronPackages/APISIX/apisix-source/t/node/invalid-port.t new file mode 100755 index 0000000..a058769 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/invalid-port.t @@ -0,0 +1,105 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream with a invalid node port +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [{ + "port": 65536, + "host": "127.0.0.1", + "weight": 1 + }], + "type": "roundrobin" + }]] + ) + + ngx.status = code + + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like +{"error_msg":"invalid configuration: property \\\"nodes\\\" validation failed: object matches none of the required"} + + + +=== TEST 2: set upstream with a node port greater than 65535 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:65536": 1 + } + }]] + ) + + ngx.status = code + + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like +{"error_msg":"invalid port 65536"} + + + +=== TEST 3: set upstream with a node port less than 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:0": 1 + } + }]] + ) + + ngx.status = code + + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like +{"error_msg":"invalid port 0"} diff --git a/CloudronPackages/APISIX/apisix-source/t/node/invalid-route.t b/CloudronPackages/APISIX/apisix-source/t/node/invalid-route.t new file mode 100644 index 0000000..6bf5a63 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/invalid-route.t @@ -0,0 +1,160 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set invalid route(id: 1) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local res, err = core.etcd.set("/routes/1", [[mexxxxxxxxxxxxxxx]]) + + if res.status >= 300 then + res.status = code + end + + ngx.print(require("toolkit.json").encode(res.body)) + ngx.sleep(1) + } + } +--- request +GET /t +--- wait: 1 +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr{invalid item data of \[/apisix/routes/1\], val: mexxxxxxxxxxxxxxx, it should be an object} +--- response_body_like eval +qr/"value":"mexxxxxxxxxxxxxxx"/ + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} +--- wait: 1 +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr{invalid item data of \[/apisix/routes/1\], val: mexxxxxxxxxxxxxxx, it should be an object} + + + +=== TEST 3: set valid route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 
300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- wait: 1 +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr{invalid item data of \[/apisix/routes/1\], val: mexxxxxxxxxxxxxxx, it should be an object} + + + +=== TEST 4: no error log +--- config + location /t { + content_by_lua_block { + ngx.sleep(1) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 5: set route(with invalid host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "key": "remote_addr", + "type": "chash", + "nodes": { + "xxxx.invalid:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit route +--- request +GET /server_port +--- error_code: 503 +--- error_log +failed to parse domain: xxxx.invalid +--- timeout: 10 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/invalid-service.t b/CloudronPackages/APISIX/apisix-source/t/node/invalid-service.t new file mode 100644 index 0000000..dbedcfa --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/invalid-service.t @@ -0,0 +1,115 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set invalid service(id: 1) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local res, err = core.etcd.set("/services/1", [[mexxxxxxxxxxxxxxx]]) + + if res.status >= 300 then + ngx.status = code + return ngx.say(res.body) + end + + ngx.print(require("toolkit.json").encode(res.body)) + ngx.sleep(1) + } + } +--- request +GET /t +--- wait: 1 +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr{invalid item data of \[/apisix/services/1\], val: mexxxxxxxxxxxxxxx, it should be an object} +--- response_body_like eval +qr/"value":"mexxxxxxxxxxxxxxx"/ + + + +=== TEST 2: try /not_found, got error log +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} +--- wait: 1 +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr{invalid item data of \[/apisix/services/1\], val: mexxxxxxxxxxxxxxx, it should be an object} + + + +=== TEST 3: set valid service(id: 1), cover the old one +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local res, err = core.etcd.set("/services/1", core.json.decode([[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]])) + + if res.status >= 300 then + ngx.status = code + end + + ngx.print(require("toolkit.json").encode(res.body)) + } + } +--- request +GET /t +--- ret_code: 
200 +--- response_body_like eval +qr/"nodes":\{"127.0.0.1:1980":1\}/ +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr{invalid item data of \[/apisix/services/1\], val: mexxxxxxxxxxxxxxx, it should be an object} + + + +=== TEST 4: no error log +--- config + location /t { + content_by_lua_block { + ngx.sleep(1) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done diff --git a/CloudronPackages/APISIX/apisix-source/t/node/invalid-upstream.t b/CloudronPackages/APISIX/apisix-source/t/node/invalid-upstream.t new file mode 100644 index 0000000..0191807 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/invalid-upstream.t @@ -0,0 +1,132 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set invalid upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local res, err = core.etcd.set("/upstreams/1", [[mexxxxxxxxxxxxxxx]]) + + if res.status >= 300 then + res.status = code + end + + ngx.print(require("toolkit.json").encode(res.body)) + ngx.sleep(1) + } + } +--- request +GET /t +--- wait: 1 +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr{invalid item data of \[/apisix/upstreams/1\], val: mexxxxxxxxxxxxxxx, it should be an object} +--- response_body_like eval +qr/"value":"mexxxxxxxxxxxxxxx"/ + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} +--- wait: 1 +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr{invalid item data of \[/apisix/upstreams/1\], val: mexxxxxxxxxxxxxxx, it should be an object} + + + +=== TEST 3: delete invalid upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local res, err = core.etcd.delete("/upstreams/1") + + if res.status >= 300 then + res.status = code + end + + ngx.say("passed") + ngx.sleep(1) + } + } +--- request +GET /t +--- response_body +passed +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr{invalid item data of \[/apisix/upstreams/1\], val: mexxxxxxxxxxxxxxx, it should be an object} + + + +=== TEST 4: set valid upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local res, err = core.etcd.set("/upstreams/1", core.json.decode([[{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }]])) + if res.status >= 300 then + res.status = code + end + ngx.print(require("toolkit.json").encode(res.body)) + ngx.sleep(1) + } + } +--- 
request +GET /t +--- response_body_like eval +qr/"nodes":\{"127.0.0.1:1980":1\}/ + + + +=== TEST 5: no error log +--- config + location /t { + content_by_lua_block { + ngx.sleep(1) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done diff --git a/CloudronPackages/APISIX/apisix-source/t/node/least_conn.t b/CloudronPackages/APISIX/apisix-source/t/node/least_conn.t new file mode 100644 index 0000000..174252f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/least_conn.t @@ -0,0 +1,151 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +log_level('info'); +no_root_location(); +worker_connections(1024); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + my $route = <<_EOC_; +routes: + - upstream_id: 1 + uris: + - /mysleep +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . 
$route); + + if (!$block->request) { + $block->set_value("request", "GET /mysleep?seconds=0.1"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: select highest weight +--- apisix_yaml +upstreams: + - id: 1 + type: least_conn + nodes: + "127.0.0.1:1980": 2 + "127.0.0.1:1981": 1 +--- grep_error_log eval +qr/proxy request to \S+ while connecting to upstream/ +--- grep_error_log_out +proxy request to 127.0.0.1:1980 while connecting to upstream + + + +=== TEST 2: select least conn +--- apisix_yaml +upstreams: + - id: 1 + type: least_conn + nodes: + "127.0.0.1:1980": 3 + "0.0.0.0:1980": 2 +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/mysleep?seconds=0.1" + + local t = {} + for i = 1, 3 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri..i, {method = "GET"}) + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + } + } +--- request +GET /t +--- grep_error_log eval +qr/proxy request to \S+ while connecting to upstream/ +--- grep_error_log_out +proxy request to 127.0.0.1:1980 while connecting to upstream +proxy request to 0.0.0.0:1980 while connecting to upstream +proxy request to 127.0.0.1:1980 while connecting to upstream + + + +=== TEST 3: retry +--- apisix_yaml +upstreams: + - id: 1 + type: least_conn + nodes: + "127.0.0.1:1999": 2 + "127.0.0.1:1980": 1 +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+ while connecting to upstream/ +--- grep_error_log_out +proxy request to 127.0.0.1:1999 while connecting to upstream +proxy request to 127.0.0.1:1980 while connecting to upstream + + + +=== TEST 4: retry all nodes, failed +--- apisix_yaml +upstreams: + - id: 1 + type: least_conn + nodes: + "127.0.0.1:1999": 2 + "0.0.0.0:1999": 1 +--- error_log +connect() failed +--- 
error_code: 502 +--- grep_error_log eval +qr/proxy request to \S+ while connecting to upstream/ +--- grep_error_log_out +proxy request to 127.0.0.1:1999 while connecting to upstream +proxy request to 0.0.0.0:1999 while connecting to upstream diff --git a/CloudronPackages/APISIX/apisix-source/t/node/least_conn2.t b/CloudronPackages/APISIX/apisix-source/t/node/least_conn2.t new file mode 100644 index 0000000..2a6f07c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/least_conn2.t @@ -0,0 +1,105 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); +log_level('info'); +no_root_location(); +worker_connections(1024); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: upstream across multiple routes should not share the same version +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "least_conn", + "nodes": { + "127.0.0.1:1980": 3, + "0.0.0.0:1980": 2 + } + }]] + ) + assert(code < 300, body) + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "host": "1.com", + "uri": "/mysleep", + "upstream_id": "1" + }]] + ) + assert(code < 300, body) + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "host": "2.com", + "uri": "/mysleep", + "upstream_id": "1" + }]] + ) + assert(code < 300, body) + } + } + + + +=== TEST 2: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/mysleep?seconds=0.1" + + local t = {} + for i = 1, 2 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {Host = i..".com"}}) + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + } + } +--- grep_error_log eval +qr/proxy request to \S+ while connecting to upstream/ +--- grep_error_log_out +proxy request to 127.0.0.1:1980 while connecting to upstream +proxy request to 0.0.0.0:1980 while connecting to upstream diff --git a/CloudronPackages/APISIX/apisix-source/t/node/merge-route.t b/CloudronPackages/APISIX/apisix-source/t/node/merge-route.t new file mode 100644 index 0000000..b09b272 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/merge-route.t @@ -0,0 +1,511 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +worker_connections(256); +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: set service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set route (different upstream) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.2) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_port", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: hit routes +--- request +GET /server_port +--- response_headers +X-RateLimit-Limit: 2 +X-RateLimit-Remaining: 1 +--- response_body eval +qr/1981/ + + + +=== TEST 5: set route with empty plugins, should do nothing +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.2) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": {}, + "uri": "/server_port", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit routes +--- request +GET /server_port +--- response_headers +X-RateLimit-Limit: 2 
+X-RateLimit-Remaining: 1 +--- response_body eval +qr/1980/ + + + +=== TEST 7: disable plugin `limit-count` +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.2) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "_meta": { + "disable": true + } + } + }, + "uri": "/server_port", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit routes +--- request +GET /server_port +--- raw_response_headers_unlike eval +qr/X-RateLimit-Limit/ +--- response_body eval +qr/1980/ + + + +=== TEST 9: hit routes two times, checker service configuration +--- config +location /t { + content_by_lua_block { + ngx.sleep(0.5) + local t = require("lib.test_admin").test + local code, body = t('/server_port', + ngx.HTTP_GET + ) + ngx.say(body) + + code, body = t('/server_port', + ngx.HTTP_GET + ) + ngx.say(body) + } +} +--- request +GET /t +--- error_log eval +[qr/merge_service_route.*"time_window":60/, +qr/merge_service_route.*"time_window":60/] + + + +=== TEST 10: set service(only upstream with host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "scheme": "http", + "type": "roundrobin", + "nodes": { + "test.com:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: set route(bind service 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/fake", + "host": "test.com", + "plugins": { + "proxy-rewrite": { + 
"uri": "/echo" + } + }, + "service_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit route +--- request +GET /fake +--- more_headers +host: test.com +--- response_headers +host: test.com + + + +=== TEST 13: not hit route +--- request +GET /fake +--- more_headers +host: test.comxxx +--- error_code: 404 + + + +=== TEST 14: enabled websocket in service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "enable_websocket": true, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: set route(bind service 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/33', + ngx.HTTP_PUT, + [[{ + "uri": "/uri", + "service_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit route +--- request +GET /uri +--- response_body +uri: /uri +connection: close +host: localhost +x-real-ip: 127.0.0.1 +--- error_log +enabled websocket for route: 33 + + + +=== TEST 17: delete rout +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/33', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: labels exist if only route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + 
"nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + local core = require(\"apisix.core\"); + ngx.say(core.json.encode(ctx.matched_route.value.labels)); + end"] + } + }, + "labels": { + "version": "v2" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: hit routes +--- request +GET /hello +--- response_body +{"version":"v2"} + + + +=== TEST 20: labels exist if merge route and service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.sleep(0.6) -- wait for sync + + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + local core = require(\"apisix.core\"); + ngx.say(core.json.encode(ctx.matched_route.value.labels)); + end"] + } + }, + "labels": { + "version": "v2" + }, + "uri": "/hello", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: hit routes +--- request +GET /hello +--- response_body +{"version":"v2"} diff --git a/CloudronPackages/APISIX/apisix-source/t/node/not-exist-service.t b/CloudronPackages/APISIX/apisix-source/t/node/not-exist-service.t new file mode 100644 index 0000000..8280f9b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/not-exist-service.t @@ -0,0 +1,103 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: invalid service id +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local res, err = core.etcd.set("/routes/1", { + service_id = "999999999", + uri = "/hello" + }) + + + if res.status >= 300 then + ngx.status = res.status + return ngx.exit(res.status) + end + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit routes +--- request +GET /hello +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ +--- wait_etcd_sync: 0.3 +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr/failed to fetch service configuration by id/ + + + +=== TEST 3: set valid route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit routes +--- request +GET /hello +--- 
response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/not-exist-upstream.t b/CloudronPackages/APISIX/apisix-source/t/node/not-exist-upstream.t new file mode 100644 index 0000000..2ad78ce --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/not-exist-upstream.t @@ -0,0 +1,82 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": {}, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: hit routes +--- request +GET /hello +--- error_code_like: ^(?:50\d)$ +--- response_body eval +qr/502 Bad Gateway|503 Service Temporarily Unavailable/ +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr/missing upstream configuration in Route or Service/ diff --git a/CloudronPackages/APISIX/apisix-source/t/node/plugin-configs.t b/CloudronPackages/APISIX/apisix-source/t/node/plugin-configs.t new file mode 100644 index 0000000..f601ae8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/plugin-configs.t @@ -0,0 +1,410 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: change plugin config will cause the conf_version change +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "hello" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugin_config_id": 1, + "plugins": { + "example-plugin": { + "i": 1 + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.1) + + local code, err, org_body = t('/hello') + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.say(org_body) + + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PATCH, + [[{ + "plugins": { + "response-rewrite": { + "body": "world" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.1) + + local code, err, org_body = t('/hello') + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.say(org_body) + } + } +--- response_body +hello +world +--- grep_error_log eval +qr/conf_version: \d+#\d+/ +--- grep_error_log_out 
eval +qr/conf_version: \d+#\d+ +conf_version: \d+#\d+ +/ + + + +=== TEST 2: validated plugins configuration via incremental sync +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local core = require("apisix.core") + + assert(core.etcd.set("/plugin_configs/1", + {id = 1, plugins = { ["uri-blocker"] = { block_rules = {"root.exe","root.m+"} }}} + )) + -- wait for sync + ngx.sleep(0.6) + + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello?x=root.exe" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + + ngx.status = res.status + ngx.say(uri) + ngx.say(res.body) + + } + } +--- error_code: 403 + + + +=== TEST 3: validated plugins configuration via incremental sync (malformed data) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local core = require("apisix.core") + + assert(core.etcd.set("/plugin_configs/1", + {id = 1, plugins = { ["uri-blocker"] = { block_rules = 1 }}} + )) + -- wait for sync + ngx.sleep(0.6) + + assert(core.etcd.delete("/plugin_configs/1")) + } + } +--- error_log +property "block_rules" validation failed + + + +=== TEST 4: recover plugin when plugin_config changed +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "hello" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugin_config_id": 1 + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.1) + + local code, err, org_body = t('/hello') + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + 
ngx.say(org_body) + + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, err, org_body = t('/hello') + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.print(org_body) + } + } +--- response_body +hello +hello world + + + +=== TEST 5: don't override the plugin in the route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + }, + "response-rewrite": { + "body": "hello" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/helloaa", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugin_config_id": 1, + "plugins": { + "response-rewrite": { + "body": "world" + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.1) + + local code, err, org_body = t('/helloaa') + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.say(org_body) + } + } +--- response_body +world + + + +=== TEST 6: use the latest plugin_consigs after merge the plugins from consumer and route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "whitelist": ["1.1.1.1"] + }, + "basic-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + 
local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugin_config_id": "1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local headers = { + ["Authorization"] = "Basic Zm9vOmJhcg==" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + ngx.print(res.body) + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "whitelist": ["1.1.1.1", "127.0.0.1"] + }, + "basic-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local res, err = httpc:request_uri(uri, {headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +{"message":"Your IP address is not allowed"} +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/plugin.t b/CloudronPackages/APISIX/apisix-source/t/node/plugin.t new file mode 100644 index 0000000..80d964b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/plugin.t @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: set custom log format +--- extra_init_by_lua + local exp = require("apisix.plugins.example-plugin") + exp.destroy = function() + ngx.log(ngx.WARN, "destroy method called") + end +--- config + location /t { + return 200 "dummy"; + } +--- shutdown_error_log +destroy method called diff --git a/CloudronPackages/APISIX/apisix-source/t/node/plugin1.t b/CloudronPackages/APISIX/apisix-source/t/node/plugin1.t new file mode 100644 index 0000000..43500b5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/plugin1.t @@ -0,0 +1,104 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use strict; +use warnings FATAL => 'all'; +use t::APISIX 'no_plan'; + +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /apisix/admin/routes"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); +run_tests; +__DATA__ + +=== TEST 1: set up configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + } + } + }]] + ) + if code >= 300 then + ngx.say(body) + return + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "proxy-rewrite": { + "headers": { + "add": { + "xtest": "123" + } + } + }, + "serverless-post-function": { + "functions": [ + "return function(conf, ctx) \n ngx.say(ngx.req.get_headers().xtest); \n end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + ngx.say(body) + } + } +--- request +GET /t +--- timeout: 15 +--- response_body +passed + + + +=== TEST 2: the proxy-rewrite runs at 'rewrite' phase and should get executed only once, hence the response body is expected '123' not '123123' +--- request +GET /hello +--- more_headers +apikey: auth-jack +--- timeout: 15 +--- response_body +123 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/priority-balancer/health-checker.t b/CloudronPackages/APISIX/apisix-source/t/node/priority-balancer/health-checker.t new file mode 100644 index 0000000..cd970c6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/priority-balancer/health-checker.t @@ -0,0 +1,184 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +worker_connections(1024); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ($block->apisix_yaml) { + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + my $route = <<_EOC_; +routes: + - upstream_id: 1 + uris: + - /hello +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $route); + } + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: all are down detected by health checker +--- apisix_yaml +upstreams: + - id: 1 + type: least_conn + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: 123 + - host: 127.0.0.2 + port: 1979 + weight: 3 + priority: -1 + checks: + active: + http_path: "/status" + healthy: + interval: 1 + successes: 1 + unhealthy: + interval: 1 + http_failures: 1 +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local httpc = http.new() + httpc:request_uri(uri, {method = "GET"}) + ngx.sleep(2.5) + -- still use all nodes + httpc:request_uri(uri, {method = "GET"}) + } + } +--- request +GET /t +--- error_log +connect() failed +unhealthy TCP increment (2/2) for '127.0.0.1(127.0.0.1:1979) +unhealthy TCP increment (2/2) for '127.0.0.2(127.0.0.2:1979) +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 + + + +=== TEST 2: use priority as backup (setup rule) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": [ + {"host": "127.0.0.1", "port": 1979, "weight": 2000}, + {"host": "127.0.0.1", "port": 1980, + "weight": 1, "priority": -1} + ], + "checks": { + "active": { + "http_path": "/status", + "healthy": { + "interval": 1, + "successes": 1 + }, + "unhealthy": { + "interval": 1, + "http_failures": 1 + } + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: use priority as backup +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local httpc = http.new() + httpc:request_uri(uri, {method = "GET"}) + ngx.sleep(2.5) + httpc:request_uri(uri, {method = "GET"}) + } + } +--- request +GET /t +--- error_log +connect() failed +unhealthy TCP increment (2/2) for '127.0.0.1(127.0.0.1:1979) +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.1:1980 +proxy request to 127.0.0.1:1980 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/priority-balancer/sanity.t b/CloudronPackages/APISIX/apisix-source/t/node/priority-balancer/sanity.t new file mode 100644 index 0000000..11acc7f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/priority-balancer/sanity.t @@ -0,0 +1,322 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); # repeat each test to ensure after_balance is called correctly +log_level('info'); +no_root_location(); +worker_connections(1024); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ($block->apisix_yaml) { + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + my $route = <<_EOC_; +routes: + - upstream_id: 1 + uris: + - /hello + - /mysleep +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $route); + } + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_yaml +upstreams: + - id: 1 + type: least_conn + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: 1 + - host: 127.0.0.2 + port: 1979 + weight: 1 + priority: 1 + - host: 127.0.0.3 + port: 1979 + weight: 2 + priority: 0 + - host: 127.0.0.4 + port: 1979 + weight: 1 + priority: 0 + - host: 127.0.0.1 + port: 1980 + weight: 2 + priority: -1 +--- response_body +hello world +--- error_log +connect() failed +failed to get server from current priority 1, try next one +failed to get server from current priority 0, try next one +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 +proxy request to 127.0.0.3:1979 +proxy request to 127.0.0.4:1979 +proxy request to 127.0.0.1:1980 + + + +=== TEST 2: all failed +--- apisix_yaml +upstreams: + - id: 1 + type: least_conn + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: 1 + - host: 127.0.0.2 + port: 1979 + weight: 1 + priority: 0 + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: -1 +--- error_code: 502 +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+/ 
+--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 +proxy request to 127.0.0.1:1979 + + + +=== TEST 3: default priority is zero +--- apisix_yaml +upstreams: + - id: 1 + type: least_conn + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: 1 + - host: 127.0.0.2 + port: 1979 + weight: 1 + - host: 127.0.0.1 + port: 1980 + weight: 2 + priority: -1 +--- response_body +hello world +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 +proxy request to 127.0.0.1:1980 + + + +=== TEST 4: least_conn +--- apisix_yaml +upstreams: + - id: 1 + type: least_conn + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: 1 + - host: 127.0.0.1 + port: 1980 + weight: 3 + priority: -1 + - host: 0.0.0.0 + port: 1980 + weight: 2 + priority: -1 +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/mysleep?seconds=0.1" + + local t = {} + for i = 1, 3 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + -- the retry can be happened before starting the new request + -- so we exclude all the first tries from the expected log + local res, err = httpc:request_uri(uri..i, {method = "GET"}) + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + } + } +--- request +GET /t +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+:1980 while connecting to upstream/ +--- grep_error_log_out +proxy request to 127.0.0.1:1980 while connecting to upstream +proxy request to 0.0.0.0:1980 while connecting to upstream +proxy request to 127.0.0.1:1980 while connecting to upstream + + + +=== TEST 5: roundrobin +--- apisix_yaml +upstreams: + - id: 1 + type: roundrobin + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 1000 + priority: 1 + - host: 127.0.0.2 + port: 1979 + weight: 1 + priority: 1 + - host: 127.0.0.3 + port: 1979 + weight: 1000 + priority: -1 + - host: 127.0.0.4 + port: 1979 + weight: 1 + priority: -1 +--- error_code: 502 +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 +proxy request to 127.0.0.3:1979 +proxy request to 127.0.0.4:1979 + + + +=== TEST 6: ewma +--- apisix_yaml +upstreams: + - id: 1 + type: ewma + key: remote_addr + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: 1 + - host: 127.0.0.2 + port: 1979 + weight: 1 + priority: 0 + - host: 127.0.0.3 + port: 1979 + weight: 2 + priority: -1 +--- error_code: 502 +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 +proxy request to 127.0.0.3:1979 + + + +=== TEST 7: chash +--- apisix_yaml +upstreams: + - 
id: 1 + type: chash + key: remote_addr + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: 1 + - host: 127.0.0.2 + port: 1979 + weight: 1 + priority: 1 + - host: 127.0.0.3 + port: 1979 + weight: 2 + priority: -1 + - host: 127.0.0.4 + port: 1979 + weight: 1 + priority: -1 +--- error_code: 502 +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 +proxy request to 127.0.0.4:1979 +proxy request to 127.0.0.3:1979 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/remote-addr-ipv6.t b/CloudronPackages/APISIX/apisix-source/t/node/remote-addr-ipv6.t new file mode 100644 index 0000000..138ef73 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/remote-addr-ipv6.t @@ -0,0 +1,118 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route: remote addr = ::1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "::1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: IPv6 /not_found +--- listen_ipv6 +--- config +location /t { + content_by_lua_block { + ngx.sleep(0.2) + local t = require("lib.test_admin").test_ipv6 + t('/not_found') + } +} +--- request +GET /t +--- response_body eval +qr/"error_msg":"404 Route Not Found"/ + + + +=== TEST 3: IPv4 /not_found +--- listen_ipv6 +--- request +GET /not_found +--- error_code: 404 +--- response_body eval +qr/"error_msg":"404 Route Not Found"/ + + + +=== TEST 4: IPv6 /hello +--- listen_ipv6 +--- config +location /t { + content_by_lua_block { + ngx.sleep(0.2) + local t = require("lib.test_admin").test_ipv6 + t('/hello') + } +} +--- request +GET /t +--- response_body eval +qr{connected: 1 +request sent: 59 +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? 
+received: +received: hello world +failed to receive a line: closed \[\] +close: 1 nil} + + + +=== TEST 5: IPv4 /hello +--- listen_ipv6 +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} diff --git a/CloudronPackages/APISIX/apisix-source/t/node/remote-addr.t b/CloudronPackages/APISIX/apisix-source/t/node/remote-addr.t new file mode 100644 index 0000000..b76a26b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/remote-addr.t @@ -0,0 +1,154 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route: remote addr = 127.0.0.1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: hit route +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: set route: remote addr = 127.0.0.2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.2", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: not hit route: 127.0.0.2 =~ 127.0.0.1 +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 6: remote addr: 127.0.0.3/24 =~ 127.0.0.1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.3/24", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit route +--- request +GET /hello +--- response_body 
+hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/remote_addrs.t b/CloudronPackages/APISIX/apisix-source/t/node/remote_addrs.t new file mode 100644 index 0000000..0bc3bfb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/remote_addrs.t @@ -0,0 +1,111 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "remote_addrs": ["192.0.0.0/8", "127.0.0.3"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- http_config +real_ip_header X-Real-IP; +set_real_ip_from 127.0.0.1; +set_real_ip_from unix:; +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: /not_found +--- http_config +real_ip_header X-Real-IP; +set_real_ip_from 127.0.0.1; +set_real_ip_from unix:; +--- request +GET /hello +--- more_headers +Host: not_found.com +--- error_code: 404 + + + +=== TEST 4: hit routes +--- http_config +real_ip_header X-Real-IP; +set_real_ip_from 127.0.0.1; +set_real_ip_from unix:; +--- request +GET /hello +--- more_headers +X-Real-IP: 192.168.1.100 +--- response_body +hello world + + + +=== TEST 5: hit routes +--- http_config +real_ip_header X-Real-IP; +set_real_ip_from 127.0.0.1; +set_real_ip_from unix:; +--- request +GET /hello +--- more_headers +X-Real-IP: 127.0.0.3 +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/route-delete.t b/CloudronPackages/APISIX/apisix-source/t/node/route-delete.t new file mode 100644 index 0000000..c5f0ac2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/route-delete.t @@ -0,0 +1,142 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: clear all routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for i = 1, 200 do + t('/apisix/admin/routes/' .. i, ngx.HTTP_DELETE) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- timeout: 5 + + + +=== TEST 2: create 106 routes + delete them +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for i = 1, 106 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + for i = 1, 106 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + for i = 1, 106 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_DELETE + ) + end + + ngx.sleep(0.5) + + for i = 1, 106 do + local code, body = t('/apisix/admin/routes/' .. 
i, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. i .. [[" + }]] + ) + end + + ngx.sleep(0.5) + + for i = 1, 106 do + local code, body = t('/apisix/admin/routes/' .. i, + ngx.HTTP_DELETE + ) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- wait: 1 +--- grep_error_log eval +qr/\w+ (data by key: 103)/ +--- grep_error_log_out +insert data by key: 103 +insert data by key: 103 +update data by key: 103 +update data by key: 103 +delete data by key: 103 +delete data by key: 103 +insert data by key: 103 +insert data by key: 103 +delete data by key: 103 +delete data by key: 103 +--- timeout: 30 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/route-domain-with-local-dns.t b/CloudronPackages/APISIX/apisix-source/t/node/route-domain-with-local-dns.t new file mode 100644 index 0000000..ee4ec5d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/route-domain-with-local-dns.t @@ -0,0 +1,86 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +BEGIN { + # for test + $ENV{ENABLE_LOCAL_DNS} = "true"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "www.apiseven.com:80": 0 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- error_log eval +qr/.*init_resolver\(\): dns resolver \[.+\]/ + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} +--- error_log eval +qr/.*init_resolver\(\): dns resolver \[.+\]/ + + + +=== TEST 3: hit route +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/dns resolver domain: www.apiseven.com to \d+.\d+.\d+.\d+/ diff --git a/CloudronPackages/APISIX/apisix-source/t/node/route-domain.t b/CloudronPackages/APISIX/apisix-source/t/node/route-domain.t new file mode 100644 index 0000000..729aceb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/route-domain.t @@ -0,0 +1,212 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "www.apiseven.com:80": 0 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: hit route +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/dns resolver domain: www.apiseven.com to \d+.\d+.\d+.\d+/ +--- timeout: 10 + + + +=== TEST 4: set route(id: 1, using `rewrite` mode to pass upstream host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "pass_host": "rewrite", + "upstream_host": "test.com" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit route +--- request +GET /echo +--- response_headers +host: test.com + + + +=== TEST 6: 
set route(id: 1, using `node` mode to pass upstream host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "test.com:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream", + "pass_host": "node" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit route +--- request +GET /echo +--- response_headers +host: test.com:1980 + + + +=== TEST 8: test domain with roundrobin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "localhost:1981": 2, + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_port" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 3 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- request +GET /t +--- response_body +1980, 1981, 1981 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/route-filter-func.t b/CloudronPackages/APISIX/apisix-source/t/node/route-filter-func.t new file mode 100644 index 0000000..2824837 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/route-filter-func.t @@ -0,0 +1,74 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "filter_func": "function(vars) return vars.arg_name == 'json' end", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: not hit: name=unknown +--- request +GET /hello?name=unknown +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: hit routes +--- request +GET /hello?name=json +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/route-host.t b/CloudronPackages/APISIX/apisix-source/t/node/route-host.t new file mode 100644 index 0000000..1cea20b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/route-host.t @@ -0,0 +1,160 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: /not_found +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: /not_found +--- request +GET /hello +--- more_headers +Host: not_found.com +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 5: hit routes +--- request +GET /hello +--- more_headers +Host: foo.com +--- response_body +hello world + + + +=== TEST 6: hit routes, uppercase +--- request +GET /hello +--- more_headers +Host: FOO.com +--- response_body +hello world + + + +=== TEST 7: set route(host is uppercase) +--- config + location /t { + content_by_lua_block 
{ + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "host": "FOO.com", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit routes +--- request +GET /hello +--- more_headers +Host: foo.com +--- response_body +hello world + + + +=== TEST 9: hit routes, uppercase +--- request +GET /hello +--- more_headers +Host: FOO.com +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/route-status.t b/CloudronPackages/APISIX/apisix-source/t/node/route-status.t new file mode 100644 index 0000000..434afa0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/route-status.t @@ -0,0 +1,252 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: default enable route(id: 1) with uri match +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 3: disable route +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + + local data = {status = 0} + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: route not found, failed by disable +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 5: default enable route(id: 1) with host_uri match +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit route +--- request +GET /hello +--- more_headers +Host: foo.com +--- response_body +hello world + + + +=== TEST 7: disable route +--- config + location /t { + content_by_lua_block { + local core = 
require("apisix.core") + local t = require("lib.test_admin").test + + local data = {status = 0} + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: route not found, failed by disable +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 9: specify an invalid status value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "status": 100, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid configuration: property \\"status\\" validation failed: matches none of the enum values"\}/ + + + +=== TEST 10: compatible with old route data in etcd which not has status +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local res, err = core.etcd.set("/routes/1", core.json.decode([[{ + "uri": "/hello", + "priority": 0, + "id": "1", + "upstream": { + "hash_on": "vars", + "pass_host": "pass", + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]])) ---mock old route data in etcd + if res.status >= 300 then + res.status = code + end + ngx.print(require("toolkit.json").encode(res.body)) + ngx.sleep(1) + } + } +--- request +GET /t +--- response_body_unlike eval +qr/status/ + + + +=== TEST 11: hit route(old route data in etcd) +--- request +GET /hello +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/route-uris.t b/CloudronPackages/APISIX/apisix-source/t/node/route-uris.t 
new file mode 100644 index 0000000..2b5fdf7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/route-uris.t @@ -0,0 +1,80 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello","/hello1"] + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 + + + +=== TEST 3: hit routes1 +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: hit routes2 +--- request +GET /hello1 +--- response_body +hello1 world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/rr-balance.t b/CloudronPackages/APISIX/apisix-source/t/node/rr-balance.t new file mode 100644 index 0000000..74bbf9e --- 
/dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/rr-balance.t @@ -0,0 +1,316 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(two upstream node) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit routes +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":6,"port":"1981"},{"count":6,"port":"1980"}] + + + +=== TEST 3: set route(three upstream node) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1, + "127.0.0.1:1982": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit routes +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":4,"port":"1982"},{"count":4,"port":"1981"},{"count":4,"port":"1980"}] + + + +=== TEST 5: set route(three upstream node and different weight) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 3, + "127.0.0.1:1981": 2, + "127.0.0.1:1982": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit routes +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":2,"port":"1982"},{"count":4,"port":"1981"},{"count":6,"port":"1980"}] + + + +=== TEST 7: set route(weight is 0) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 3, + "127.0.0.1:1981": 0, + "127.0.0.1:1982": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit routes +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 12 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +[{"count":3,"port":"1982"},{"count":9,"port":"1980"}] diff --git a/CloudronPackages/APISIX/apisix-source/t/node/sanity-radixtree.t b/CloudronPackages/APISIX/apisix-source/t/node/sanity-radixtree.t new file mode 100644 index 0000000..21a4cb2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/sanity-radixtree.t @@ -0,0 +1,139 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: hit routes +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit routes:/hello +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 6: hit routes: /hello1 +--- request +GET /hello1 +--- response_body +hello1 world + + + +=== TEST 7: hit routes: /hello2 +--- request +GET /hello2 +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ + + + +=== TEST 8: hit routes: /hel +--- request +GET /hel +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} diff --git a/CloudronPackages/APISIX/apisix-source/t/node/service-empty.t b/CloudronPackages/APISIX/apisix-source/t/node/service-empty.t new file mode 100644 index 0000000..9b32d0b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/service-empty.t @@ 
-0,0 +1,89 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set empty service. (id: 1)(allow empty `service` object) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + '{}' + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: route binding empty service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "service_id": "1", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: /hello +--- request +GET /hello +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/ssl-protocols.t 
b/CloudronPackages/APISIX/apisix-source/t/node/ssl-protocols.t new file mode 100644 index 0000000..2dcc02b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/ssl-protocols.t @@ -0,0 +1,298 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +my $openssl_bin = $ENV{OPENSSL_BIN}; +if (! -x $openssl_bin) { + $ENV{OPENSSL_BIN} = '/usr/local/openresty/openssl3/bin/openssl'; + if (! 
-x $ENV{OPENSSL_BIN}) { + plan(skip_all => "openssl3 not installed"); + } +} + +plan('no_plan'); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 + proxy_mode: http&stream + stream_proxy: + tcp: + - 9100 + enable_resolv_search_opt: false + ssl: + ssl_protocols: TLSv1.1 TLSv1.2 TLSv1.3 + ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA +_EOC_ + + $block->set_value("yaml_config", $yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create ssl for test.com (unset ssl_protocols) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "ssl_protocols": null, + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 3: Successfully, access test.com with TLSv1.3 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_3 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 4: Successfully, access test.com with TLSv1.2 +--- exec +curl -k -v --tls-max 1.2 --tlsv1.2 --resolve "test.com:1994:127.0.0.1" https://test.com:1994/hello 2>&1 | cat +--- response_body eval +qr/TLSv1\.2 \(IN\), TLS handshake, Server hello(?s).*hello world/ + + + +=== TEST 5: Successfully, access test.com with TLSv1.1 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_1 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 6: set TLSv1.2 and TLSv1.3 for test.com +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", ssl_protocols = {"TLSv1.2", "TLSv1.3"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "ssl_protocols": ["TLSv1.2", "TLSv1.3"], + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: Set TLSv1.3 for the test2.com +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test2.com", ssl_protocols = {"TLSv1.3"}} + + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test2.com" + }, + "key": "/apisix/ssls/2" + 
}]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed +--- request +GET /t + + + +=== TEST 8: Successfully, access test.com with TLSv1.3 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_3 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 9: Successfully, access test.com with TLSv1.2 +--- exec +curl -k -v --tls-max 1.2 --tlsv1.2 --resolve "test.com:1994:127.0.0.1" https://test.com:1994/hello 2>&1 | cat +--- response_body eval +qr/TLSv1\.2 \(IN\), TLS handshake, Server hello(?s).*hello world/ + + + +=== TEST 10: Successfully, access test2.com with TLSv1.3 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect 127.0.0.1:1994 -servername test2.com -tls1_3 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 11: Failed, access test2.com with TLSv1.2 +--- exec +curl -k -v --tls-max 1.2 --tlsv1.2 --resolve "test2.com:1994:127.0.0.1" https://test2.com:1994/hello 2>&1 | cat +--- response_body eval +qr/TLSv1\.2 \(IN\), TLS alert/ + + + +=== TEST 12: set TLSv1.1 for test.com +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", ssl_protocols = {"TLSv1.1"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "ssl_protocols": ["TLSv1.1"], + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: Successfully, access test.com with TLSv1.1 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_1 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 14: Failed, access 
test.com with TLSv1.3 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_3 2>&1 | cat +--- response_body eval +qr/tlsv1 alert/ diff --git a/CloudronPackages/APISIX/apisix-source/t/node/ssl.t b/CloudronPackages/APISIX/apisix-source/t/node/ssl.t new file mode 100644 index 0000000..3334736 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/ssl.t @@ -0,0 +1,243 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +BEGIN { + sub set_env_from_file { + my ($env_name, $file_path) = @_; + + open my $fh, '<', $file_path or die $!; + my $content = do { local $/; <$fh> }; + close $fh; + + $ENV{$env_name} = $content; + } + # set env + set_env_from_file('TEST_CERT', 't/certs/apisix.crt'); + set_env_from_file('TEST_KEY', 't/certs/apisix.key'); + set_env_from_file('TEST2_CERT', 't/certs/test2.crt'); + set_env_from_file('TEST2_KEY', 't/certs/test2.key'); +} + +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); + +sub set_env_from_file { + my ($env_name, $file_path) = @_; + + open my $fh, '<', $file_path or die $!; + my $content = do { local $/; <$fh> }; + close $fh; + + $ENV{$env_name} = $content; +} + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: store two certs and keys in vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/ssl \ + test.com.crt=@t/certs/apisix.crt \ + test.com.key=@t/certs/apisix.key \ + test.com.2.crt=@t/certs/test2.crt \ + test.com.2.key=@t/certs/test2.key +--- response_body +Success! 
Data written to: kv/apisix/ssl + + + +=== TEST 2: set secret +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/secrets/vault/test', + ngx.HTTP_PUT, + [[{ + "uri": "http://0.0.0.0:8200", + "prefix": "kv/apisix", + "token": "root" + }]], + [[{ + "key": "/apisix/secrets/vault/test", + "value": { + "uri": "http://0.0.0.0:8200", + "prefix": "kv/apisix", + "token": "root" + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: set ssl with two certs and keys in vault +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = { + snis = {"test.com"}, + key = "$secret://vault/test/ssl/test.com.key", + cert = "$secret://vault/test/ssl/test.com.crt", + keys = {"$secret://vault/test/ssl/test.com.2.key"}, + certs = {"$secret://vault/test/ssl/test.com.2.crt"} + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "snis": ["test.com"], + "key": "$secret://vault/test/ssl/test.com.key", + "cert": "$secret://vault/test/ssl/test.com.crt", + "keys": ["$secret://vault/test/ssl/test.com.2.key"], + "certs": ["$secret://vault/test/ssl/test.com.2.crt"] + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: access to https with test.com +--- exec +curl -s -k 
https://test.com:1994/hello +--- response_body +hello world +--- error_log +fetching data from secret uri +fetching data from secret uri +fetching data from secret uri +fetching data from secret uri + + + +=== TEST 6: set ssl with two certs and keys in env +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = { + snis = {"test.com"}, + key = "$env://TEST_KEY", + cert = "$env://TEST_CERT", + keys = {"$env://TEST2_KEY"}, + certs = {"$env://TEST2_CERT"} + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "snis": ["test.com"], + "key": "$env://TEST_KEY", + "cert": "$env://TEST_CERT", + "keys": ["$env://TEST2_KEY"], + "certs": ["$env://TEST2_CERT"] + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: access to https with test.com +--- exec +curl -s -k https://test.com:1994/hello +--- response_body +hello world +--- error_log +fetching data from env uri +fetching data from env uri +fetching data from env uri +fetching data from env uri diff --git a/CloudronPackages/APISIX/apisix-source/t/node/timeout-upstream.t b/CloudronPackages/APISIX/apisix-source/t/node/timeout-upstream.t new file mode 100644 index 0000000..07f2d09 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/timeout-upstream.t @@ -0,0 +1,191 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "timeout": { + "connect": 0.5, + "send": 0.5, + "read": 0.5 + } + }, + "uri": "/mysleep" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit routes (timeout) +--- request +GET /mysleep?seconds=1 +--- error_code: 504 +--- response_body eval +qr/504 Gateway Time-out/ +--- error_log +timed out) while reading response header from upstream + + + +=== TEST 3: set custom timeout for route(overwrite upstream timeout) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "timeout": { + "connect": 0.5, + "send": 0.5, + "read": 0.5 + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "timeout": { + "connect": 2, + "send": 2, + "read": 2 + } + }, + "uri": "/mysleep" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit routes (timeout) +--- request 
+GET /mysleep?seconds=1 +--- error_code: 504 +--- response_body eval +qr/504 Gateway Time-out/ +--- error_log +timed out) while reading response header from upstream + + + +=== TEST 5: set route inherit hosts from service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local scode, sbody = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "desc":"test-service", + "hosts": ["foo.com"] + }]] + ) + + if scode >= 300 then + ngx.status = scode + end + ngx.say(sbody) + + local rcode, rbody = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "service_id": "1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "timeout": { + "connect": 0.5, + "send": 0.5, + "read": 0.5 + } + }, + "uri": "/mysleep" + }]] + ) + + if rcode >= 300 then + ngx.status = rcode + end + ngx.say(rbody) + } + } +--- request +GET /t +--- response_body +passed +passed + + + +=== TEST 6: hit service route (timeout) +--- request +GET /mysleep?seconds=1 +--- more_headers +Host: foo.com +--- error_code: 504 +--- response_body eval +qr/504 Gateway Time-out/ +--- error_log +timed out) while reading response header from upstream diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-array-nodes.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-array-nodes.t new file mode 100644 index 0000000..31af4a7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-array-nodes.t @@ -0,0 +1,199 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [{ + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: hit routes +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== 
TEST 5: hit routes +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 6: set services(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": [{ + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + }], + "type": "roundrobin", + "desc": "new upstream" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit routes +--- request +GET /hello +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-discovery-dynamic.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-discovery-dynamic.t new file mode 100644 index 0000000..44d9d6e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-discovery-dynamic.t @@ -0,0 +1,133 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: dynamic host based discovery +--- extra_yaml_config +nginx_config: + worker_processes: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local discovery = require("apisix.discovery.init").discovery + local core = require("apisix.core") + discovery.demo_discover = { + nodes = function() + local demo_nodes_tab = { + a = { host = "127.0.0.1", port = 1111 }, + b = { host = "127.0.0.1", port = 2222 } + } + local host = ngx.var.host + local service_id = host:match("([^.]+).myhost.com") + local demo_node = demo_nodes_tab[service_id] + + local node_list = core.table.new(1, 0) + core.table.insert(node_list, { + host = demo_node.host, + port = tonumber(demo_node.port), + weight = 100, + }) + + return node_list + end + } + + local code, body = t('/apisix/admin/services/', + ngx.HTTP_PUT, + [[{ + "id": "demo_service", + "name": "demo_service", + "upstream": { + "discovery_type": "demo_discover", + "service_name": "demo_service", + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + + ngx.sleep(0.5) + + local code, body = t('/apisix/admin/routes/', + ngx.HTTP_PUT, + [[{ + "id": "demo_route", + "name": "demo_route", + "uri": "/*", + "hosts":[ + "*.myhost.com" + ], + "service_id": "demo_service" + }]] + ) + if code >= 300 then + ngx.status = code + end + + ngx.sleep(0.5) + + local hosts = { + "a.myhost.com", + "a.myhost.com", + "b.myhost.com", + 
"b.myhost.com", + "a.myhost.com", + "b.myhost.com", + "b.myhost.com", + "a.myhost.com", + "b.myhost.com", + "a.myhost.com", + } + + for i, url_host in ipairs(hosts) do + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false, headers = { + ["Host"] = url_host + }}) + end + } + } +--- request +GET /t +--- grep_error_log eval +qr/upstream: \S+, host: \S+/ +--- grep_error_log_out +upstream: "http://127.0.0.1:1111/", host: "a.myhost.com" +upstream: "http://127.0.0.1:1111/", host: "a.myhost.com" +upstream: "http://127.0.0.1:2222/", host: "b.myhost.com" +upstream: "http://127.0.0.1:2222/", host: "b.myhost.com" +upstream: "http://127.0.0.1:1111/", host: "a.myhost.com" +upstream: "http://127.0.0.1:2222/", host: "b.myhost.com" +upstream: "http://127.0.0.1:2222/", host: "b.myhost.com" +upstream: "http://127.0.0.1:1111/", host: "a.myhost.com" +upstream: "http://127.0.0.1:2222/", host: "b.myhost.com" +upstream: "http://127.0.0.1:1111/", host: "a.myhost.com" diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-discovery.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-discovery.t new file mode 100644 index 0000000..6be034a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-discovery.t @@ -0,0 +1,510 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if ($block->apisix_yaml) { + my $upstream = <<_EOC_; +upstreams: + - service_name: mock + discovery_type: mock + type: roundrobin + id: 1 +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $upstream); + } + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: create new server picker when nodes change +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1}, + {host = "0.0.0.0", port = 1980, weight = 1}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.2", port = 1980, weight = 1}, + {host = "127.0.0.3", port = 1980, weight = 1}, + } + end + } + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + } + } +--- grep_error_log eval +qr/create_obj_fun\(\): upstream nodes:/ +--- grep_error_log_out +create_obj_fun(): upstream nodes: +create_obj_fun(): upstream nodes: + + + +=== TEST 2: don't create new server picker if nodes don't change +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1}, + {host = "0.0.0.0", port = 1980, weight = 1}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + + discovery.mock = { + nodes = function() + return { + {host = "0.0.0.0", port = 1980, weight = 1}, + {host = "127.0.0.1", port = 1980, weight = 1}, + } + end + } + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + } + } +--- grep_error_log eval +qr/create_obj_fun\(\): upstream nodes:/ +--- grep_error_log_out +create_obj_fun(): upstream nodes: + + + +=== TEST 3: create new server picker when nodes change, up_conf doesn't come from upstream +--- apisix_yaml +routes: + - uris: + - /hello + service_id: 1 +services: + - id: 1 + upstream: + service_name: mock + discovery_type: mock + type: roundrobin +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1}, + {host = "0.0.0.0", port = 1980, weight = 1}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.2", port = 1980, weight = 1}, + {host = "0.0.0.0", port = 1980, weight = 1}, + } + end + } + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + } + } +--- grep_error_log eval +qr/create_obj_fun\(\): upstream nodes:/ +--- grep_error_log_out +create_obj_fun(): upstream nodes: +create_obj_fun(): upstream nodes: + + + +=== TEST 4: don't create new server picker if nodes don't change (port missing) +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", weight = 1}, + {host = "0.0.0.0", weight = 1}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + discovery.mock = { + nodes = function() + return { + {host = "0.0.0.0", weight = 1}, + {host = "127.0.0.1", weight = 1}, + } + end + } + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + } + } +--- grep_error_log eval +qr/create_obj_fun\(\): upstream nodes:/ +--- grep_error_log_out +create_obj_fun(): upstream nodes: +--- error_log +connect() failed + + + +=== TEST 5: create new server picker when priority change +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1}, + {host = "0.0.0.0", port = 1980, weight = 1}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1}, + {host = "0.0.0.0", port = 1980, weight = 1, priority = 1}, + } + end + } + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + } + } +--- grep_error_log eval +qr/create_obj_fun\(\): upstream nodes:/ +--- grep_error_log_out +create_obj_fun(): upstream nodes: +create_obj_fun(): upstream nodes: + + + +=== TEST 6: default priority of discovered node is 0 +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1979, weight = 1, priority = 1}, + {host = "0.0.0.0", port = 1980, weight = 1}, + {host = "127.0.0.2", port = 1979, weight = 1, priority = -1}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + } + } +--- error_log +connect() failed +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 0.0.0.0:1980 + + + +=== TEST 7: create new server picker when metadata change +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1, metadata = {a = 1}}, + {host = "0.0.0.0", port = 1980, weight = 1, metadata = {}}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1, metadata = {a = 1}}, + {host = "0.0.0.0", port = 1980, weight = 1, metadata = {b = 1}}, + } + end + } + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + } + } +--- grep_error_log eval +qr/create_obj_fun\(\): upstream nodes:/ +--- grep_error_log_out +create_obj_fun(): upstream nodes: +create_obj_fun(): upstream nodes: + + + +=== TEST 8: don't create new server picker when metadata doesn't change +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + local meta1 = {a = 1} + local meta2 = {b = 2} + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1, metadata = meta1}, + {host = "0.0.0.0", port = 1980, weight = 1, metadata = meta2}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = 1, metadata = meta1}, + {host = "0.0.0.0", port = 1980, weight = 1, metadata = meta2}, + } + end + } + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + } + } +--- grep_error_log eval +qr/create_obj_fun\(\): upstream nodes:/ +--- grep_error_log_out +create_obj_fun(): upstream nodes: + + + +=== TEST 9: bad nodes return by the discovery +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return { + {host = "127.0.0.1", port = 1980, weight = "0"}, + } + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + ngx.say(res.status) + } + } +--- response_body +503 +--- error_log +invalid nodes format: failed to validate item 1: property "weight" validation failed: wrong type: expected integer, got string + + + +=== TEST 10: compare nodes by value only once when nodes's address be changed but values are same +--- log_level: debug +--- apisix_yaml +routes: + - + uris: + - /hello + upstream_id: 1 +--- config + location /t { + content_by_lua_block { + local old_nodes = {{host = "127.0.0.1", port = 1980, weight = 1}, {host = "127.0.0.2", port = 1980, weight = 1}} + local discovery = require("apisix.discovery.init").discovery + discovery.mock = { + nodes = function() + return old_nodes + end + } + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + for i = 1, 10 do + local res = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if res.status ~= 200 then + ngx.say("request failed: ", res.status) + return + end + end + + local new_nodes = {{host = "127.0.0.2", port = 1980, weight = 1}, {host = "127.0.0.1", port = 1980, weight = 1}} + discovery.mock = { + nodes = function() + return new_nodes + end + } + for i = 1, 10 do + local res = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if res.status ~= 200 then + ngx.say("request failed: ", res.status) + return + end + end + ngx.say("pass") + } + } +--- response_body +pass +--- grep_error_log eval +qr/compare upstream nodes by value|fill node info for upstream/ +--- grep_error_log_out +fill node info for upstream +compare upstream nodes by value +fill node info for upstream diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-domain-with-special-dns.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-domain-with-special-dns.t new file mode 100644 index 0000000..650c87d --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/node/upstream-domain-with-special-dns.t @@ -0,0 +1,230 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{CUSTOM_DNS_SERVER} = "127.0.0.1:1053"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + my $routes = <<_EOC_; +routes: + - + uri: /hello + upstream_id: 1 +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $routes); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: AAAA +--- listen_ipv6 +--- apisix_yaml +upstreams: + - id: 1 + nodes: + ipv6.test.local:1980: 1 + type: roundrobin +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 2: default ttl +--- log_level: debug +--- apisix_yaml +upstreams: + - id: 1 + nodes: + ttl.test.local:1980: 1 + type: roundrobin +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res or res.body ~= "hello world\n" then + ngx.say(err) + return + end + end + } + } +--- request +GET /t +--- error_log +"ttl":300 +--- grep_error_log eval +qr/connect to 127.0.0.1:1053/ +--- grep_error_log_out +connect to 127.0.0.1:1053 + + + +=== TEST 3: override ttl +--- log_level: debug +--- yaml_config +apisix: + node_listen: 1984 + dns_resolver_valid: 900 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +upstreams: + - id: 1 + nodes: + ttl.test.local:1980: 1 + type: roundrobin +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res or res.body ~= "hello world\n" then + ngx.say(err) + return + end + end + } + } +--- request +GET /t +--- grep_error_log eval +qr/connect to 127.0.0.1:1053/ +--- grep_error_log_out +connect to 127.0.0.1:1053 +--- error_log +"ttl":900 + + + +=== TEST 4: cache expire +--- log_level: debug +--- apisix_yaml +upstreams: + - id: 1 + nodes: + ttl.1s.test.local:1980: 1 + type: roundrobin +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + for i = 1, 2 do + for j = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res or res.body ~= "hello world\n" then + ngx.say(err) + return + end + end + + if i < 2 then + ngx.sleep(1.1) + end + end + } + } +--- request +GET /t +--- grep_error_log eval +qr/connect to 127.0.0.1:1053/ +--- grep_error_log_out +connect to 127.0.0.1:1053 +connect to 127.0.0.1:1053 + + + +=== TEST 5: cache expire (override ttl) +--- log_level: debug +--- yaml_config +apisix: + node_listen: 1984 + dns_resolver_valid: 1 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +upstreams: + - id: 1 + nodes: + ttl.test.local:1980: 1 + type: roundrobin +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + for i = 1, 2 do + for j = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res or res.body ~= "hello world\n" then + ngx.say(err) + return + end + end + + if i < 2 then + ngx.sleep(1.1) + end + end + } + } +--- request +GET /t +--- grep_error_log eval +qr/connect to 127.0.0.1:1053/ +--- grep_error_log_out +connect to 127.0.0.1:1053 +connect to 127.0.0.1:1053 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-domain-with-special-ipv6-dns.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-domain-with-special-ipv6-dns.t new file mode 100644 index 0000000..dd90aec --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-domain-with-special-ipv6-dns.t @@ -0,0 +1,70 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{CUSTOM_DNS_SERVER} = "[::1]:1053"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('debug'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + my $routes = <<_EOC_; +routes: + - + uri: /hello + upstream_id: 1 +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . $routes); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: AAAA +--- listen_ipv6 +--- apisix_yaml +upstreams: + - id: 1 + nodes: + ipv6.test.local:1980: 1 + type: roundrobin +--- request +GET /hello +--- error_log +connect to [::1]:1053 +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-domain.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-domain.t new file mode 100644 index 0000000..2404846 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-domain.t @@ -0,0 +1,415 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "foo.com:80": 0, + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: hit routes +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/dns resolver domain: foo.com to \d+.\d+.\d+.\d+/ + + + +=== TEST 5: set upstream(invalid node host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + 
local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "test.comx:80": 0 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local function test() + local code, body = t('/hello', ngx.HTTP_GET) + + ngx.say("status: ", code) + end + test() + test() + } + } +--- request +GET /t +--- response_body +status: 503 +status: 503 +--- error_log +failed to parse domain: test.comx +failed to parse domain: test.comx +--- timeout: 10 + + + +=== TEST 7: delete route +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: delete upstream +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: set upstream(with domain) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "foo.com:80": 0, + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: set empty service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, 
body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "desc": "new service", + "plugins": { + "prometheus": {} + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: set route(with upstream) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "foo.com": 0, + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }, + "service_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit routes, parse the domain of upstream node +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/dns resolver domain: foo.com to \d+.\d+.\d+.\d+/ + + + +=== TEST 13: set route(with upstream) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "localhost:1981": 2, + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "service_id": "1", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: roundrobin +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 3 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- request +GET /t +--- response_body +1980, 1981, 
1981 + + + +=== TEST 15: set route(with upstream) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "foo.com.": 0, + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }, + "service_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit routes, parse the domain of upstream node +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/dns resolver domain: foo.com. to \d+.\d+.\d+.\d+/ diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-ipv6.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-ipv6.t new file mode 100644 index 0000000..760109a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-ipv6.t @@ -0,0 +1,272 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my $block = shift; + $block->set_value("listen_ipv6", 1); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "[::1]:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: hit routes +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "weight": 100, + "priority": 0, + "host": "::1", + "port": 1980 + } + ], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit routes +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 7: set upstream, one array item to specify node +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "weight": 100, + "priority": 0, + "host": "[::1]", + "port": 1980 + } + ], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit routes +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 9: set upstream, one hash key to specify node, in wrong format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "::1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: hit routes +--- request +GET /hello +--- error_code: 502 +--- error_log +connect() to [::0.1.25.128]:80 failed + + + +=== TEST 11: set upstream, two array items to specify nodes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "weight": 100, + "priority": 0, + "host": "::1", + "port": 1980 + }, + { + "weight": 100, + "priority": 0, + "host": "::1", + "port": 1980 + } + ], + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit routes +--- request +GET /hello +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-keepalive-pool.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-keepalive-pool.t new file mode 100644 index 0000000..4fc4a1a --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/node/upstream-keepalive-pool.t @@ -0,0 +1,807 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('debug'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: bad pool size +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "roundrobin", + "nodes": { + "127.0.0.1:1983": 1 + }, + "keepalive_pool": { + "size": 0 + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"keepalive_pool\" validation failed: property \"size\" validation failed: expected 0 to be at least 1"} + + + +=== TEST 2: set 
route/upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + }, + "keepalive_pool": { + "size": 4, + "idle_timeout": 8, + "requests": 16 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri":"/hello", + "upstream_id": 1 + }]]) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } + + + +=== TEST 3: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + end + } + } +--- response_body +hello world +hello world +hello world +--- grep_error_log eval +qr/lua balancer: keepalive .*/ +--- grep_error_log_out eval +qr/^lua balancer: keepalive create pool, crc32: \S+, size: 4 +lua balancer: keepalive no free connection, cpool: \S+ +lua balancer: keepalive saving connection \S+, cpool: \S+, connections: 1 +lua balancer: keepalive reusing connection \S+, requests: 1, cpool: \S+ +lua balancer: keepalive saving connection \S+, cpool: \S+, connections: 1 +lua balancer: keepalive reusing connection \S+, requests: 2, cpool: \S+ +lua balancer: keepalive saving connection \S+, cpool: \S+, connections: 1 +$/ + + + +=== TEST 4: only reuse one time +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + }, + "keepalive_pool": { + "size": 1, + "idle_timeout": 8, + "requests": 2 + } + }]] + ) + if code >= 300 then + ngx.status = code + end 
+ ngx.print(body) + } + } + + + +=== TEST 5: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + end + } + } +--- response_body +hello world +hello world +hello world +--- grep_error_log eval +qr/lua balancer: keepalive .*/ +--- grep_error_log_out eval +qr/^lua balancer: keepalive create pool, crc32: \S+, size: 1 +lua balancer: keepalive no free connection, cpool: \S+ +lua balancer: keepalive saving connection \S+, cpool: \S+, connections: 1 +lua balancer: keepalive reusing connection \S+, requests: 1, cpool: \S+ +lua balancer: keepalive not saving connection \S+, cpool: \S+, connections: 0 +lua balancer: keepalive free pool \S+, crc32: \S+ +lua balancer: keepalive create pool, crc32: \S+, size: 1 +lua balancer: keepalive no free connection, cpool: \S+ +lua balancer: keepalive saving connection \S+, cpool: \S+, connections: 1 +$/ + + + +=== TEST 6: set upstream without keepalive_pool +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + } + } + + + +=== TEST 7: should not override default value +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + end + } + } +--- response_body +hello world +hello world +hello world +--- grep_error_log eval +qr/lua balancer: keepalive .*/ +--- grep_error_log_out eval +qr/^lua balancer: keepalive create pool, crc32: \S+, size: 320 +lua balancer: keepalive no free connection, cpool: \S+ +lua balancer: keepalive saving connection \S+, cpool: \S+, connections: 1 +lua balancer: keepalive reusing connection \S+, requests: 1, cpool: \S+ +lua balancer: keepalive saving connection \S+, cpool: \S+, connections: 1 +lua balancer: keepalive reusing connection \S+, requests: 2, cpool: \S+ +lua balancer: keepalive saving connection \S+, cpool: \S+, connections: 1 +$/ + + + +=== TEST 8: upstreams with different client cert +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local test = require("lib.test_admin").test + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local ssl_cert2 = t.read_file("t/certs/apisix.crt") + local ssl_key2 = t.read_file("t/certs/apisix.key") + + local code, body = test('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "scheme": "https", + "type": "roundrobin", + "nodes": { + "127.0.0.1:1983": 1 + }, + "keepalive_pool": { + "size": 4 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local data = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + }, + keepalive_pool = { + size = 8 + } + } + local code, body = test('/apisix/admin/upstreams/2', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local data = { + scheme = "https", + type = 
"roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert2, + client_key = ssl_key2, + }, + keepalive_pool = { + size = 16 + } + } + local code, body = test('/apisix/admin/upstreams/3', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + for i = 1, 3 do + local code, body = test('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "uri":"/hello/]] .. i .. [[", + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream_id": ]] .. i .. [[ + }]]) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + end + } + } +--- response_body + + + +=== TEST 9: hit +--- upstream_server_config + ssl_client_certificate ../../certs/mtls_ca.crt; + ssl_verify_client on; +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + for i = 1, 12 do + local idx = (i % 3) + 1 + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello/" .. 
idx) + if not res then + ngx.say(err) + return + end + + if idx == 2 then + assert(res.status == 200) + else + assert(res.status == 400) + end + end + } + } + + + +=== TEST 10: upstreams with different client cert (without pool) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local test = require("lib.test_admin").test + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local ssl_cert2 = t.read_file("t/certs/apisix.crt") + local ssl_key2 = t.read_file("t/certs/apisix.key") + + local code, body = test('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "scheme": "https", + "type": "roundrobin", + "nodes": { + "127.0.0.1:1983": 1 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local data = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + } + local code, body = test('/apisix/admin/upstreams/2', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local data = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert2, + client_key = ssl_key2, + } + } + local code, body = test('/apisix/admin/upstreams/3', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + for i = 1, 3 do + local code, body = test('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "uri":"/hello/]] .. i .. [[", + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream_id": ]] .. i .. 
[[ + }]]) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + end + } + } +--- response_body + + + +=== TEST 11: hit +--- upstream_server_config + ssl_client_certificate ../../certs/mtls_ca.crt; + ssl_verify_client on; +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + + for i = 1, 12 do + local idx = (i % 3) + 1 + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello/" .. idx) + if not res then + ngx.say(err) + return + end + + if idx == 2 then + assert(res.status == 200) + else + assert(res.status == 400) + end + end + } + } + + + +=== TEST 12: upstreams with different SNI +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local test = require("lib.test_admin").test + local json = require("toolkit.json") + + local code, body = test('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "scheme": "https", + "type": "roundrobin", + "nodes": { + "127.0.0.1:1983": 1 + }, + "pass_host": "rewrite", + "upstream_host": "a.com", + "keepalive_pool": { + "size": 4 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local data = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + pass_host = "rewrite", + upstream_host = "b.com", + keepalive_pool = { + size = 8 + } + } + local code, body = test('/apisix/admin/upstreams/2', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + for i = 1, 2 do + local code, body = test('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "uri":"/hello/]] .. i .. [[", + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream_id": ]] .. i .. 
[[ + }]]) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + end + } + } +--- response_body + + + +=== TEST 13: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + for i = 1, 4 do + local idx = i % 2 + 1 + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello/" .. idx) + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + end + } + } +--- grep_error_log eval +qr/lua balancer: keepalive create pool, .*/ +--- grep_error_log_out eval +qr/^lua balancer: keepalive create pool, crc32: \S+, size: 8 +lua balancer: keepalive create pool, crc32: \S+, size: 4 +$/ + + + +=== TEST 14: upstreams with SNI, then without SNI +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local test = require("lib.test_admin").test + local json = require("toolkit.json") + + local code, body = test('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "scheme": "https", + "type": "roundrobin", + "nodes": { + "127.0.0.1:1983": 1 + }, + "pass_host": "rewrite", + "upstream_host": "a.com", + "keepalive_pool": { + "size": 4 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + local data = { + scheme = "http", + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + pass_host = "rewrite", + upstream_host = "b.com", + keepalive_pool = { + size = 8 + } + } + local code, body = test('/apisix/admin/upstreams/2', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + for i = 1, 2 do + local code, body = test('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "uri":"/hello/]] .. i .. [[", + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream_id": ]] .. i .. 
[[ + }]]) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + end + } + } +--- response_body + + + +=== TEST 15: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + for i = 0, 1 do + local idx = i % 2 + 1 + local httpc = http.new() + local res, err = httpc:request_uri(uri .. "/hello/" .. idx) + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + end + } + } +--- grep_error_log eval +qr/lua balancer: keepalive create pool, .*/ +--- grep_error_log_out eval +qr/^lua balancer: keepalive create pool, crc32: \S+, size: 4 +lua balancer: keepalive create pool, crc32: \S+, size: 8 +$/ + + + +=== TEST 16: backend serve http and grpc with the same port +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local test = require("lib.test_admin").test + local json = require("toolkit.json") + + local data = { + uri = "", + upstream = { + scheme = "", + type = "roundrobin", + nodes = { + ["127.0.0.1:10054"] = 1, + }, + keepalive_pool = { + size = 4 + } + } + } + + data.uri = "/helloworld.Greeter/SayHello" + data.upstream.scheme = "grpc" + local code, body = test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + data.uri = "/hello" + data.upstream.scheme = "http" + local code, body = test('/apisix/admin/routes/2', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + + } + } +--- response_body + + + +=== TEST 17: hit http +--- request +GET /hello +--- response_body chomp +hello http + + + +=== TEST 18: hit grpc +--- http2 +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + 
"message": "Hello apisix" +} diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-mtls.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-mtls.t new file mode 100644 index 0000000..b7bff23 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-mtls.t @@ -0,0 +1,684 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: tls without key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + } + }, + uri = "/hello" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"upstream\" validation failed: property \"tls\" validation failed: failed to validate dependent schema for \"client_cert\": property \"client_key\" is required"} + + + +=== TEST 2: tls with bad key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ("AAA"):rep(128), + } + }, + uri = "/hello" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to decrypt previous encrypted key"} 
+--- error_log +decrypt ssl key failed + + + +=== TEST 3: encrypt key by default +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + }, + uri = "/hello" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + res = json.decode(res) + ngx.say(res.value.upstream.tls.client_key == ssl_key) + + -- upstream + local data = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + } + local code, body = t.test('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, res = t.test('/apisix/admin/upstreams/1', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + res = json.decode(res) + ngx.say(res.value.tls.client_key == ssl_key) + + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + }, + } + local code, body = t.test('/apisix/admin/services/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, res = t.test('/apisix/admin/services/1', + ngx.HTTP_GET + ) + + if code >= 300 
then + ngx.status = code + ngx.say(body) + return + end + + res = json.decode(res) + ngx.say(res.value.upstream.tls.client_key == ssl_key) + } + } +--- request +GET /t +--- response_body +false +false +false + + + +=== TEST 4: hit +--- upstream_server_config + ssl_client_certificate ../../certs/mtls_ca.crt; + ssl_verify_client on; +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: wrong cert +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + }, + uri = "/hello" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit +--- upstream_server_config + ssl_client_certificate ../../certs/mtls_ca.crt; + ssl_verify_client on; +--- request +GET /hello +--- error_code: 400 +--- error_log +client SSL certificate verify error + + + +=== TEST 7: clean old data +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_DELETE + )) + assert(t.test('/apisix/admin/services/1', + ngx.HTTP_DELETE + )) + assert(t.test('/apisix/admin/upstreams/1', + ngx.HTTP_DELETE + )) + } + } +--- request +GET /t + + + +=== TEST 8: don't encrypt key +--- yaml_config +apisix: + node_listen: 1984 + data_encryption: + keyring: null +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = 
t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + }, + uri = "/hello" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + res = json.decode(res) + ngx.say(res.value.upstream.tls.client_key == ssl_key) + + -- upstream + local data = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + } + local code, body = t.test('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, res = t.test('/apisix/admin/upstreams/1', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + res = json.decode(res) + ngx.say(res.value.tls.client_key == ssl_key) + + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + }, + } + local code, body = t.test('/apisix/admin/services/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, res = t.test('/apisix/admin/services/1', + ngx.HTTP_GET + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + res = json.decode(res) + ngx.say(res.value.upstream.tls.client_key == ssl_key) + } + } +--- request +GET /t +--- response_body +true +true +true + + + +=== TEST 9: bind upstream +--- config + location /t { + content_by_lua_block 
{ + local t = require("lib.test_admin") + local json = require("toolkit.json") + local data = { + upstream_id = 1, + uri = "/server_port" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } + } +--- request +GET /t + + + +=== TEST 10: hit +--- upstream_server_config + ssl_client_certificate ../../certs/mtls_ca.crt; + ssl_verify_client on; +--- request +GET /server_port +--- response_body chomp +1983 + + + +=== TEST 11: bind service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local data = { + service_id = 1, + uri = "/hello_chunked" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } + } +--- request +GET /t + + + +=== TEST 12: hit +--- upstream_server_config + ssl_client_certificate ../../certs/mtls_ca.crt; + ssl_verify_client on; +--- request +GET /hello_chunked +--- response_body +hello world + + + +=== TEST 13: get cert by tls.client_cert_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + type = "client", + cert = ssl_cert, + key = ssl_key + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + tls = { + client_cert_id = 1 + } + }, + uri = "/hello" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = 
code + ngx.say(body) + return + end + } + } +--- request +GET /t + + + +=== TEST 14: hit +--- upstream_server_config + ssl_client_certificate ../../certs/mtls_ca.crt; + ssl_verify_client on; +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 15: change ssl object type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + type = "server", + sni = "test.com", + cert = ssl_cert, + key = ssl_key + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } + } +--- request +GET /t + + + +=== TEST 16: hit, ssl object type mismatch +--- upstream_server_config + ssl_client_certificate ../../certs/mtls_ca.crt; + ssl_verify_client on; +--- request +GET /hello +--- error_code: 502 +--- error_log +failed to get ssl cert: ssl type should be 'client' + + + +=== TEST 17: delete ssl object +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + + local code, body = t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } + } +--- request +GET /t + + + +=== TEST 18: hit, ssl object not exits +--- upstream_server_config + ssl_client_certificate ../../certs/mtls_ca.crt; + ssl_verify_client on; +--- request +GET /hello +--- error_code: 502 +--- error_log +failed to get ssl cert: ssl id [1] not exits diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-node-dns.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-node-dns.t new file mode 100644 index 0000000..b8dbb0b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-node-dns.t @@ -0,0 +1,558 @@ +# +# Licensed 
to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: route with one upstream node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "test1.com:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route, resolve upstream node to "127.0.0.2" always +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + utils.dns_parse = function (domain) -- mock: DNS parser + if domain == "test1.com" then + return {address = "127.0.0.2"} + end + + error("unknown domain: " .. 
domain) + end +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 3: hit route, resolve upstream node to different values +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + local count = 0 + utils.dns_parse = function (domain) -- mock: DNS parser + count = count + 1 + + if domain == "test1.com" then + return {address = "127.0.0." .. count} + end + + error("unknown domain: " .. domain) + end + +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + core.log.info("call /hello") + local code, body = t('/hello', ngx.HTTP_GET) + } +} + +--- request +GET /t +--- grep_error_log eval +qr/dns resolver domain: test1.com to 127.0.0.\d|call \/hello|proxy request to 127.0.0.\d:1980/ +--- grep_error_log_out +call /hello +dns resolver domain: test1.com to 127.0.0.1 +proxy request to 127.0.0.1:1980 + + + +=== TEST 4: set route with two upstream nodes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "test1.com:1980": 1, + "test2.com:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit route, resolve the upstream node to "127.0.0.2" +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + utils.dns_parse = function (domain) -- mock: DNS parser + if domain == "test1.com" or domain == "test2.com" then + return {address = "127.0.0.2"} + end + + error("unknown domain: " .. 
domain) + end +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 6: hit route, resolve upstream node to different values +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + local count = 0 + utils.dns_parse = function (domain) -- mock: DNS parser + count = count + 1 + + if domain == "test1.com" or domain == "test2.com" then + return {address = "127.0.0." .. count} + end + + error("unknown domain: " .. domain) + end + +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + core.log.info("call /hello") + local code, body = t('/hello', ngx.HTTP_GET) + core.log.warn("code: ", code) + } +} + +--- request +GET /t +--- grep_error_log eval +qr/dns resolver domain: \w+.com to 127.0.0.\d|call \/hello|proxy request to 127.0.0.\d:1980/ +--- grep_error_log_out eval +qr/call \/hello( +dns resolver domain: test1.com to 127.0.0.1 +dns resolver domain: test2.com to 127.0.0.2| +dns resolver domain: test2.com to 127.0.0.1 +dns resolver domain: test1.com to 127.0.0.2) +proxy request to 127.0.0.[12]:1980 +/ + + + +=== TEST 7: upstream with one upstream node +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "test1.com:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: set route with upstream_id 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 9: hit route, resolve upstream node to different values +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + local count = 0 + utils.dns_parse = function (domain) -- mock: DNS parser + count = count + 1 + + if domain == "test1.com" then + return {address = "127.0.0." .. count} + end + + error("unknown domain: " .. domain) + end + +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + core.log.info("call /hello") + local code, body = t('/hello', ngx.HTTP_GET) + } +} + +--- request +GET /t +--- grep_error_log eval +qr/dns resolver domain: test1.com to 127.0.0.\d|call \/hello|proxy request to 127.0.0.\d:1980/ +--- grep_error_log_out +call /hello +dns resolver domain: test1.com to 127.0.0.1 +proxy request to 127.0.0.1:1980 + + + +=== TEST 10: two upstream nodes in upstream object +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "test1.com:1980": 1, + "test2.com:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: hit route, resolve upstream node to different values +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + local count = 0 + utils.dns_parse = function (domain) -- mock: DNS parser + count = count + 1 + + if domain == "test1.com" or domain == "test2.com" then + return {address = "127.0.0." .. count} + end + + error("unknown domain: " .. 
domain) + end + +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + core.log.info("call /hello") + local code, body = t('/hello', ngx.HTTP_GET) + } +} + +--- request +GET /t +--- grep_error_log eval +qr/dns resolver domain: \w+.com to 127.0.0.\d|call \/hello|proxy request to 127.0.0.\d:1980/ +--- grep_error_log_out eval +qr/call \/hello( +dns resolver domain: test1.com to 127.0.0.1 +dns resolver domain: test2.com to 127.0.0.2| +dns resolver domain: test2.com to 127.0.0.1 +dns resolver domain: test1.com to 127.0.0.2) +proxy request to 127.0.0.[12]:1980 +/ + + + +=== TEST 12: dns cached expired, resolve the domain always with same value +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + local count = 1 + utils.dns_parse = function (domain) -- mock: DNS parser + if domain == "test1.com" or domain == "test2.com" then + return {address = "127.0.0.1"} + end + + error("unknown domain: " .. 
domain) + end + +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + core.log.info("call /hello") + local code, body = t('/hello', ngx.HTTP_GET) + } +} + +--- request +GET /t +--- grep_error_log eval +qr/dns resolver domain: \w+.com to 127.0.0.\d|call \/hello|proxy request to 127.0.0.\d:1980/ +--- grep_error_log_out eval +qr/call \/hello( +dns resolver domain: test1.com to 127.0.0.1 +dns resolver domain: test2.com to 127.0.0.1| +dns resolver domain: test2.com to 127.0.0.1 +dns resolver domain: test1.com to 127.0.0.1) +proxy request to 127.0.0.1:1980 +/ + + + +=== TEST 13: two upstream nodes in upstream object (one host + one IP) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "test1.com:1980": 1, + "127.0.0.5:1981": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: dns cached expired, resolve the domain with different values +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + local count = 0 + utils.dns_parse = function (domain) -- mock: DNS parser + count = count + 1 + if domain == "test1.com" or domain == "test2.com" then + return {address = "127.0.0." .. count} + end + + error("unknown domain: " .. 
domain) + end + +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + core.log.info("call /hello") + local code, body = t('/hello', ngx.HTTP_GET) + } +} + +--- request +GET /t +--- grep_error_log eval +qr/dns resolver domain: \w+.com to 127.0.0.\d|call \/hello|proxy request to 127.0.0.\d:198\d/ +--- grep_error_log_out eval +qr/call \/hello +dns resolver domain: test1.com to 127.0.0.1 +proxy request to 127.0.0.(1:1980|5:1981) +/ + + + +=== TEST 15: route with upstream node, the domain's IP is changed +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "test1.com:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + local count = 0 + utils.dns_parse = function (domain) -- mock: DNS parser + count = count + 1 + if domain == "test1.com" then + return {address = "127.0.0." .. count} + end + + error("unknown domain: " .. 
domain) + end + +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/hello', ngx.HTTP_GET) + -- avoid adding more "dns_value" into the route + t('/hello', ngx.HTTP_GET) + } +} + +--- request +GET /t +--- grep_error_log eval +qr/parse route which contain domain: .+("dns_value":.+){3}/ +--- grep_error_log_out diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-retries.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-retries.t new file mode 100644 index 0000000..4d14a3d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-retries.t @@ -0,0 +1,304 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream(id: 1), by default retries count = number of nodes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1": 1, + "127.0.0.2:1": 1, + "127.0.0.3:1": 1, + "127.0.0.4:1": 1 + }, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: hit routes +--- request +GET /hello +--- error_code: 502 +--- grep_error_log eval +qr/\[error\]/ +--- grep_error_log_out +[error] +[error] +[error] +[error] + + + +=== TEST 5: hit routes +--- request +GET /hello +--- error_code: 502 +--- error_log +connect() failed + + + +=== TEST 6: set upstream(id: 1), retries = 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1": 1, + "127.0.0.2:1": 1, + "127.0.0.3:1": 1, + "127.0.0.4:1": 1 + }, + "retries": 1, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit routes +--- request +GET /hello 
+--- error_code: 502 +--- grep_error_log eval +qr/\[error\]/ +--- grep_error_log_out +[error] +[error] + + + +=== TEST 8: set upstream(id: 1), retries = 0 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1": 1, + "127.0.0.2:1": 1, + "127.0.0.3:1": 1, + "127.0.0.4:1": 1 + }, + "retries": 0, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit routes +--- request +GET /hello +--- error_code: 502 +--- grep_error_log eval +qr/\[error\]/ +--- grep_error_log_out +[error] + + + +=== TEST 10: set upstream, retries > number of nodes, only try number of nodes time +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1": 1, + "127.0.0.2:1": 1 + }, + "retries": 3, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: hit routes +--- request +GET /hello +--- error_code: 502 +--- error_log +all upstream servers tried +--- grep_error_log eval +qr/connect\(\) failed/ +--- grep_error_log_out +connect() failed +connect() failed + + + +=== TEST 12: don't retry the same node twice +--- request +GET /hello +--- error_code: 502 +--- error_log +Connection refused +failed to find valid upstream server +proxy request to 127.0.0.1:1 +proxy request to 127.0.0.2:1 + + + +=== TEST 13: stop proxy to next upstream by retry_timeout +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 100, + "127.0.0.1:1981": 
100, + "127.0.0.1:1982": 100 + }, + "retries": 10, + "retry_timeout": 0.15, + "type": "roundrobin" + }, + "uri": "/mysleep" + }]] + ) + + if code ~= 200 then + ngx.say(body) + return + end + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/mysleep?abort=true&seconds=0.1" + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.status = res.status + ngx.say(res.status) + } + } +--- request +GET /t +--- error_code: 502 +--- error_log +proxy retry timeout, retry count: 1 diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-status-5xx.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-status-5xx.t new file mode 100644 index 0000000..b07e8f9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-status-5xx.t @@ -0,0 +1,407 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) and available upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit the route and $upstream_status is 200 +--- request +GET /hello +--- response_body +hello world +--- grep_error_log eval +qr/X-APISIX-Upstream-Status: 200/ +--- grep_error_log_out + + + +=== TEST 3: set route(id: 1) and set the timeout field +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "timeout": { + "connect": 0.5, + "send": 0.5, + "read": 0.5 + } + }, + "uri": "/mysleep" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit routes (timeout) and $upstream_status is 504 +--- request +GET /mysleep?seconds=1 +--- error_code: 504 +--- response_body eval +qr/504 Gateway Time-out/ +--- response_headers +X-APISIX-Upstream-Status: 504 +--- error_log +Connection timed out + + + +=== TEST 5: set route(id: 1), upstream service is not available +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1": 1 + }, + "type": "roundrobin" + }, 
+ "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit routes and $upstream_status is 502 +--- request +GET /hello +--- error_code: 502 +--- response_body eval +qr/502 Bad Gateway/ +--- response_headers +X-APISIX-Upstream-Status: 502 +--- error_log +Connection refused + + + +=== TEST 7: set route(id: 1) and uri is `/server_error` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_error" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit routes and $upstream_status is 500 +--- request +GET /server_error +--- error_code: 500 +--- response_body eval +qr/500 Internal Server Error/ +--- response_headers +X-APISIX-Upstream-Status: 500 +--- error_log +500 Internal Server Error + + + +=== TEST 9: set upstream(id: 1, retries = 2), has available upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.2:1": 1, + "127.0.0.1:1980": 1 + }, + "retries": 2, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: set route(id: 1) and bind the upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 11: hit routes and $upstream_status is `502, 200` +--- request +GET /hello +--- response_body +hello world +--- grep_error_log eval +qr/X-APISIX-Upstream-Status: 502, 200/ +--- grep_error_log_out + + + +=== TEST 12: set upstream(id: 1, retries = 2), all upstream nodes are unavailable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.3:1": 1, + "127.0.0.2:1": 1, + "127.0.0.1:1": 1 + + }, + "retries": 2, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: hit routes, retry between upstream failed, $upstream_status is `502, 502, 502` +--- request +GET /hello +--- error_code: 502 +--- response_headers_raw_like eval +qr/X-APISIX-Upstream-Status: 502, 502, 502/ +--- error_log +Connection refused + + + +=== TEST 14: return 500 status code from APISIX +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 500, + "body": "Fault Injection!\n" + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: hit routes, status code is 500 +--- request +GET /hello +--- error_code: 500 +--- response_body +Fault Injection! 
+--- grep_error_log eval +qr/X-APISIX-Upstream-Status: 500/ +--- grep_error_log_out + + + +=== TEST 16: return 200 status code from APISIX +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "Fault Injection!\n" + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: hit routes, status code is 200 +--- request +GET /hello +--- response_body +Fault Injection! +--- grep_error_log eval +qr/X-APISIX-Upstream-Status: 200/ +--- grep_error_log_out diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-status-all.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-status-all.t new file mode 100644 index 0000000..a5b449c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-status-all.t @@ -0,0 +1,465 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) and available upstream and show_upstream_status_in_response_header: true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit the route and $upstream_status is 200 +--- yaml_config +apisix: + show_upstream_status_in_response_header: true +--- request +GET /hello +--- response_body +hello world +--- response_headers +X-APISIX-Upstream-Status: 200 + + + +=== TEST 3: set route(id: 1) and set the timeout field +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "timeout": { + "connect": 0.5, + "send": 0.5, + "read": 0.5 + } + }, + "uri": "/mysleep" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit routes (timeout) and $upstream_status is 504 +--- yaml_config +apisix: + show_upstream_status_in_response_header: true +--- request +GET /mysleep?seconds=1 +--- error_code: 504 +--- response_body eval +qr/504 Gateway Time-out/ +--- response_headers +X-APISIX-Upstream-Status: 504 +--- error_log +Connection timed out + + + +=== TEST 5: set route(id: 1), upstream service is not available +--- config + 
location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit routes and $upstream_status is 502 +--- yaml_config +apisix: + show_upstream_status_in_response_header: true +--- request +GET /hello +--- error_code: 502 +--- response_body eval +qr/502 Bad Gateway/ +--- response_headers +X-APISIX-Upstream-Status: 502 +--- error_log +Connection refused + + + +=== TEST 7: set route(id: 1) and uri is `/server_error` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_error" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit routes and $upstream_status is 500 +--- yaml_config +apisix: + show_upstream_status_in_response_header: true +--- request +GET /server_error +--- error_code: 500 +--- response_body eval +qr/500 Internal Server Error/ +--- response_headers +X-APISIX-Upstream-Status: 500 +--- error_log +500 Internal Server Error + + + +=== TEST 9: set upstream(id: 1, retries = 2), has available upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.1.0.2:1": 1, + "127.0.0.1:1980": 1 + }, + "retries": 2, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: set route(id: 1) and 
bind the upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit routes and $upstream_status is `502, 200` +--- yaml_config +apisix: + show_upstream_status_in_response_header: true +--- request +GET /hello +--- response_body +hello world +--- response_headers_raw_like eval +qr/X-APISIX-Upstream-Status: 502, 200|X-APISIX-Upstream-Status: 200/ +--- error_log + + + +=== TEST 12: set upstream(id: 1, retries = 2), all upstream nodes are unavailable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.3:1": 1, + "127.0.0.2:1": 1, + "127.0.0.1:1": 1 + + }, + "retries": 2, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit routes, retry between upstream failed, $upstream_status is `502, 502, 502` +--- yaml_config +apisix: + show_upstream_status_in_response_header: true +--- request +GET /hello +--- error_code: 502 +--- response_headers_raw_like eval +qr/X-APISIX-Upstream-Status: 502, 502, 502/ +--- error_log +Connection refused + + + +=== TEST 14: return 500 status code from APISIX +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 500, + "body": "Fault Injection!\n" + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: hit routes, status code is 500 
+--- yaml_config +apisix: + show_upstream_status_in_response_header: true +--- request +GET /hello +--- error_code: 500 +--- response_body +Fault Injection! +--- grep_error_log eval +qr/X-APISIX-Upstream-Status: 500/ +--- grep_error_log_out + + + +=== TEST 16: return 200 status code from APISIX +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "Fault Injection!\n" + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: hit routes, status code is 200 +--- yaml_config +apisix: + show_upstream_status_in_response_header: true +--- request +GET /hello +--- response_body +Fault Injection! +--- grep_error_log eval +qr/X-APISIX-Upstream-Status: 200/ +--- grep_error_log_out + + + +=== TEST 18: return 200 status code from APISIX (with show_upstream_status_in_response_header:false) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "Fault Injection!\n" + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: hit routes, status code is 200 +--- yaml_config +apisix: + show_upstream_status_in_response_header: false +--- request +GET /hello +--- response_body +Fault Injection! 
+--- grep_error_log eval +qr/X-APISIX-Upstream-Status: 200/ +--- grep_error_log_out diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream-websocket.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream-websocket.t new file mode 100644 index 0000000..a19a202 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream-websocket.t @@ -0,0 +1,295 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream with websocket (id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "enable_websocket": true, + "uri": "/websocket_handshake" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: send websocket +--- raw_request eval +"GET /websocket_handshake HTTP/1.1\r +Host: server.example.com\r +Upgrade: websocket\r +Connection: Upgrade\r +Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r +Sec-WebSocket-Protocol: chat\r +Sec-WebSocket-Version: 13\r +Origin: http://example.com\r +\r +" +--- response_headers +Upgrade: websocket +Connection: upgrade +Sec-WebSocket-Accept: HSmrc0sMlYUkAGmm5OPpG2HaGWk= +Sec-WebSocket-Protocol: chat +!Content-Type +--- raw_response_headers_like: ^HTTP/1.1 101 Switching Protocols\r\n +--- response_body_like eval +qr/hello/ +--- error_code: 101 + + + +=== TEST 3: set upstream(id: 6) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/6', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: set route with upstream websocket enabled(id: 6) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/6', + ngx.HTTP_PUT, + [[{ + "uri": "/websocket_handshake/route", + "enable_websocket": true, + 
"upstream_id": "6"
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 5: send success websocket to upstream
+--- raw_request eval
+"GET /websocket_handshake/route HTTP/1.1\r
+Host: server.example.com\r
+Upgrade: websocket\r
+Connection: Upgrade\r
+Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r
+Sec-WebSocket-Protocol: chat\r
+Sec-WebSocket-Version: 13\r
+Origin: http://example.com\r
+\r
+"
+--- response_headers
+Upgrade: websocket
+Connection: upgrade
+Sec-WebSocket-Accept: HSmrc0sMlYUkAGmm5OPpG2HaGWk=
+Sec-WebSocket-Protocol: chat
+!Content-Type
+--- raw_response_headers_like: ^HTTP/1.1 101 Switching Protocols\r\n
+--- response_body_like eval
+qr/hello/
+--- error_code: 101
+
+
+
+=== TEST 6: disable websocket(id: 6)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/6',
+                ngx.HTTP_PUT,
+                [[{
+                    "uri": "/websocket_handshake/route",
+                    "enable_websocket": false,
+                    "upstream_id": "6"
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 7: send websocket to upstream without header support
+--- raw_request eval
+"GET /websocket_handshake/route HTTP/1.1\r
+Host: server.example.com\r
+Upgrade: websocket\r
+Connection: Upgrade\r
+Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r
+Sec-WebSocket-Protocol: chat\r
+Sec-WebSocket-Version: 13\r
+Origin: http://example.com\r
+\r
+"
+--- error_code: 400
+--- grep_error_log eval
+qr/failed to new websocket: bad "upgrade" request header: nil/
+--- grep_error_log_out
+
+
+
+=== TEST 8: set wss
+--- config
+    location /t {
+        content_by_lua_block {
+            local core = require("apisix.core")
+            local t = require("lib.test_admin")
+            local code, body = t.test('/apisix/admin/routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "upstream": {
+                        "scheme": "https",
+                        "nodes": {
+                            
"127.0.0.1:1983": 1 + }, + "type": "roundrobin" + }, + "enable_websocket": true, + "uri": "/websocket_handshake" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "127.0.0.1"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: send websocket +--- config + location /t { + content_by_lua_block { + local client = require "resty.websocket.client" + local wb = client:new() + local uri = "wss://127.0.0.1:1994/websocket_handshake" + local opts = { + server_name = "127.0.0.1" + } + local ok, err = wb:connect(uri, opts) + if not ok then + ngx.say("failed to connect: " .. err) + return + end + + local typ + data, typ, err = wb:recv_frame() + if not data then + ngx.say("failed to receive 2nd frame: ", err) + return + end + + ngx.say("received: ", data, " (", typ, ")") + } + } +--- request +GET /t +--- response_body +received: hello (text) diff --git a/CloudronPackages/APISIX/apisix-source/t/node/upstream.t b/CloudronPackages/APISIX/apisix-source/t/node/upstream.t new file mode 100644 index 0000000..c99b10a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/upstream.t @@ -0,0 +1,630 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream(id: 1) invalid parameters +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 2: set upstream(id: 1) nodes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 5: hit routes +--- request +GET /hello +--- response_body +hello 
world + + + +=== TEST 6: delete upstream(id: 1) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.5) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this upstream, route [1] is still using it now"} + + + +=== TEST 7: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 8: delete upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- request +GET /t +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 9: delete upstream again(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code) + } + } +--- request +GET /t +--- response_body +[delete] code: 404 + + + +=== TEST 10: set upstream(id: 1, using `node` mode to pass upstream host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "test.com:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream", + "pass_host": "node" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: 
set route(id: 1, using `node` mode to pass upstream host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit route +--- request +GET /echo +--- response_headers +host: test.com:1980 + + + +=== TEST 13: set upstream(using `rewrite` mode to pass upstream host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "desc": "new upstream", + "pass_host": "rewrite", + "upstream_host": "test.com" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: set route(using `rewrite` mode to pass upstream host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: hit route +--- request +GET /echo +--- response_headers +host: test.com + + + +=== TEST 16: delete upstream in used +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + ngx.sleep(0.5) -- wait for data synced + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, route [1] is still using it now"} + + + 
+=== TEST 17: multi nodes with `node` mode to pass host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "localhost:1979": 1000, + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "pass_host": "node" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/uri", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit route +--- request +GET /uri +--- response_body eval +qr/host: 127.0.0.1/ +--- error_log +proxy request to 127.0.0.1:1980 + + + +=== TEST 19: multi nodes with `node` mode to pass host, the second node has domain +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1979": 1000, + "localhost:1980": 1 + }, + "type": "roundrobin", + "pass_host": "node" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/uri", + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: hit route +--- request +GET /uri +--- response_body eval +qr/host: localhost/ +--- error_log +proxy request to 127.0.0.1:1980 + + + +=== TEST 21: check that including port in host header is supported when pass_host = node and port is not standard +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "localhost:1980": 1000 + }, + 
"type": "roundrobin", + "pass_host": "node" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream_id": "1", + "uri": "/uri" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: hit route +--- request +GET /uri +--- response_body eval +qr/host: localhost:1980/ + + + +=== TEST 23: check that including port in host header is supported when retrying and pass_host = node and port is not standard +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1979": 1000, + "localhost:1980": 1 + }, + "type": "roundrobin", + "pass_host": "node" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream_id": "1", + "uri": "/uri" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: hit route +--- log_level: debug +--- request +GET /uri +--- error_log +Host: 127.0.0.1:1979 + + + +=== TEST 25: distinguish different upstreams even they have the same addr +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + { + nodes = {["localhost:1980"] = 1}, + type = "roundrobin" + } + ) + assert(code < 300) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream_id": "1", + "uri": "/server_port" + }]] + ) + assert(code < 300) + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/server_port" + + local ports_count = {} + for i = 1, 24 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + { + nodes = {["localhost:" .. (1980 + i % 3)] = 1}, + type = "roundrobin" + } + ) + assert(code < 300) + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + } + } +--- request +GET /t +--- timeout: 5 +--- response_body +[{"count":8,"port":"1982"},{"count":8,"port":"1981"},{"count":8,"port":"1980"}] diff --git a/CloudronPackages/APISIX/apisix-source/t/node/vars.t b/CloudronPackages/APISIX/apisix-source/t/node/vars.t new file mode 100644 index 0000000..1f50101 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/vars.t @@ -0,0 +1,343 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(only arg_k) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "vars": [ ["arg_k", "==", "v"] ], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: /not_found +--- request +GET /hello?k=not-hit +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: hit routes +--- request +GET /hello?k=v +--- response_body +hello world + + + +=== TEST 5: set route(cookie) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/hello", + "vars": [["cookie_k", "==", "v"]], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: /not_found +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 7: /not_found +--- more_headers +Cookie: k=not-hit; kkk=vvv; +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 8: hit routes +--- more_headers +Cookie: k=v; kkk=vvv; +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 9: set route(header) +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/hello", + "vars": [["http_k", "==", "v"]], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: /not_found +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 11: /not_found +--- more_headers +k: not-hit +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 12: hit routes +--- more_headers +k: v +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 13: set route(uri arg + header + cookie) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/hello", + "vars": [["http_k", "==", "header"], ["cookie_k", "==", "cookie"], ["arg_k", "==", "uri_arg"]], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: /not_found +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 15: /not_found +--- more_headers +k: header +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 16: hit routes +--- more_headers +Cookie: k=cookie +k: header +--- request +GET /hello?k=uri_arg +--- response_body +hello world + + + +=== TEST 17: set route(only post arg) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + 
"uri": "/hello", + "vars": [["post_arg_k", "==", "post_form"]], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: not_found (GET request) +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 19: not_found (wrong request body) +--- request +POST /hello +123 +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 20: not_found (wrong content type) +--- request +POST /hello +k=post_form +--- more_headers +Content-Type: multipart/form-data +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 21: hit routes +--- request +POST /hello +k=post_form +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/node/wildcard-host.t b/CloudronPackages/APISIX/apisix-source/t/node/wildcard-host.t new file mode 100644 index 0000000..0685138 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/node/wildcard-host.t @@ -0,0 +1,102 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: host: *.foo.com +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "host": "*.foo.com", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: not found, missing host +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: host: a.foo.com +--- request +GET /hello +--- more_headers +Host: a.foo.com +--- response_body +hello world + + + +=== TEST 5: host: a.b.foo.com +--- request +GET /hello +--- more_headers +Host: a.b.foo.com +--- response_body +hello world + + + +=== TEST 6: host: .foo.com +--- request +GET /hello +--- more_headers +Host: .foo.com +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/package.json b/CloudronPackages/APISIX/apisix-source/t/package.json new file mode 100644 index 0000000..71ee3f9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/package.json @@ -0,0 +1,25 @@ +{ + "name": "apisix-test-suite", + "private": true, + "type": "module", + "scripts": { + "test": "NODE_OPTIONS=--experimental-vm-modules jest" + }, + "devDependencies": { + "@jest/globals": "^29.7.0", + "@trivago/prettier-plugin-sort-imports": "^5.2.2", + "@types/jest": "29.5.14", + "@types/node": "22.14.1", + "axios": "^1.9.0", + "docker-compose": "^1.2.0", + "graphql": "^16.11.0", + "graphql-request": "^7.1.2", + "jest": "29.7.0", 
+ "lago-javascript-client": "^1.26.0", + "simple-git": "^3.27.0", + "ts-jest": "29.3.2", + "ts-node": "10.9.2", + "yaml": "^2.7.1" + }, + "packageManager": "pnpm@10.11.0+sha512.6540583f41cc5f628eb3d9773ecee802f4f9ef9923cc45b69890fb47991d4b092964694ec3a4f738a420c918a333062c8b925d312f42e4f0c263eb603551f977" +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-aws-content-moderation-secrets.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-aws-content-moderation-secrets.t new file mode 100644 index 0000000..a88171a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-aws-content-moderation-secrets.t @@ -0,0 +1,213 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; + $ENV{SECRET_ACCESS_KEY} = "super-secret"; + $ENV{ACCESS_KEY_ID} = "access-key-id"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $main_config = $block->main_config // <<_EOC_; + env AWS_REGION=us-east-1; +_EOC_ + + $block->set_value("main_config", $main_config); + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 2668; + + default_type 'application/json'; + + location / { + content_by_lua_block { + local json = require("cjson.safe") + local core = require("apisix.core") + local open = io.open + + local f = open('t/assets/content-moderation-responses.json', "r") + local resp = f:read("*a") + f:close() + + if not resp then + ngx.status(503) + ngx.say("[INTERNAL FAILURE]: failed to open response.json file") + end + + local responses = json.decode(resp) + if not responses then + ngx.status(503) + ngx.say("[INTERNAL FAILURE]: failed to decode response.json contents") + end + + local headers = ngx.req.get_headers() + local auth_header = headers["Authorization"] + if core.string.find(auth_header, "access-key-id") then + ngx.say(json.encode(responses["good_request"])) + return + end + ngx.status = 403 + ngx.say("unauthorized") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/foo secret_access_key=super-secret +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/foo access_key_id=access-key-id +--- response_body +Success! Data written to: kv/apisix/foo +Success! 
Data written to: kv/apisix/foo + + + +=== TEST 2: set secret_access_key and access_key_id as a reference to secret +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "root" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ai-aws-content-moderation": { + "comprehend": { + "access_key_id": "$secret://vault/test1/foo/access_key_id", + "secret_access_key": "$secret://vault/test1/foo/secret_access_key", + "region": "us-east-1", + "endpoint": "http://localhost:2668" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + ngx.say("success") + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 3: good request should pass +--- request +POST /echo +{"model":"gpt-4o-mini","messages":[{"role":"user","content":"good_request"}]} +--- error_code: 200 +--- response_body chomp +{"model":"gpt-4o-mini","messages":[{"role":"user","content":"good_request"}]} + + + +=== TEST 4: set secret_access_key as a reference to env variable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ai-aws-content-moderation": { + "comprehend": { + "access_key_id": "$env://ACCESS_KEY_ID", + "secret_access_key": "$env://SECRET_ACCESS_KEY", + "region": "us-east-1", + "endpoint": "http://localhost:2668" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + 
return + end + ngx.say("success") + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 5: good request should pass +--- request +POST /echo +{"model":"gpt-4o-mini","messages":[{"role":"user","content":"good_request"}]} +--- error_code: 200 +--- response_body chomp +{"model":"gpt-4o-mini","messages":[{"role":"user","content":"good_request"}]} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-aws-content-moderation.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-aws-content-moderation.t new file mode 100644 index 0000000..7810dea --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-aws-content-moderation.t @@ -0,0 +1,301 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $main_config = $block->main_config // <<_EOC_; + env AWS_REGION=us-east-1; +_EOC_ + + $block->set_value("main_config", $main_config); + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 2668; + + default_type 'application/json'; + + location / { + content_by_lua_block { + local json = require("cjson.safe") + local open = io.open + local f = open('t/assets/content-moderation-responses.json', "r") + local resp = f:read("*a") + f:close() + + if not resp then + ngx.status(503) + ngx.say("[INTERNAL FAILURE]: failed to open response.json file") + end + + local responses = json.decode(resp) + if not responses then + ngx.status(503) + ngx.say("[INTERNAL FAILURE]: failed to decode response.json contents") + end + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + if not body then + ngx.status(503) + ngx.say("[INTERNAL FAILURE]: failed to get request body: ", err) + end + + body, err = json.decode(body) + if not body then + ngx.status(503) + ngx.say("[INTERNAL FAILURE]: failed to decoded request body: ", err) + end + local result = body.TextSegments[1].Text + local final_response = responses[result] or "invalid" + + if final_response == "invalid" then + ngx.status = 500 + end + ngx.say(json.encode(final_response)) + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + 
"ai-aws-content-moderation": { + "comprehend": { + "access_key_id": "access", + "secret_access_key": "ea+secret", + "region": "us-east-1", + "endpoint": "http://localhost:2668" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: toxic request should fail +--- request +POST /echo +toxic +--- error_code: 400 +--- response_body chomp +request body exceeds toxicity threshold + + + +=== TEST 3: good request should pass +--- request +POST /echo +good_request +--- error_code: 200 + + + +=== TEST 4: profanity filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ai-aws-content-moderation": { + "comprehend": { + "access_key_id": "access", + "secret_access_key": "ea+secret", + "region": "us-east-1", + "endpoint": "http://localhost:2668" + }, + "moderation_categories": { + "PROFANITY": 0.5 + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: profane request should fail +--- request +POST /echo +profane +--- error_code: 400 +--- response_body chomp +request body exceeds PROFANITY threshold + + + +=== TEST 6: very profane request should also fail +--- request +POST /echo +very_profane +--- error_code: 400 +--- response_body chomp +request body exceeds PROFANITY threshold + + + +=== TEST 7: good_request should pass +--- request +POST /echo +good_request +--- error_code: 200 + + + +=== TEST 8: set profanity = 0.7 (allow profane request but disallow very_profane) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ai-aws-content-moderation": { + "comprehend": { + "access_key_id": "access", + "secret_access_key": "ea+secret", + "region": "us-east-1", + "endpoint": "http://localhost:2668" + }, + "moderation_categories": { + "PROFANITY": 0.7 + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: profane request should pass profanity check but fail toxicity check +--- request +POST /echo +profane +--- error_code: 400 +--- response_body chomp +request body exceeds toxicity threshold + + + +=== TEST 10: profane_but_not_toxic request should pass +--- request +POST /echo +profane_but_not_toxic +--- error_code: 200 + + + +=== TEST 11: but very profane request will fail +--- request +POST /echo +very_profane +--- error_code: 400 +--- response_body chomp +request body exceeds PROFANITY threshold + + + +=== TEST 12: good_request should pass +--- request +POST /echo +good_request +--- error_code: 200 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-aws-content-moderation2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-aws-content-moderation2.t new file mode 100644 index 0000000..869fcf0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-aws-content-moderation2.t @@ -0,0 +1,92 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + my $main_config = $block->main_config // <<_EOC_; + env AWS_REGION=us-east-1; +_EOC_ + + $block->set_value("main_config", $main_config); + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ai-aws-content-moderation": { + "comprehend": { + "access_key_id": "access", + "secret_access_key": "ea+secret", + "region": "us-east-1", + "endpoint": "http://localhost:2668" + }, + "llm_provider": "openai" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: request should fail +--- request +POST /echo +toxic +--- error_code: 500 +--- response_body chomp +Comprehend:detectToxicContent() failed to connect to 'http://localhost:2668': connection refused +--- error_log +failed to send request to http://localhost: Comprehend:detectToxicContent() failed to connect to 'http://localhost:2668': connection refused diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-prompt-decorator.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-prompt-decorator.t new 
file mode 100644 index 0000000..15f40ee --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-prompt-decorator.t @@ -0,0 +1,293 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity: configure prepend only +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-decorator": { + "prepend":[ + { + "role": "system", + "content": "some content" + } + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: test prepend +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body, actual_resp = t('/echo', + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": 
"You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }]], + [[{ + "messages": [ + { "role": "system", "content": "some content" }, + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("failed") + return + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 3: sanity: configure append only +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-decorator": { + "append":[ + { + "role": "system", + "content": "some content" + } + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 4: test append +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body, actual_resp = t('/echo', + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }]], + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" 
}, + { "role": "system", "content": "some content" } + ] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("failed") + return + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 5: sanity: configure append and prepend both +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-decorator": { + "append":[ + { + "role": "system", + "content": "some append" + } + ], + "prepend":[ + { + "role": "system", + "content": "some prepend" + } + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 6: test append +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body, actual_resp = t('/echo', + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }]], + [[{ + "messages": [ + { "role": "system", "content": "some prepend" }, + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" 
}, + { "role": "system", "content": "some append" } + ] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("failed") + return + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 7: sanity: configure neither append nor prepend should fail +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-decorator": { + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body_eval +qr/.*failed to check the configuration of plugin ai-prompt-decorator err.*/ +--- error_code: 400 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-prompt-guard.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-prompt-guard.t new file mode 100644 index 0000000..2a44cfd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-prompt-guard.t @@ -0,0 +1,413 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: wrong regex should fail validation +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-guard": { + "match_all_roles": true, + "deny_patterns": [ + "(abc" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body eval +qr/.*failed to check the configuration of plugin ai-prompt-guard.*/ +--- error_code: 400 + + + +=== TEST 2: setup route with both allow and deny with match_all_roles +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-guard": { + "match_all_roles": true, + "allow_patterns": [ + "goodword" + ], + "deny_patterns": [ + "badword" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 3: send request with good word +--- request +POST /hello +{ + "messages": [ + { "role": "system", "content": "goodword" } + ] +} + + + +=== TEST 4: send request with bad word +--- request +POST /hello +{ + "messages": [ + { "role": "system", "content": "badword" } + ] +} +--- response_body +{"message":"Request doesn't match allow patterns"} +--- error_code: 400 + + + +=== TEST 5: setup route with only deny with match_all_roles +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-guard": { + "match_all_roles": true, + "deny_patterns": [ + "badword" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 6: send request with good word +--- request +POST /hello +{ + "messages": [ + { "role": "system", "content": "goodword" } + ] +} + + + +=== TEST 7: send request with bad word +--- request +POST /hello +{ + "messages": [ + { "role": "system", "content": "badword" } + ] +} +--- response_body +{"message":"Request contains prohibited content"} +--- error_code: 400 + + + +=== TEST 8: setup route with only allow with match_all_roles=false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-guard": { + "allow_patterns": [ + "goodword" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 9: send request with bad word and it will pass for non user +--- request +POST /hello +{ + "messages": [ + { "role": "system", "content": "badword" } + ] +} + + + +=== TEST 10: send request with bad word +--- request +POST /hello +{ + "messages": [ + { "role": "user", "content": "badword" } + ] +} +--- response_body +{"message":"Request doesn't match allow patterns"} +--- error_code: 400 + + + +=== TEST 11: setup route with only deny with match_all_conversation_history +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-guard": { + "match_all_conversation_history": true, + "match_all_roles": true, + "deny_patterns": [ + "badword" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 12: send request with good word but had bad word in history +--- request +POST /hello +{ + "messages": [ + { "role": "system", "content": "goodword" }, + { "role": "system", "content": "badword" }, + { "role": "system", "content": "goodword" } + ] +} +--- response_body +{"message":"Request contains prohibited content"} +--- error_code: 400 + + + +=== TEST 13: setup route with only deny with match_all_conversation_history=false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-guard": { + "match_all_conversation_history": false, + "match_all_roles": true, + "deny_patterns": [ + "badword" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 14: send request with good word but had bad word in history +--- request +POST /hello +{ + "messages": [ + { "role": "system", "content": "goodword" }, + { "role": "system", "content": "badword" }, + { "role": "system", "content": "goodword" } + ] +} + + + +=== TEST 15: setup route + deny + match_all_roles + pattern match +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } 
+ }, + "plugins": { + "ai-prompt-guard": { + "match_all_roles": true, + "deny_patterns": [ + "^[A-Za-z0-9_]+badword$" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 16: send request with good word +--- request +POST /hello +{ + "messages": [ + { "role": "system", "content": "anaapsanaapbadword" } + ] +} +--- response_body +{"message":"Request contains prohibited content"} +--- error_code: 400 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-prompt-template.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-prompt-template.t new file mode 100644 index 0000000..050e0f2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-prompt-template.t @@ -0,0 +1,403 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-template": { + "templates":[ + { + "name": "programming question", + "template": { + "model": "some model", + "messages": [ + { "role": "system", "content": "You are a {{ language }} programmer." }, + { "role": "user", "content": "Write a {{ program_name }} program." } + ] + } + }, + { + "name": "level of detail", + "template": { + "model": "some model", + "messages": [ + { "role": "user", "content": "Explain about {{ topic }} in {{ level }}." 
} + ] + } + } + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: no templates +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-template": { + "templates":[] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- error_code: 400 +--- response_body eval +qr/.*property \\"templates\\" validation failed: expect array to have at least 1 items.*/ + + + +=== TEST 3: test template insertion +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("apisix.core.json") + local code, body, actual_resp = t('/echo', + ngx.HTTP_POST, + [[{ + "template_name": "programming question", + "language": "python", + "program_name": "quick sort" + }]], + [[{ + "model": "some model", + "messages": [ + { "role": "system", "content": "You are a python programmer." }, + { "role": "user", "content": "Write a quick sort program." } + ] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 4: multiple templates +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-template": { + "templates":[ + { + "name": "programming question", + "template": { + "model": "some model", + "messages": [ + { "role": "system", "content": "You are a {{ language }} programmer." }, + { "role": "user", "content": "Write a {{ program_name }} program." 
} + ] + } + }, + { + "name": "level of detail", + "template": { + "model": "some model", + "messages": [ + { "role": "user", "content": "Explain about {{ topic }} in {{ level }}." } + ] + } + } + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 5: test second template +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("apisix.core.json") + local code, body, actual_resp = t('/echo', + ngx.HTTP_POST, + [[{ + "template_name": "level of detail", + "topic": "psychology", + "level": "brief" + }]], + [[{ + "model": "some model", + "messages": [ + { "role": "user", "content": "Explain about psychology in brief." } + ] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 6: missing template items +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("apisix.core.json") + local code, body, actual_resp = t('/echo', + ngx.HTTP_POST, + [[{ + "template_name": "level of detail", + "topic-missing": "psychology", + "level-missing": "brief" + }]], + [[{ + "model": "some model", + "messages": [ + { "role": "user", "content": "Explain about in ." 
} + ] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 7: request body contains non-existent template +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("apisix.core.json") + local code, body, actual_resp = t('/echo', + ngx.HTTP_POST, + [[{ + "template_name": "random", + "some-key": "some-value" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say("passed") + } + } +--- error_code: 400 +--- response_body eval +qr/.*template: random not configured.*/ + + + +=== TEST 8: request body contains non-existent template +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("apisix.core.json") + local code, body, actual_resp = t('/echo', + ngx.HTTP_POST, + [[{ + "missing-template-name": "haha" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say("passed") + } + } +--- error_code: 400 +--- response_body eval +qr/.*template name is missing in request.*/ + + + +=== TEST 9: (cache test) same template name in different routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for i = 1, 5, 1 do + local code = t('/apisix/admin/routes/' .. i, + ngx.HTTP_PUT, + [[{ + "uri": "/]] .. i .. [[", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ai-prompt-template": { + "templates":[ + { + "name": "same name", + "template": { + "model": "some model", + "messages": [ + { "role": "system", "content": "Field: {{ field }} in route]] .. i .. [[." } + ] + } + } + ] + }, + "proxy-rewrite": { + "uri": "/echo" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("failed") + return + end + end + + for i = 1, 5, 1 do + local code, body = t('/' .. 
i, + ngx.HTTP_POST, + [[{ + "template_name": "same name", + "field": "foo" + }]], + [[{ + "model": "some model", + "messages": [ + { "role": "system", "content": "Field: foo in route]] .. i .. [[." } + ] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + end + ngx.status = 200 + ngx.say("passed") + } + } + +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi.balancer.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi.balancer.t new file mode 100644 index 0000000..48ab5d8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi.balancer.t @@ -0,0 +1,360 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +my $resp_file = 't/assets/ai-proxy-response.json'; +open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!"; +my $resp = do { local $/; <$fh> }; +close($fh); + +print "Hello, World!\n"; +print $resp; + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $user_yaml_config = <<_EOC_; +plugins: + - ai-proxy-multi +_EOC_ + $block->set_value("extra_yaml_config", $user_yaml_config); + + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 6724; + + default_type 'application/json'; + + location /v1/chat/completions { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local header_auth = ngx.req.get_headers()["authorization"] + local query_auth = ngx.req.get_uri_args()["apikey"] + + if header_auth ~= "Bearer token" and query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + if header_auth == "Bearer token" or query_auth == "apikey" then + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + if not body.messages or #body.messages < 1 then + ngx.status = 400 + ngx.say([[{ "error": "bad request"}]]) + return + end + + ngx.status = 200 + ngx.print("openai") + return + end + + + ngx.status = 503 + ngx.say("reached the end of the test suite") + } + } + + location /chat/completions { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + 
local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local header_auth = ngx.req.get_headers()["authorization"] + local query_auth = ngx.req.get_uri_args()["apikey"] + + if header_auth ~= "Bearer token" and query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + if header_auth == "Bearer token" or query_auth == "apikey" then + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + if not body.messages or #body.messages < 1 then + ngx.status = 400 + ngx.say([[{ "error": "bad request"}]]) + return + end + + ngx.status = 200 + ngx.print("deepseek") + return + end + + + ngx.status = 503 + ngx.say("reached the end of the test suite") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route with roundrobin balancer, weight 4 and 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai", + "provider": "openai", + "weight": 4, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-4", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + } + }, + { + "name": "deepseek", + "provider": "deepseek", + "weight": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "deepseek-chat", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724/chat/completions" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: test +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + 
local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/anything" + + local restab = {} + + local body = [[{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] }]] + for i = 1, 10 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "POST", body = body}) + if not res then + ngx.say(err) + return + end + table.insert(restab, res.body) + end + + table.sort(restab) + ngx.log(ngx.WARN, "test picked instances: ", table.concat(restab, ".")) + + } + } +--- request +GET /t +--- error_log +deepseek.deepseek.openai.openai.openai.openai.openai.openai.openai.openai + + + +=== TEST 3: set route with chash balancer, weight 4 and 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "balancer": { + "algorithm": "chash", + "hash_on": "vars", + "key": "query_string" + }, + "instances": [ + { + "name": "openai", + "provider": "openai", + "weight": 4, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-4", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + } + }, + { + "name": "deepseek", + "provider": "deepseek", + "weight": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "deepseek-chat", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724/chat/completions" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: test +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/anything" + + local restab = {} + + local body = [[{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] }]] + for i = 1, 10 do + local httpc = http.new() + local query = { + index = i + } + local res, err = httpc:request_uri(uri, {method = "POST", body = body, query = query}) + if not res then + ngx.say(err) + return + end + table.insert(restab, res.body) + end + + local count = {} + for _, value in ipairs(restab) do + count[value] = (count[value] or 0) + 1 + end + + for p, num in pairs(count) do + ngx.log(ngx.WARN, "distribution: ", p, ": ", num) + end + + } + } +--- request +GET /t +--- timeout: 10 +--- error_log +distribution: deepseek: 2 +distribution: openai: 8 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi.openai-compatible.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi.openai-compatible.t new file mode 100644 index 0000000..d5be5d2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi.openai-compatible.t @@ -0,0 +1,296 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +my $resp_file = 't/assets/ai-proxy-response.json'; +open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!"; +my $resp = do { local $/; <$fh> }; +close($fh); + +print "Hello, World!\n"; +print $resp; + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $user_yaml_config = <<_EOC_; +plugins: + - ai-proxy-multi +_EOC_ + $block->set_value("extra_yaml_config", $user_yaml_config); + + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 6724; + + default_type 'application/json'; + + location /v1/chat/completions { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local test_type = ngx.req.get_headers()["test-type"] + if test_type == "options" then + if body.foo == "bar" then + ngx.status = 200 + ngx.say("options works") + else + ngx.status = 500 + ngx.say("model options feature doesn't work") + end + return + end + + local header_auth = ngx.req.get_headers()["authorization"] + local query_auth = ngx.req.get_uri_args()["apikey"] + + if header_auth ~= "Bearer token" and query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + if header_auth == "Bearer token" or query_auth == "apikey" then + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + if not body.messages or #body.messages < 1 then + ngx.status = 400 + ngx.say([[{ "error": "bad request"}]]) + return + end + + if body.messages[1].content == "write an SQL query to get all rows from student table" then + ngx.print("SELECT * FROM 
STUDENTS") + return + end + + ngx.status = 200 + ngx.say([[$resp]]) + return + end + + + ngx.status = 503 + ngx.say("reached the end of the test suite") + } + } + + location /random { + content_by_lua_block { + ngx.say("path override works") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route with right auth header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "self-hosted", + "provider": "openai-compatible", + "weight": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "custom", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724/v1/chat/completions" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- more_headers +Authorization: Bearer token +--- error_code: 200 +--- response_body eval +qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/ + + + +=== TEST 3: set route with stream = true (SSE) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "self-hosted", + "provider": "openai-compatible", + "weight": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "custom-instruct", + "max_tokens": 512, + "temperature": 1.0, + "stream": true + }, + 
"override": { + "endpoint": "http://localhost:7737/v1/chat/completions" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: test is SSE works as expected +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local httpc = http.new() + local core = require("apisix.core") + + local ok, err = httpc:connect({ + scheme = "http", + host = "localhost", + port = ngx.var.server_port, + }) + + if not ok then + ngx.status = 500 + ngx.say(err) + return + end + + local params = { + method = "POST", + headers = { + ["Content-Type"] = "application/json", + }, + path = "/anything", + body = [[{ + "messages": [ + { "role": "system", "content": "some content" } + ] + }]], + } + + local res, err = httpc:request(params) + if not res then + ngx.status = 500 + ngx.say(err) + return + end + + local final_res = {} + while true do + local chunk, err = res.body_reader() -- will read chunk by chunk + if err then + core.log.error("failed to read response chunk: ", err) + break + end + if not chunk then + break + end + core.table.insert_tail(final_res, chunk) + end + + ngx.print(#final_res .. final_res[6]) + } + } +--- response_body_like eval +qr/6data: \[DONE\]\n\n/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi.t new file mode 100644 index 0000000..83f3444 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi.t @@ -0,0 +1,606 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +my $resp_file = 't/assets/ai-proxy-response.json'; +open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!"; +my $resp = do { local $/; <$fh> }; +close($fh); + +print "Hello, World!\n"; +print $resp; + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $user_yaml_config = <<_EOC_; +plugins: + - ai-proxy-multi +_EOC_ + $block->set_value("extra_yaml_config", $user_yaml_config); + + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 6724; + + default_type 'application/json'; + + location /v1/chat/completions { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local test_type = ngx.req.get_headers()["test-type"] + if test_type == "options" then + if body.foo == "bar" then + ngx.status = 200 + ngx.say("options works") + else + ngx.status = 500 + ngx.say("model options feature doesn't work") + end + return + end + + local header_auth = ngx.req.get_headers()["authorization"] + local query_auth = ngx.req.get_uri_args()["apikey"] + + if header_auth ~= "Bearer token" and query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + if 
header_auth == "Bearer token" or query_auth == "apikey" then + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + if not body.messages or #body.messages < 1 then + ngx.status = 400 + ngx.say([[{ "error": "bad request"}]]) + return + end + + if body.messages[1].content == "write an SQL query to get all rows from student table" then + ngx.print("SELECT * FROM STUDENTS") + return + end + + ngx.status = 200 + ngx.say([[$resp]]) + return + end + + + ngx.status = 503 + ngx.say("reached the end of the test suite") + } + } + + location /random { + content_by_lua_block { + ngx.say("path override works") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: minimal viable configuration +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-proxy-multi") + local ok, err = plugin.check_schema({ + instances = { + { + name = "openai-official", + provider = "openai", + options = { + model = "gpt-4", + }, + weight = 1, + auth = { + header = { + some_header = "some_value" + } + } + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 2: unsupported provider +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-proxy-multi") + local ok, err = plugin.check_schema({ + instances = { + { + name = "self-hosted", + provider = "some-unique", + options = { + model = "gpt-4", + }, + weight = 1, + auth = { + header = { + some_header = "some_value" + } + } + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body eval +qr/.*property "provider" validation failed: matches none of the enum values*/ + + + +=== TEST 3: set route with wrong auth header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-official", + "provider": "openai", + "weight": 1, + "auth": { + "header": { + "Authorization": "Bearer wrongtoken" + } + }, + "options": { + "model": "gpt-4", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 401 +--- response_body +Unauthorized + + + +=== TEST 5: set route with right auth header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-official", + "provider": "openai", + "weight": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-4", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- more_headers +Authorization: Bearer token +--- error_code: 200 +--- response_body eval +qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/ + + + +=== TEST 7: send request with empty body +--- request +POST /anything +--- more_headers +Authorization: Bearer token +--- 
error_code: 400 +--- response_body_chomp +failed to get request body: request body is empty + + + +=== TEST 8: send request with wrong method (GET) should work +--- request +GET /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- more_headers +Authorization: Bearer token +--- error_code: 200 +--- response_body eval +qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/ + + + +=== TEST 9: wrong JSON in request body should give error +--- request +GET /anything +{}"messages": [ { "role": "system", "cont +--- error_code: 400 +--- response_body +{"message":"could not get parse JSON request body: Expected the end but found T_STRING at character 3"} + + + +=== TEST 10: content-type should be JSON +--- request +POST /anything +prompt%3Dwhat%2520is%25201%2520%252B%25201 +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 400 +--- response_body chomp +unsupported content-type: application/x-www-form-urlencoded, only application/json is supported + + + +=== TEST 11: model options being merged to request body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-official", + "provider": "openai", + "weight": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "some-model", + "foo": "bar", + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" 
} + ] + }]], + nil, + { + ["test-type"] = "options", + ["Content-Type"] = "application/json", + } + ) + + ngx.status = code + ngx.say(actual_body) + + } + } +--- error_code: 200 +--- response_body_chomp +options_works + + + +=== TEST 12: override path +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-official", + "provider": "openai", + "weight": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "some-model", + "foo": "bar", + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724/random" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" 
} + ] + }]], + nil, + { + ["test-type"] = "path", + ["Content-Type"] = "application/json", + } + ) + + ngx.status = code + ngx.say(actual_body) + + } + } +--- response_body_chomp +path override works + + + +=== TEST 13: set route with stream = true (SSE) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-official", + "provider": "openai", + "weight": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0, + "stream": true + }, + "override": { + "endpoint": "http://localhost:7737" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: test is SSE works as expected +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local httpc = http.new() + local core = require("apisix.core") + + local ok, err = httpc:connect({ + scheme = "http", + host = "localhost", + port = ngx.var.server_port, + }) + + if not ok then + ngx.status = 500 + ngx.say(err) + return + end + + local params = { + method = "POST", + headers = { + ["Content-Type"] = "application/json", + }, + path = "/anything", + body = [[{ + "messages": [ + { "role": "system", "content": "some content" } + ] + }]], + } + + local res, err = httpc:request(params) + if not res then + ngx.status = 500 + ngx.say(err) + return + end + + local final_res = {} + while true do + local chunk, err = res.body_reader() -- will read chunk by chunk + if err then + core.log.error("failed to read response chunk: ", err) + break + end + if not chunk then + break + end + core.table.insert_tail(final_res, chunk) + end + + ngx.print(#final_res .. 
final_res[6]) + } + } +--- response_body_like eval +qr/6data: \[DONE\]\n\n/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi2.t new file mode 100644 index 0000000..536c98c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy-multi2.t @@ -0,0 +1,347 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +my $resp_file = 't/assets/ai-proxy-response.json'; +open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!"; +my $resp = do { local $/; <$fh> }; +close($fh); + +print "Hello, World!\n"; +print $resp; + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $user_yaml_config = <<_EOC_; +plugins: + - ai-proxy-multi +_EOC_ + $block->set_value("extra_yaml_config", $user_yaml_config); + + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 6724; + + default_type 'application/json'; + + location /v1/chat/completions { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local query_auth = ngx.req.get_uri_args()["api_key"] + + if query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + + ngx.status = 200 + ngx.say("passed") + } + } + + + location /test/params/in/overridden/endpoint { + content_by_lua_block { + local json = require("cjson.safe") + local core = require("apisix.core") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + + local query_auth = ngx.req.get_uri_args()["api_key"] + ngx.log(ngx.INFO, "found query params: ", core.json.stably_encode(ngx.req.get_uri_args())) + + if query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + ngx.status = 200 + ngx.say("passed") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route with wrong query param 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-official", + "provider": "openai", + "weight": 1, + "auth": { + "query": { + "api_key": "wrong_key" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 401 +--- response_body +Unauthorized + + + +=== TEST 3: set route with right query param +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-official", + "provider": "openai", + "weight": 1, + "auth": { + "query": { + "api_key": "apikey" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 5: set route without overriding the endpoint_url +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-official", + "provider": "openai", + "weight": 1, + "auth": { + "header": { + "Authorization": "some-key" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + } + } + ], + "ssl_verify": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: send request +--- custom_trusted_cert: /etc/ssl/certs/ca-certificates.crt +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 401 + + + +=== TEST 7: query params in override.endpoint should be sent to LLM +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy-multi": { + "instances": [ + { + "name": "openai-official", + "provider": "openai", + "weight": 1, + "auth": { + "query": { + "api_key": "apikey" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724/test/params/in/overridden/endpoint?some_query=yes" + } + } + ], + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 200 +--- error_log +found query params: 
{"api_key":"apikey","some_query":"yes"} +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy.openai-compatible.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy.openai-compatible.t new file mode 100644 index 0000000..a514764 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy.openai-compatible.t @@ -0,0 +1,321 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +my $resp_file = 't/assets/ai-proxy-response.json'; +open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!"; +my $resp = do { local $/; <$fh> }; +close($fh); + +print "Hello, World!\n"; +print $resp; + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 6724; + + default_type 'application/json'; + + location /v1/chat/completions { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local test_type = ngx.req.get_headers()["test-type"] + if test_type == "options" then + if body.foo == "bar" then + ngx.status = 200 + ngx.say("options works") + else + ngx.status = 500 + ngx.say("model options feature doesn't work") + end + return + end + + local header_auth = ngx.req.get_headers()["authorization"] + local query_auth = ngx.req.get_uri_args()["apikey"] + + if header_auth ~= "Bearer token" and query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + if header_auth == "Bearer token" or query_auth == "apikey" then + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + if not body.messages or #body.messages < 1 then + ngx.status = 400 + ngx.say([[{ "error": "bad request"}]]) + return + end + + if body.messages[1].content == "write an SQL query to get all rows from student table" then + ngx.print("SELECT * FROM STUDENTS") + return + end + + ngx.status = 200 + ngx.say([[$resp]]) + return + end + + + ngx.status = 503 + ngx.say("reached the end of the 
test suite") + } + } + + location /random { + content_by_lua_block { + ngx.say("path override works") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route with right auth header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai-compatible", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "custom", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724/v1/chat/completions" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- more_headers +Authorization: Bearer token +--- error_code: 200 +--- response_body eval +qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/ + + + +=== TEST 3: override path +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai-compatible", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "some-model", + "foo": "bar", + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724/random" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { 
"role": "user", "content": "What is 1+1?" } + ] + }]], + nil, + { + ["test-type"] = "path", + ["Content-Type"] = "application/json", + } + ) + + ngx.status = code + ngx.say(actual_body) + + } + } +--- response_body_chomp +path override works + + + +=== TEST 4: set route with stream = true (SSE) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai-compatible", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "custom", + "max_tokens": 512, + "temperature": 1.0, + "stream": true + }, + "override": { + "endpoint": "http://localhost:7737/v1/chat/completions" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: test is SSE works as expected +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local httpc = http.new() + local core = require("apisix.core") + + local ok, err = httpc:connect({ + scheme = "http", + host = "localhost", + port = ngx.var.server_port, + }) + + if not ok then + ngx.status = 500 + ngx.say(err) + return + end + + local params = { + method = "POST", + headers = { + ["Content-Type"] = "application/json", + }, + path = "/anything", + body = [[{ + "messages": [ + { "role": "system", "content": "some content" } + ] + }]], + } + + local res, err = httpc:request(params) + if not res then + ngx.status = 500 + ngx.say(err) + return + end diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy.t new file mode 100644 index 0000000..c99a6c1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy.t @@ -0,0 +1,673 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# 
contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +my $resp_file = 't/assets/ai-proxy-response.json'; +open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!"; +my $resp = do { local $/; <$fh> }; +close($fh); + +print "Hello, World!\n"; +print $resp; + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 6724; + + default_type 'application/json'; + + location /v1/chat/completions { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local test_type = ngx.req.get_headers()["test-type"] + if test_type == "options" then + if body.foo == "bar" then + ngx.status = 200 + ngx.say("options works") + else + ngx.status = 500 + ngx.say("model options feature doesn't work") + end + return + end + + local header_auth = ngx.req.get_headers()["authorization"] + local query_auth = 
ngx.req.get_uri_args()["apikey"] + + if header_auth ~= "Bearer token" and query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + if header_auth == "Bearer token" or query_auth == "apikey" then + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + if not body.messages or #body.messages < 1 then + ngx.status = 400 + ngx.say([[{ "error": "bad request"}]]) + return + end + + if body.messages[1].content == "write an SQL query to get all rows from student table" then + ngx.print("SELECT * FROM STUDENTS") + return + end + + ngx.status = 200 + ngx.say([[$resp]]) + return + end + + + ngx.status = 503 + ngx.say("reached the end of the test suite") + } + } + + location /v1/embeddings { + content_by_lua_block { + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("unsupported request method: ", ngx.req.get_method()) + end + + local header_auth = ngx.req.get_headers()["authorization"] + if header_auth ~= "Bearer token" then + ngx.status = 401 + ngx.say("unauthorized") + return + end + + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + local json = require("cjson.safe") + body, err = json.decode(body) + if err then + ngx.status = 400 + ngx.say("failed to get request body: ", err) + end + + if body.model ~= "text-embedding-ada-002" then + ngx.status = 400 + ngx.say("unsupported model: ", body.model) + return + end + + if body.encoding_format ~= "float" then + ngx.status = 400 + ngx.say("unsupported encoding format: ", body.encoding_format) + return + end + + ngx.status = 200 + ngx.say([[ + { + "object": "list", + "data": [ + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + -0.0028842222 + ], + "index": 0 + } + ], + "model": "text-embedding-ada-002", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + ]]) + } + } + + location /random { + content_by_lua_block { + ngx.say("path override works") + } + } + } +_EOC_ + + 
$block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: minimal viable configuration +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-proxy") + local ok, err = plugin.check_schema({ + provider = "openai", + options = { + model = "gpt-4", + }, + auth = { + header = { + some_header = "some_value" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 2: unsupported provider +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-proxy") + local ok, err = plugin.check_schema({ + provider = "some-unique", + options = { + model = "gpt-4", + }, + auth = { + header = { + some_header = "some_value" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body eval +qr/.*property "provider" validation failed: matches none of the enum values.*/ + + + +=== TEST 3: set route with wrong auth header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer wrongtoken" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 401 +--- response_body +Unauthorized + + + +=== TEST 5: set route with right auth header +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- more_headers +Authorization: Bearer token +--- error_code: 200 +--- response_body eval +qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/ + + + +=== TEST 7: send request with empty body +--- request +POST /anything +--- more_headers +Authorization: Bearer token +--- error_code: 400 +--- response_body_chomp +failed to get request body: request body is empty + + + +=== TEST 8: send request with wrong method (GET) should work +--- request +GET /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- more_headers +Authorization: Bearer token +--- error_code: 200 +--- response_body eval +qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/ + + + +=== TEST 9: wrong JSON in request body should give error +--- request +GET /anything +{}"messages": [ { "role": "system", "cont +--- error_code: 400 +--- response_body +{"message":"could not get parse JSON request body: Expected the end but found T_STRING at character 3"} + + + +=== TEST 10: content-type should be JSON +--- request +POST /anything +prompt%3Dwhat%2520is%25201%2520%252B%25201 +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 400 
+--- response_body chomp +unsupported content-type: application/x-www-form-urlencoded, only application/json is supported + + + +=== TEST 11: model options being merged to request body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "some-model", + "foo": "bar", + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }]], + nil, + { + ["test-type"] = "options", + ["Content-Type"] = "application/json", + } + ) + + ngx.status = code + ngx.say(actual_body) + + } + } +--- error_code: 200 +--- response_body_chomp +options_works + + + +=== TEST 12: override path +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai", + "model": "some-model", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "foo": "bar", + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724/random" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" 
} + ] + }]], + nil, + { + ["test-type"] = "path", + ["Content-Type"] = "application/json", + } + ) + + ngx.status = code + ngx.say(actual_body) + + } + } +--- response_body_chomp +path override works + + + +=== TEST 13: set route with stream = true (SSE) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0, + "stream": true + }, + "override": { + "endpoint": "http://localhost:7737" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: test is SSE works as expected +--- config + location /t { + content_by_lua_block { + local http = require("resty.http") + local httpc = http.new() + local core = require("apisix.core") + + local ok, err = httpc:connect({ + scheme = "http", + host = "localhost", + port = ngx.var.server_port, + }) + + if not ok then + ngx.status = 500 + ngx.say(err) + return + end + + local params = { + method = "POST", + headers = { + ["Content-Type"] = "application/json", + }, + path = "/anything", + body = [[{ + "messages": [ + { "role": "system", "content": "some content" } + ] + }]], + } + + local res, err = httpc:request(params) + if not res then + ngx.status = 500 + ngx.say(err) + return + end + + local final_res = {} + while true do + local chunk, err = res.body_reader() -- will read chunk by chunk + if err then + core.log.error("failed to read response chunk: ", err) + break + end + if not chunk then + break + end + core.table.insert_tail(final_res, chunk) + end + + ngx.print(#final_res .. 
final_res[6]) + } + } +--- response_body_like eval +qr/6data: \[DONE\]\n\n/ + + + +=== TEST 15: proxy embedding endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/embeddings", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "text-embedding-ada-002", + "encoding_format": "float" + }, + "override": { + "endpoint": "http://localhost:6724/v1/embeddings" + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 16: send request to embedding api +--- request +POST /embeddings +{ + "input": "The food was delicious and the waiter..." +} +--- error_code: 200 +--- response_body_like eval +qr/.*text-embedding-ada-002*/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy2.t new file mode 100644 index 0000000..43cdd30 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-proxy2.t @@ -0,0 +1,315 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +my $resp_file = 't/assets/ai-proxy-response.json'; +open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!"; +my $resp = do { local $/; <$fh> }; +close($fh); + +print "Hello, World!\n"; +print $resp; + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 6724; + + default_type 'application/json'; + + location /v1/chat/completions { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local query_auth = ngx.req.get_uri_args()["api_key"] + + if query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + + ngx.status = 200 + ngx.say("passed") + } + } + + + location /test/params/in/overridden/endpoint { + content_by_lua_block { + local json = require("cjson.safe") + local core = require("apisix.core") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + + local query_auth = ngx.req.get_uri_args()["api_key"] + ngx.log(ngx.INFO, "found query params: ", core.json.stably_encode(ngx.req.get_uri_args())) + + if query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + ngx.status = 200 + ngx.say("passed") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route with wrong query param +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "query": { + "api_key": "wrong_key" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 401 +--- response_body +Unauthorized + + + +=== TEST 3: set route with right query param +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "query": { + "api_key": "apikey" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 5: set route without overriding the endpoint_url +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": 
"openai", + "auth": { + "header": { + "Authorization": "some-key" + } + }, + "options": { + "model": "gpt-4", + "max_tokens": 512, + "temperature": 1.0 + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: send request +--- custom_trusted_cert: /etc/ssl/certs/ca-certificates.crt +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 401 + + + +=== TEST 7: query params in override.endpoint should be sent to LLM +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "query": { + "api_key": "apikey" + } + }, + "options": { + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:6724/test/params/in/overridden/endpoint?some_query=yes" + }, + "ssl_verify": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: send request +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 200 +--- error_log +found query params: {"api_key":"apikey","some_query":"yes"} +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-rag.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-rag.t new file mode 100644 index 0000000..86b0c64 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-rag.t @@ -0,0 +1,392 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +my $resp_file = 't/assets/embeddings.json'; +open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!"; +my $embeddings = do { local $/; <$fh> }; +close($fh); + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 3623; + + default_type 'application/json'; + + location /embeddings { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + return + end + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local header_auth = ngx.req.get_headers()["api-key"] + + if header_auth ~= "key" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + ngx.status = 200 + ngx.say([[$embeddings]]) + } + } + + location /search { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + + local 
header_auth = ngx.req.get_headers()["api-key"] + if header_auth ~= "key" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + if body.vectorQueries[1].vector[1] ~= 123456789 then + ngx.status = 500 + ngx.say({ error = "occurred" }) + return + end + + ngx.status = 200 + ngx.print("passed") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: minimal viable configuration +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-rag") + local ok, err = plugin.check_schema({ + embeddings_provider = { + azure_openai = { + api_key = "sdfjasdfh", + endpoint = "http://a.b.com" + } + }, + vector_search_provider = { + azure_ai_search = { + api_key = "iuhsdf", + endpoint = "http://a.b.com" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 2: vector search provider missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-rag") + local ok, err = plugin.check_schema({ + embeddings_provider = { + azure_openai = { + api_key = "sdfjasdfh", + endpoint = "http://a.b.com" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +property "vector_search_provider" is required + + + +=== TEST 3: embeddings provider missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-rag") + local ok, err = plugin.check_schema({ + vector_search_provider = { + azure_ai_search = { + api_key = "iuhsdf", + endpoint = "http://a.b.com" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +property "embeddings_provider" is required + + + +=== TEST 4: wrong auth header for embeddings provider +--- config + location /t 
{ + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ai-rag": { + "embeddings_provider": { + "azure_openai": { + "endpoint": "http://localhost:3623/embeddings", + "api_key": "wrongkey" + } + }, + "vector_search_provider": { + "azure_ai_search": { + "endpoint": "http://localhost:3623/search", + "api_key": "key" + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + }, + "scheme": "http", + "pass_host": "node" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: send request +--- request +POST /echo +{"ai_rag":{"vector_search":{"fields":"contentVector"},"embeddings":{"input":"which service is good for devops","dimensions":1024}}} +--- error_code: 401 +--- response_body +Unauthorized +--- error_log +could not get embeddings: Unauthorized + + + +=== TEST 6: wrong auth header for search provider +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ai-rag": { + "embeddings_provider": { + "azure_openai": { + "endpoint": "http://localhost:3623/embeddings", + "api_key": "key" + } + }, + "vector_search_provider": { + "azure_ai_search": { + "endpoint": "http://localhost:3623/search", + "api_key": "wrongkey" + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + }, + "scheme": "http", + "pass_host": "node" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: send request +--- request +POST /echo +{"ai_rag":{"vector_search":{"fields":"contentVector"},"embeddings":{"input":"which service is good for devops","dimensions":1024}}} +--- error_code: 401 +--- error_log +could not 
get vector_search result: Unauthorized + + + +=== TEST 8: send request with empty body +--- request +POST /echo +--- error_code: 400 +--- response_body_chomp +failed to get request body: request body is empty + + + +=== TEST 9: send request with vector search fields missing +--- request +POST /echo +{"ai_rag":{"vector_search":{"missing-fields":"something"},"embeddings":{"input":"which service is good for devops","dimensions":1024}}} +--- error_code: 400 +--- error_log +request body fails schema check: property "ai_rag" validation failed: property "vector_search" validation failed: property "fields" is required + + + +=== TEST 10: send request with embedding input missing +--- request +POST /echo +{"ai_rag":{"vector_search":{"fields":"something"},"embeddings":{"missinginput":"which service is good for devops"}}} +--- error_code: 400 +--- error_log +request body fails schema check: property "ai_rag" validation failed: property "embeddings" validation failed: property "input" is required + + + +=== TEST 11: configure plugin with right auth headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ai-rag": { + "embeddings_provider": { + "azure_openai": { + "endpoint": "http://localhost:3623/embeddings", + "api_key": "key" + } + }, + "vector_search_provider": { + "azure_ai_search": { + "endpoint": "http://localhost:3623/search", + "api_key": "key" + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + }, + "scheme": "http", + "pass_host": "node" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: send request with embedding input missing +--- request +POST /echo +{"ai_rag":{"vector_search":{"fields":"something"},"embeddings":{"input":"which service is good for devops"}}} +--- error_code: 200 +--- 
response_body eval +qr/\{"messages":\[\{"content":"passed","role":"user"\}\]\}|\{"messages":\[\{"role":"user","content":"passed"\}\]\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-rate-limiting.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-rate-limiting.t new file mode 100644 index 0000000..8ac6677 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-rate-limiting.t @@ -0,0 +1,1047 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + + +my $resp_file = 't/assets/ai-proxy-response.json'; +open(my $fh, '<', $resp_file) or die "Could not open file '$resp_file' $!"; +my $resp = do { local $/; <$fh> }; +close($fh); + +print "Hello, World!\n"; +print $resp; + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 16724; + + default_type 'application/json'; + + location /anything { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + local body = ngx.req.get_body_data() + + if body ~= "SELECT * FROM STUDENTS" then + ngx.status = 503 + ngx.say("passthrough doesn't work") + return + end + ngx.say('{"foo", "bar"}') + } + } + + location /v1/chat/completions { + content_by_lua_block { + local json = require("cjson.safe") + + if ngx.req.get_method() ~= "POST" then + ngx.status = 400 + ngx.say("Unsupported request method: ", ngx.req.get_method()) + end + ngx.req.read_body() + local body, err = ngx.req.get_body_data() + body, err = json.decode(body) + + local test_type = ngx.req.get_headers()["test-type"] + if test_type == "options" then + if body.foo == "bar" then + ngx.status = 200 + ngx.say("options works") + else + ngx.status = 500 + ngx.say("model options feature doesn't work") + end + return + end + + local header_auth = ngx.req.get_headers()["authorization"] + local query_auth = ngx.req.get_uri_args()["apikey"] + + if header_auth ~= "Bearer token" and query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + if header_auth == "Bearer token" or query_auth == "apikey" then + ngx.req.read_body() + local body, err = 
ngx.req.get_body_data() + body, err = json.decode(body) + + if not body.messages or #body.messages < 1 then + ngx.status = 400 + ngx.say([[{ "error": "bad request"}]]) + return + end + + if body.messages[1].content == "write an SQL query to get all rows from student table" then + ngx.print("SELECT * FROM STUDENTS") + return + end + + ngx.status = 200 + ngx.say(string.format([[ +{ + "choices": [ + { + "finish_reason": "stop", + "index": 0, + "message": { "content": "1 + 1 = 2.", "role": "assistant" } + } + ], + "created": 1723780938, + "id": "chatcmpl-9wiSIg5LYrrpxwsr2PubSQnbtod1P", + "model": "%s", + "object": "chat.completion", + "system_fingerprint": "fp_abc28019ad", + "usage": { "completion_tokens": 5, "prompt_tokens": 8, "total_tokens": 10 } +} + ]], body.model)) + return + end + + + ngx.status = 503 + ngx.say("reached the end of the test suite") + } + } + + location /random { + content_by_lua_block { + ngx.say("path override works") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local configs = { + { + time_window = 60, + }, + { + limit = 30, + }, + { + limit = 30, + time_window = 60, + rejected_code = 199, + }, + { + limit = 30, + time_window = 60, + limit_strategy = "invalid", + }, + { + limit = 30, + time_window = 60, + instances = { + { + name = "instance1", + limit = 30, + time_window = 60, + }, + { + limit = 30, + time_window = 60, + } + }, + }, + { + time_window = 60, + instances = { + { + name = "instance1", + limit = 30, + time_window = 60, + } + }, + }, + { + limit = 30, + instances = { + { + name = "instance1", + limit = 30, + time_window = 60, + } + }, + }, + { + instances = { + { + name = "instance1", + limit = 30, + time_window = 60, + } + }, + }, + { + limit = 30, + time_window = 60, + rejected_code = 403, + rejected_msg = "rate limit exceeded", + limit_strategy = "completion_tokens", + }, + { + limit = 30, + 
time_window = 60, + instances = { + { + name = "instance1", + limit = 30, + time_window = 60, + } + }, + } + } + local core = require("apisix.core") + local plugin = require("apisix.plugins.ai-rate-limiting") + for _, config in ipairs(configs) do + local ok, err = plugin.check_schema(config) + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + end + ngx.say("done") + } + } +--- response_body +property "limit" is required when "time_window" is set +property "time_window" is required when "limit" is set +property "rejected_code" validation failed: expected 199 to be at least 200 +property "limit_strategy" validation failed: matches none of the enum values +property "instances" validation failed: failed to validate item 2: property "name" is required +property "limit" is required when "time_window" is set +property "time_window" is required when "limit" is set +passed +passed +passed +done + + + +=== TEST 2: set route 1, default limit_strategy: total_tokens +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/ai", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:16724" + }, + "ssl_verify": false + }, + "ai-rate-limiting": { + "limit": 30, + "time_window": 60 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: reject the 3th request +--- pipelined_requests eval +[ + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_headers +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 503] + + + +=== TEST 4: set rejected_code to 403, rejected_msg to "rate limit exceeded" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/ai", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:16724" + }, + "ssl_verify": false + }, + "ai-rate-limiting": { + "limit": 30, + "time_window": 60, + "rejected_code": 403, + "rejected_msg": "rate limit exceeded" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: check code and message +--- pipelined_requests eval +[ + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_headers +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 403] +--- response_body eval +[ + qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/, + qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/, + qr/\{ "content": "1 \+ 1 = 2\.", "role": "assistant" \}/, + qr/\{"error_msg":"rate limit exceeded"\}/, +] + + + +=== TEST 6: check rate limit headers +--- request +POST /ai +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- more_headers +Authorization: Bearer token +--- response_headers +X-AI-RateLimit-Limit-ai-proxy: 30 +X-AI-RateLimit-Remaining-ai-proxy: 29 +X-AI-RateLimit-Reset-ai-proxy: 60 + + + +=== TEST 7: check rate limit headers after 4 requests +--- pipelined_requests eval +[ + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_header +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 403] +--- response_headers eval +[ + "X-AI-RateLimit-Remaining-ai-proxy: 29", + "X-AI-RateLimit-Remaining-ai-proxy: 19", + "X-AI-RateLimit-Remaining-ai-proxy: 9", + "X-AI-RateLimit-Remaining-ai-proxy: 0", +] + + + +=== TEST 8: set route2 with limit_strategy: completion_tokens +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/ai2", + "plugins": { + "ai-proxy": { + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-35-turbo-instruct", + "max_tokens": 512, + "temperature": 1.0 + }, + "override": { + "endpoint": "http://localhost:16724" + }, + "ssl_verify": false + }, + "ai-rate-limiting": { + "limit": 20, + "time_window": 45, + "limit_strategy": "completion_tokens" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: reject the 5th request +--- pipelined_requests eval +[ + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_headers +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 200, 503] + + + +=== TEST 10: check rate limit headers +--- request +POST /ai2 +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- more_headers +Authorization: Bearer token +--- response_headers +X-AI-RateLimit-Limit-ai-proxy: 20 +X-AI-RateLimit-Remaining-ai-proxy: 19 +X-AI-RateLimit-Reset-ai-proxy: 45 + + + +=== TEST 11: multi-request +--- pipelined_requests eval +[ + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_header +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 200, 503] +--- response_headers eval +[ + "X-AI-RateLimit-Remaining-ai-proxy: 19", + "X-AI-RateLimit-Remaining-ai-proxy: 14", + "X-AI-RateLimit-Remaining-ai-proxy: 9", + "X-AI-RateLimit-Remaining-ai-proxy: 4", + "X-AI-RateLimit-Remaining-ai-proxy: 0", +] + + + +=== TEST 12: request route 1 and route 2 +--- pipelined_requests eval +[ + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai2\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_headers +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 200, 200, 200, 200, 403, 503] + + + +=== TEST 13: ai-rate-limiting & ai-proxy-multi, with instance_health_and_rate_limiting strategy +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/ai", + "plugins": { + "ai-proxy-multi": { + "fallback_strategy": "instance_health_and_rate_limiting", + "instances": [ + { + "name": "openai-gpt4", + "provider": "openai", + "weight": 1, + "priority": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-4" + }, + "override": { + "endpoint": "http://localhost:16724" + } + }, + { + "name": "openai-gpt3", + "provider": "openai", + "weight": 1, + "priority": 0, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-3" + }, + "override": { + "endpoint": "http://localhost:16724" + } + } + ], + "ssl_verify": false + }, + "ai-rate-limiting": { + "limit": 10, + "time_window": 60 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: fallback strategy should works +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local core = require("apisix.core") + local code, _, body = t("/ai", + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" 
} + ] + }]], + nil, + { + ["test-type"] = "options", + ["Content-Type"] = "application/json", + } + ) + + assert(code == 200, "first request should be successful") + assert(core.string.find(body, "gpt-4"), + "first request should be handled by higher priority instance") + + local code, _, body = t("/ai", + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }]], + nil, + { + ["test-type"] = "options", + ["Content-Type"] = "application/json", + } + ) + + assert(code == 200, "second request should be successful") + assert(core.string.find(body, "gpt-3"), + "second request should be handled by lower priority instance") + + local code, body = t("/ai", + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }]], + nil, + { + ["test-type"] = "options", + ["Content-Type"] = "application/json", + } + ) + + assert(code == 503, "third request should be failed") + assert(core.string.find(body, "all servers tried"), "all servers tried") + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 15: limiting to only one instance +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/ai", + "plugins": { + "ai-proxy-multi": { + "fallback_strategy": "instance_health_and_rate_limiting", + "instances": [ + { + "name": "openai-gpt4", + "provider": "openai", + "weight": 1, + "priority": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-4" + }, + "override": { + "endpoint": "http://localhost:16724" + } + }, + { + "name": "openai-gpt3", + "provider": "openai", + "weight": 1, + "priority": 0, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-3" + }, + 
"override": { + "endpoint": "http://localhost:16724" + } + } + ], + "ssl_verify": false + }, + "ai-rate-limiting": { + "instances": [ + { + "name": "openai-gpt4", + "limit": 20, + "time_window": 60 + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: 10 requests, 8 should be handled by gpt-3, 2 should be handled by gpt-4 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local core = require("apisix.core") + + local instances_count = {} + for i = 1, 10 do + local code, _, body = t("/ai", + ngx.HTTP_POST, + [[{ + "messages": [ + { "role": "system", "content": "You are a mathematician" }, + { "role": "user", "content": "What is 1+1?" } + ] + }]], + nil, + { + ["test-type"] = "options", + ["Content-Type"] = "application/json", + } + ) + assert(code == 200, "first request should be successful") + if core.string.find(body, "gpt-4") then + instances_count["gpt-4"] = (instances_count["gpt-4"] or 0) + 1 + else + instances_count["gpt-3"] = (instances_count["gpt-3"] or 0) + 1 + end + end + + ngx.log(ngx.INFO, "instances_count test:", core.json.delay_encode(instances_count)) + + assert(instances_count["gpt-4"] <= 2, "gpt-4 should be handled by higher priority instance") + assert(instances_count["gpt-3"] >= 8, "gpt-3 should be handled by lower priority instance") + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 17: each instance uses different current limiting +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/ai", + "plugins": { + "ai-proxy-multi": { + "fallback_strategy": "instance_health_and_rate_limiting", + "instances": [ + { + "name": "openai-gpt4", + "provider": "openai", + "weight": 1, + "priority": 
1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-4" + }, + "override": { + "endpoint": "http://localhost:16724" + } + }, + { + "name": "openai-gpt3", + "provider": "openai", + "weight": 1, + "priority": 0, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-3" + }, + "override": { + "endpoint": "http://localhost:16724" + } + } + ], + "ssl_verify": false + }, + "ai-rate-limiting": { + "instances": [ + { + "name": "openai-gpt3", + "limit": 50, + "time_window": 60 + }, + { + "name": "openai-gpt4", + "limit": 20, + "time_window": 60 + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: gpt3 allows 5 requests, gpt4 allows 2 requests +--- pipelined_requests eval +[ + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_headers +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 200, 200, 200, 200, 503, 503] + + + +=== TEST 19: set limit & instances +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/ai", + "plugins": { + "ai-proxy-multi": { + "fallback_strategy": "instance_health_and_rate_limiting", + "instances": [ + { + "name": "openai-gpt4", + "provider": "openai", + "weight": 1, + "priority": 1, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-4" + }, + "override": { + "endpoint": "http://localhost:16724" + } + }, + { + "name": "openai-gpt3", + "provider": "openai", + "weight": 1, + "priority": 0, + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "options": { + "model": "gpt-3" + }, + "override": { + "endpoint": "http://localhost:16724" + } + } + ], + "ssl_verify": false + }, + "ai-rate-limiting": { + "limit": 20, + "time_window": 60, + "instances": [ + { + "name": "openai-gpt3", + "limit": 50, + "time_window": 60 + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "canbeanything.com": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: gpt3 allows 5 requests, gpt4 allows 2 requests +--- pipelined_requests eval +[ + "POST /ai\n" . 
"{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", + "POST /ai\n" . "{ \"messages\": [ { \"role\": \"system\", \"content\": \"You are a mathematician\" }, { \"role\": \"user\", \"content\": \"What is 1+1?\"} ] }", +] +--- more_headers +Authorization: Bearer token +--- error_code eval +[200, 200, 200, 200, 200, 200, 200, 503, 503] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-request-rewrite.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-request-rewrite.t new file mode 100644 index 0000000..fc25ac4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-request-rewrite.t @@ -0,0 +1,739 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 6724; + + default_type 'application/json'; + + location /v1/chat/completions { + content_by_lua_block { + + ngx.req.read_body() + local body = ngx.req.get_body_data() + + local json = require("cjson.safe") + local request_data = json.decode(body) + local header_auth = ngx.req.get_headers()["authorization"] + local query_auth = ngx.req.get_uri_args()["api_key"] + + if header_auth ~= "Bearer token" and query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + local response = { + choices = { + { + message = { + content = request_data.messages[1].content .. ' ' .. 
request_data.messages[2].content + } + } + } + } + local json = require("cjson.safe") + local json_response = json.encode(response) + ngx.say(json_response) + } + } + + location /random { + content_by_lua_block { + + local response = { + choices = { + { + message = { + content = 'return by random endpoint' + } + } + } + } + local json = require("cjson.safe") + local json_response = json.encode(response) + ngx.say(json_response) + } + } + + location /internalservererror { + content_by_lua_block { + ngx.status = 500 + ngx.say("Internal Server Error") + return + } + } + + location /bad_request { + content_by_lua_block { + ngx.status = 400 + ngx.say("Bad Request") + return + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: minimal viable configuration +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-request-rewrite") + local ok, err = plugin.check_schema({ + prompt = "some prompt", + provider = "openai", + auth = { + header = { + Authorization = "Bearer token" + } + } + }) + + if not ok then + ngx.print(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 2: missing prompt field should not pass +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-request-rewrite") + local ok, err = plugin.check_schema({ + provider = "openai", + auth = { + header = { + Authorization = "Bearer token" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +property "prompt" is required + + + +=== TEST 3: missing auth field should not pass +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-request-rewrite") + local ok, err = plugin.check_schema({ + prompt = "some prompt", + provider = "openai", + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +property 
"auth" is required + + + +=== TEST 4: missing provider field should not pass +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-request-rewrite") + local ok, err = plugin.check_schema({ + prompt = "some prompt", + auth = { + header = { + Authorization = "Bearer token" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +property "provider" is required + + + +=== TEST 5: provider must be one of: deepseek, openai, aimlapi, openai-compatible +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-request-rewrite") + local ok, err = plugin.check_schema({ + prompt = "some prompt", + provider = "invalid-provider", + auth = { + header = { + Authorization = "Bearer token" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +property "provider" validation failed: matches none of the enum values + + + +=== TEST 6: provider deepseek +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-request-rewrite") + local ok, err = plugin.check_schema({ + prompt = "some prompt", + provider = "deepseek", + auth = { + header = { + Authorization = "Bearer token" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 7: provider openai-compatible +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-request-rewrite") + local ok, err = plugin.check_schema({ + prompt = "some prompt", + provider = "openai-compatible", + auth = { + header = { + Authorization = "Bearer token" + } + }, + override = { + endpoint = "http://localhost:6724" + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 8: override endpoint works +--- config + location /t { + content_by_lua_block { + 
local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "some prompt", + "provider": "openai", + "auth": { + "header": { + "Authorization": "Bearer token" + } + }, + "override": { + "endpoint": "http://localhost:6724/random" + }, + "ssl_verify": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + "some random content", + nil, + { + ["Content-Type"] = "text/plain", + } + ) + local json = require("cjson.safe") + local response_data = json.decode(actual_body) + + if response_data.data == 'return by random endpoint' then + ngx.say("passed") + else + ngx.say(actual_body) + end + } + } +--- response_body +passed + + + +=== TEST 9: set route with wrong auth header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "some prompt", + "auth": { + "header": { + "Authorization": "Bearer wrong-token" + } + }, + "provider": "openai", + "override": { + "endpoint": "http://localhost:6724" + }, + "ssl_verify": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + "some random content", + nil, + { + ["Content-Type"] = "text/plain", + } + ) + + if code == 500 then + ngx.say('passed') + return + end + } + } + +--- error_log +LLM service returned error status: 401 +--- response_body +passed + + + +=== TEST 10: set route with correct query param +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, 
body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "some prompt", + "auth": { + "query": { + "api_key": "apikey" + } + }, + "provider": "openai", + "override": { + "endpoint": "http://localhost:6724" + }, + "ssl_verify": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + "some random content", + nil, + { + ["Content-Type"] = "text/plain", + } + ) + + local json = require("cjson.safe") + local response_data = json.decode(actual_body) + + if response_data.data == "some prompt some random content" then + ngx.say("passed") + else + ngx.say("failed") + end + } + } +--- response_body +passed + + + +=== TEST 11: set route with wrong query param +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "some prompt", + "auth": { + "query": { + "api_key": "wrong_key" + } + }, + "provider": "openai", + "override": { + "endpoint": "http://localhost:6724" + }, + "ssl_verify": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + "some random content", + nil, + { + ["Content-Type"] = "text/plain", + } + ) + + if code == 500 then + ngx.say('passed') + return + end + } + } + +--- error_log +LLM service returned error status: 401 +--- response_body +passed + + + +=== TEST 12: prompt passed correctly to LLM service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "some prompt to 
test", + "auth": { + "query": { + "api_key": "apikey" + } + }, + "provider": "openai", + "override": { + "endpoint": "http://localhost:6724" + }, + "ssl_verify": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + "some random content", + nil, + { + ["Content-Type"] = "text/plain", + } + ) + + local json = require("cjson.safe") + local response_data = json.decode(actual_body) + + if response_data.data == "some prompt to test some random content" then + ngx.say("passed") + else + ngx.say("failed") + end + } + } +--- response_body +passed + + + +=== TEST 13: check LLM bad request +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "some prompt to test", + "auth": { + "query": { + "api_key": "apikey" + } + }, + "provider": "openai", + "override": { + "endpoint": "http://localhost:6724/bad_request" + }, + "ssl_verify": false, + "options": { + "model": "check_options_model", + "temperature": 0.5, + "max_tokens": 100, + "top_p": 1, + "frequency_penalty": 0, + "presence_penalty": 0 + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + "some random content", + nil, + { + ["Content-Type"] = "text/plain", + } + ) + + if code == 500 then + ngx.say('passed') + return + end + } + } +--- error_log +LLM service returned error status: 400 +--- response_body +passed + + + +=== TEST 14: check LLM internal server error +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + 
"prompt": "some prompt to test", + "auth": { + "query": { + "api_key": "apikey" + } + }, + "provider": "openai", + "override": { + "endpoint": "http://localhost:6724/internalservererror" + }, + "ssl_verify": false, + "options": { + "model": "check_options_model", + "temperature": 0.5, + "max_tokens": 100, + "top_p": 1, + "frequency_penalty": 0, + "presence_penalty": 0 + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + "some random content", + nil, + { + ["Content-Type"] = "text/plain", + } + ) + + if code == 500 then + ngx.say('passed') + return + end + } + } +--- error_log +LLM service returned error status: 500 +--- response_body +passed + + + +=== TEST 15: provider aimlapi +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-request-rewrite") + local ok, err = plugin.check_schema({ + prompt = "some prompt", + provider = "aimlapi", + auth = { + header = { + Authorization = "Bearer token" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai-request-rewrite2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-request-rewrite2.t new file mode 100644 index 0000000..f066f21 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai-request-rewrite2.t @@ -0,0 +1,287 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + my $http_config = $block->http_config // <<_EOC_; + server { + server_name openai; + listen 6724; + + default_type 'application/json'; + + + location /check_extra_options { + content_by_lua_block { + local json = require("cjson.safe") + + ngx.req.read_body() + local body = ngx.req.get_body_data() + local request_data = json.decode(body) + + if request_data.extra_option ~= "extra option" then + ngx.status = 400 + ngx.say("extra option not match") + return + end + + local response = { + choices = { + { + message = { + content = request_data.messages[1].content .. ' ' .. 
request_data.messages[2].content + } + } + } + } + local json = require("cjson.safe") + local json_response = json.encode(response) + ngx.say(json_response) + } + } + + location /test/params/in/overridden/endpoint { + content_by_lua_block { + local json = require("cjson.safe") + local core = require("apisix.core") + + local query_auth = ngx.req.get_uri_args()["api_key"] + ngx.log(ngx.INFO, "found query params: ", core.json.stably_encode(ngx.req.get_uri_args())) + + if query_auth ~= "apikey" then + ngx.status = 401 + ngx.say("Unauthorized") + return + end + + ngx.status = 200 + ngx.say("passed") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: check plugin options send to llm service correctly +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "some prompt to test", + "auth": { + "query": { + "api_key": "apikey" + } + }, + "provider": "openai", + "override": { + "endpoint": "http://localhost:6724/check_extra_options" + }, + "ssl_verify": false, + "options": { + "model": "check_options_model", + "extra_option": "extra option" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + "some random content", + nil, + { + ["Content-Type"] = "text/plain", + } + ) + + if code == 200 then + ngx.say('passed') + return + end + } + } +--- response_body +passed + + + +=== TEST 2: openai-compatible provider should use with override.endpoint +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ai-request-rewrite") + local ok, err = plugin.check_schema({ + prompt = "some prompt", + provider = "openai-compatible", + auth = { + header = { + Authorization = "Bearer 
token" + } + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +override.endpoint is required for openai-compatible provider + + + +=== TEST 3: query params in override.endpoint should be sent to LLM +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "auth": { + "query": { + "api_key": "apikey" + } + }, + "model": { + "provider": "openai", + "name": "gpt-35-turbo-instruct", + "options": { + "max_tokens": 512, + "temperature": 1.0 + } + }, + "override": { + "endpoint": "http://localhost:6724/test/params/in/overridden/endpoint?some_query=yes" + }, + "ssl_verify": false + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: send request without body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-request-rewrite": { + "prompt": "some prompt to test", + "auth": { + "query": { + "api_key": "apikey" + } + }, + "provider": "openai", + "override": { + "endpoint": "http://localhost:6724/check_extra_options" + }, + "ssl_verify": false, + "options": { + "model": "check_options_model", + "extra_option": "extra option" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } + }]] + ) + + + local code, body, actual_body = t("/anything", + ngx.HTTP_POST, + nil, + nil, + { + ["Content-Type"] = "text/plain", + } + ) + + if code == 200 then + ngx.say('passed') + return + end + } + } +--- error_log eval +qr/missing request body/ +--- response_body +passed diff --git 
a/CloudronPackages/APISIX/apisix-source/t/plugin/ai.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai.t new file mode 100644 index 0000000..83eab86 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai.t @@ -0,0 +1,908 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: enable route cache +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local t = {} + for i = 1, 2 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +use ai plane to match route + + + +=== TEST 2: route has vars, disable route cache +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "vars": [ ["arg_k", "~=", "v"] ], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code = t('/hello??k=a', ngx.HTTP_GET) + ngx.say(code) + + local code = t('/hello??k=v', ngx.HTTP_GET) + ngx.say(code) + } + } +--- response_body +200 +404 +--- no_error_log +use ai plane to match route + + + +=== TEST 3: method changed, create different route cache +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local t = {} + for i = 1, 4 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err + if i % 2 == 0 then + res, err = httpc:request_uri(uri, { method = "POST" }) + else + res, err = httpc:request_uri(uri) + end + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +use ai plane to match route + + + +=== TEST 4: route with plugins, enable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + + "plugins": { + "limit-count": { + "count": 9999, + "time_window": 60 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local t = {} + for i = 1, 2 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +use ai plane to match route + + + +=== TEST 5: enable -> disable -> enable -> disable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1?k=a" + local uri3 = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello1?k=v" + + -- round 1: all routes without vars or filter_fun, enable route cache + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local threads1 = {} + for i = 1, 2 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(threads1, th) + end + + for i, th in ipairs(threads1) do + ngx.thread.wait(th) + end + + -- round 2: routes with vars or filter_fun, disable route cache + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "vars": [ ["arg_k", "~=", "v"] ], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local threads2 = {} + for i = 1, 2 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err + if i == 1 then + -- arg_k = a, match route 2 + res, err = httpc:request_uri(uri2) + assert(res.status == 200) + else + -- arg_k = v, not match route 2 + res, err = httpc:request_uri(uri3) + assert(res.status == 404) + end + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(threads2, th) + end + + for i, th in ipairs(threads2) do + ngx.thread.wait(th) + end + + -- round 3: delete route with vars, the remaining route + -- has no vars or filter_fun, enable route cache + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local threads3 = {} + for i = 1, 2 do + local th = 
assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(threads3, th) + end + + for i, th in ipairs(threads3) do + ngx.thread.wait(th) + end + + -- round 4: routes with vars or filter_fun, disable route cache + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "vars": [ ["arg_k", "~=", "v"] ], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local threads4 = {} + for i = 1, 2 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err + if i == 1 then + -- arg_k = a, match route 2 + res, err = httpc:request_uri(uri2) + assert(res.status == 200) + else + -- arg_k = v, not match route 2 + res, err = httpc:request_uri(uri3) + assert(res.status == 404) + end + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(threads4, th) + end + + for i, th in ipairs(threads4) do + ngx.thread.wait(th) + end + + -- clean route 2 + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/use ai plane to match route/ +--- grep_error_log_out +use ai plane to match route +use ai plane to match route + + + +=== TEST 6: route key: uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(1) + + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +route cache key: /hello + + + +=== TEST 7: route key: uri + method +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(1) + + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +route cache key: /hello#GET + + + +=== TEST 8: route key: uri + method + host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "host": "127.0.0.1", + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(1) + + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +route cache key: /hello#GET#127.0.0.1 + + + +=== TEST 9: enable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say("done") + } + } +--- response_body +done +--- error_log +enable sample upstream + + + +=== TEST 10: route has plugins and run before_proxy, disable samply upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "before_proxy", + "functions" : ["return function(conf, ctx) ngx.log(ngx.WARN, \"run before_proxy phase balancer_ip : \", ctx.balancer_ip) end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say("done") + } + } +--- response_body +done +--- error_log +run before_proxy phase balancer_ip : 127.0.0.1 +--- no_error_log +enable sample upstream + + + +=== TEST 11: upstream has more than one nodes, disable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say("done") + } + } +--- response_body +done +--- no_error_log +enable sample upstream + + + +=== TEST 12: node has domain, disable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "admin.apisix.dev:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say("done") + } + } +--- response_body +done +--- no_error_log +enable sample upstream + + + +=== TEST 13: enable --> disable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "enable_websocket": true, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/enable sample upstream/ +--- grep_error_log_out +enable sample upstream + + + +=== TEST 14: renew route cache +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + for k = 1, 2 do + local code, body = t('/apisix/admin/routes/' .. k, + ngx.HTTP_PUT, + [[{ + "host": "127.0.0.1", + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello]] .. k .. [[" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(1) + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri .. 
k) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + end + end + ngx.say("done") + } + } +--- response_body +done +--- error_log +renew route cache: count=3001 +renew route cache: count=3002 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai2.t new file mode 100644 index 0000000..9830f71 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai2.t @@ -0,0 +1,428 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + if (!defined $block->extra_init_by_lua) { + my $extra_init_by_lua = <<_EOC_; + local apisix = require("apisix") + apisix.http_header_filter_phase = function () + ngx.header.content_length = 14 + end + + apisix.http_body_filter_phase = function () + ngx.arg[1] = "do body filter" + end +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: enable skip body filter +--- extra_init_by_lua + local apisix = require("apisix") + apisix.http_header_filter_phase = function () + ngx.header.content_length = nil + end + + apisix.http_body_filter_phase = function () + ngx.arg[1] = "do body filter" + end +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.print(res.body) + } + } +--- response_body +hello world + + + +=== TEST 2: route with plugin_config_id, disable skip body filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "before_proxy", + "functions" : ["return function(conf, ctx) ngx.log(ngx.WARN, \"run before_proxy phase balancer_ip : \", ctx.balancer_ip) end"] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugin_config_id": "1", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say(res.body) + } + } +--- response_body +do body filter +--- error_log +run before_proxy phase balancer_ip : 127.0.0.1 +--- no_error_log +enable sample upstream + + + +=== TEST 3: route with plugins, disable skip body filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "new_consumer", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "serverless-pre-function": { + "phase": "before_proxy", + "functions" : ["return function(conf, ctx) ngx.log(ngx.WARN, \"run before_proxy phase balancer_ip : \", ctx.balancer_ip) end"] + } + } + }]] + ) + ngx.sleep(0.5) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local headers = { + ["apikey"] = "auth-jack" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say(res.body) + } + } +--- response_body +do body filter +--- error_log +run before_proxy phase balancer_ip : 127.0.0.1 +--- no_error_log +enable sample upstream + + + +=== TEST 4: one of route has plugins, disable skip body filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "before_proxy", + "functions" : ["return function(conf, ctx) ngx.log(ngx.WARN, \"run before_proxy phase balancer_ip : \", ctx.balancer_ip) end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello1" + local httpc = http.new() + local headers = { + ["apikey"] = "auth-jack" + } + local res, err = httpc:request_uri(uri, {headers = headers}) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say(res.body) + } + } +--- response_body +do body filter +--- no_error_log +enable sample upstream + + + +=== TEST 5: exist global_rules, disable skip body filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "before_proxy", + "functions" : ["return function(conf, ctx) ngx.log(ngx.WARN, \"run before_proxy phase balancer_ip : \", ctx.balancer_ip) end"] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say(res.body) + } + } +--- response_body +do body filter +--- error_log +run before_proxy phase balancer_ip : 127.0.0.1 +--- no_error_log +enable sample upstream + + + +=== TEST 6: upstream with keepalive_pool, disable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "keepalive_pool": { + "size": 1, + "idle_timeout": 8, + "requests": 2 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.5) + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say(res.body) + } + } +--- response_body +do body filter +--- error_log +proxy request to 127.0.0.1:1980 +--- no_error_log +enable sample upstream diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai3.t new file mode 100644 index 0000000..b56d102 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai3.t @@ -0,0 +1,263 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: keep priority behavior consistent +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "priority": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_port" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "priority": 10, + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_port" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/server_port" + local t = {} + for i = 1, 2 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.say(res.body) + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + } + } +--- response_body +1981 +1981 +--- error_log +use ai plane to match route + + + +=== TEST 2: keep route cache as latest data +# update the attributes that do not participate in the route cache key to ensure +# that the route cache use the latest data +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/pm', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "foo", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "prometheus": { + "prefer_name": true + } + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + + local metrics_uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/apisix/prometheus/metrics" + local httpc = http.new() + local res, err = httpc:request_uri(metrics_uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + + local m, err = ngx.re.match(res.body, "apisix_bandwidth{type=\"ingress\",route=\"foo\"", "jo") + ngx.say(m[0]) + + -- update name by patch + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + [[{ + "name": "bar" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + + local metrics_uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/apisix/prometheus/metrics" + local httpc = http.new() + local res, err = httpc:request_uri(metrics_uri) + assert(res.status == 200) + if not res then + ngx.log(ngx.ERR, err) + return + end + local m, err = ngx.re.match(res.body, "apisix_bandwidth{type=\"ingress\",route=\"bar\"", "jo") + ngx.say(m[0]) + } + } +--- response_body +apisix_bandwidth{type="ingress",route="foo" +apisix_bandwidth{type="ingress",route="bar" + + + +==== TEST 3: route has filter_func, disable route cache +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "filter_func": "function(vars) return vars.arg_k ~= 'v' end", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code = t('/hello??k=a', ngx.HTTP_GET) + ngx.say(code) + + local code = t('/hello??k=v', ngx.HTTP_GET) + ngx.say(code) + } + } +--- response_body +200 +404 +--- no_error_log +use ai plane to match route diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai4.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/ai4.t new file mode 100644 index 0000000..dad1437 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai4.t @@ -0,0 +1,473 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->extra_init_by_lua) { + my $extra_init_by_lua = <<_EOC_; + add_eligible_route = function(id, uri) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/' .. id, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "]] .. uri .. [[" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + end + + add_ineligible_route = function(id, uri) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/' .. id, + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "uri": "]] .. uri .. 
[[" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + end + + update_route_to_ineligible = function(id) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/' .. id .. '/upstream/nodes', + ngx.HTTP_PATCH, + [[{ + "127.0.0.1:1980": 1, + "127.0.0.1:1981": 1 + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + end + + update_route_to_eligible = function(id) + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/' .. id .. '/upstream/nodes', + ngx.HTTP_PATCH, + [[{ + "127.0.0.1:1980": 1 + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + end + + clear_route = function(id) + local t = require("lib.test_admin").test + local code = t('/apisix/admin/routes/' .. id, ngx.HTTP_DELETE) + return code + end +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: enable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_eligible_route(1, "/hello") + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + ngx.say("done") + } + } +--- response_body +done +--- error_log +enable sample upstream + + + +=== TEST 2: enable sample upstream, add ineligible route lead to disable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_eligible_route(1, "/hello") + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 200) + + add_ineligible_route(2, "/hello1") + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + 
assert(clear_route(2) == 200) + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/enable sample upstream|proxy request to/ +--- grep_error_log_out +enable sample upstream +proxy request to + + + +=== TEST 3: enable sample upstream, update route as ineligible lead to disable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_eligible_route(1, "/hello") + add_eligible_route(2, "/hello1") + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + + update_route_to_ineligible(2) + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + assert(clear_route(2) == 200) + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/enable sample upstream|proxy request to/ +--- grep_error_log_out +enable sample upstream +proxy request to + + + +=== TEST 4: enable sample upstream, add eligible route and keep sample upstream as enable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_eligible_route(1, "/hello") + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 200) + + add_eligible_route(2, "/hello1") + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + assert(clear_route(2) == 200) + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/enable sample upstream/ +--- grep_error_log_out +enable sample upstream +enable sample upstream +--- no_error_log eval +qr/proxy request to \S+/ + + + +=== TEST 5: enable sample upstream, delete route and keep sample upstream as enable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_eligible_route(1, "/hello") + add_eligible_route(2, "/hello1") + + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + assert(clear_route(2) == 200) + local code, body = t("/hello", 
ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/enable sample upstream/ +--- grep_error_log_out +enable sample upstream +enable sample upstream +--- no_error_log eval +qr/proxy request to \S+/ + + + +=== TEST 6: enable sample upstream, delete all routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_eligible_route(1, "/hello") + add_eligible_route(2, "/hello1") + + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 200) + assert(clear_route(1) == 200) + assert(clear_route(2) == 200) + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 404) + + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/enable sample upstream/ +--- grep_error_log_out +enable sample upstream +--- no_error_log eval +qr/proxy request to \S+/ + + + +=== TEST 7: disable sample upstream, add ineligible route and keep sample upstream as disable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_ineligible_route(1, "/hello") + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 200) + + add_ineligible_route(2, "/hello1") + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + assert(clear_route(2) == 200) + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/proxy request to/ +--- grep_error_log_out +proxy request to +proxy request to +--- no_error_log +enable sample upstream + + + +=== TEST 8: disable sample upstream, add eligible route and keep disable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_ineligible_route(1, "/hello") + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 200) + + add_eligible_route(2, "/hello1") + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code 
== 200) + + assert(clear_route(1) == 200) + assert(clear_route(2) == 200) + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/proxy request to/ +--- grep_error_log_out +proxy request to +proxy request to +--- no_error_log +enable sample upstream + + + +=== TEST 9: disable sample upstream, delete some ineligible route and keep sample upstream as disable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_ineligible_route(1, "/hello") + add_ineligible_route(2, "/hello1") + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(2) == 200) + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/proxy request to/ +--- grep_error_log_out +proxy request to +proxy request to +--- no_error_log +enable sample upstream + + + +=== TEST 10: disable sample upstream, update some of ineligible route to eligible, keep sample upstream as disable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_ineligible_route(1, "/hello") + add_ineligible_route(2, "/hello1") + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + + update_route_to_eligible(1) + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + assert(clear_route(2) == 200) + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/proxy request to/ +--- grep_error_log_out +proxy request to +proxy request to +--- no_error_log +enable sample upstream + + + +=== TEST 11: disable sample upstream, delete all ineligible route, enable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_ineligible_route(1, "/hello") + add_ineligible_route(2, "/hello1") + add_eligible_route(3, "/server_port") + + 
local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + assert(clear_route(2) == 200) + local code, body = t("/server_port", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(3) == 200) + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/enable sample upstream|proxy request to/ +--- grep_error_log_out +proxy request to +enable sample upstream + + + +=== TEST 12: disable sample upstream, update all of ineligible route to eligible, enable sample upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + add_ineligible_route(1, "/hello") + add_ineligible_route(2, "/hello1") + local code, body = t("/hello1", ngx.HTTP_GET) + assert(code == 200) + + update_route_to_eligible(1) + update_route_to_eligible(2) + local code, body = t("/hello", ngx.HTTP_GET) + assert(code == 200) + + assert(clear_route(1) == 200) + assert(clear_route(2) == 200) + + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/enable sample upstream|proxy request to/ +--- grep_error_log_out +proxy request to +enable sample upstream diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ai5.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ai5.t new file mode 100644 index 0000000..93ee8bb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ai5.t @@ -0,0 +1,270 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->extra_init_by_lua) { + my $extra_init_by_lua = <<_EOC_; + unload_ai_module = function () + local t = require("lib.test_admin").test + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + ]] + require("lib.test_admin").set_config_yaml(data) + + local code, body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + end + + load_ai_module = function () + local t = require("lib.test_admin").test + local data = [[ +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - ai + ]] + require("lib.test_admin").set_config_yaml(data) + + local code, body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + end +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: enable(default) -> disable -> enable +--- http_config eval: $::HttpConfig +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + -- register route + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "host": "127.0.0.1", + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + -- enable route cache + local code = t('/hello', ngx.HTTP_GET) + assert(code == 200) + + -- disable ai plugin + unload_ai_module() + + local code = t('/hello', ngx.HTTP_GET) + assert(code == 200) + + -- enable ai plugin + load_ai_module() + + -- TODO: The route cache should be enabled, but since no new routes are registered, + -- the route tree is not rebuilt, + -- so it is not possible to switch to route cache mode, we should fix it + local code = t('/hello', ngx.HTTP_GET) + assert(code == 200, "enable: access /hello") + + -- register a new route and trigger a route tree rebuild + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "host": "127.0.0.1", + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code = t('/hello', ngx.HTTP_GET) + assert(code == 200) + + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/route match mode: \S[^,]+/ +--- grep_error_log_out +route match mode: ai_match +route match mode: radixtree_host_uri +route match mode: radixtree_host_uri +route match mode: radixtree_host_uri +route match mode: ai_match +route match mode: radixtree_host_uri + + + +=== TEST 2: disable(default) -> enable -> disable +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: +--- http_config eval: $::HttpConfig +--- config + location /t { + content_by_lua_block { + local t 
= require("lib.test_admin").test + -- register route + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "host": "127.0.0.1", + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code = t('/hello', ngx.HTTP_GET) + assert(code == 200) + + -- enable ai plugin + load_ai_module() + + local code = t('/hello', ngx.HTTP_GET) + assert(code == 200) + + -- register a new route and trigger a route tree rebuild + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "host": "127.0.0.1", + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code = t('/hello', ngx.HTTP_GET) + assert(code == 200) + + -- disable ai plugin + unload_ai_module() + + local code = t('/hello', ngx.HTTP_GET) + assert(code == 200) + + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/route match mode: \S[^,]+/ +--- grep_error_log_out +route match mode: radixtree_host_uri +route match mode: radixtree_host_uri +route match mode: ai_match +route match mode: radixtree_host_uri +route match mode: radixtree_host_uri diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/api-breaker.t b/CloudronPackages/APISIX/apisix-source/t/plugin/api-breaker.t new file mode 100644 index 0000000..280bf01 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/api-breaker.t @@ -0,0 +1,654 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +log_level('info'); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.api-breaker") + local ok, err = plugin.check_schema({ + break_response_code = 502, + unhealthy = { + http_statuses = {500}, + failures = 1, + }, + healthy = { + http_statuses = {200}, + successes = 1, + }, + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: default configuration +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.api-breaker") + local conf = { + break_response_code = 502 + } + + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- request +GET /t +--- response_body +{"break_response_code":502,"healthy":{"http_statuses":[200],"successes":3},"max_breaker_sec":300,"unhealthy":{"failures":3,"http_statuses":[500]}} + + + +=== TEST 3: default `healthy` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.api-breaker") + local conf = { + break_response_code = 502, + healthy = {} + } + + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + 
ngx.say(require("toolkit.json").encode(conf)) + } + } +--- request +GET /t +--- response_body +{"break_response_code":502,"healthy":{"http_statuses":[200],"successes":3},"max_breaker_sec":300,"unhealthy":{"failures":3,"http_statuses":[500]}} + + + +=== TEST 4: default `unhealthy` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.api-breaker") + local conf = { + break_response_code = 502, + unhealthy = {} + } + + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- request +GET /t +--- response_body +{"break_response_code":502,"healthy":{"http_statuses":[200],"successes":3},"max_breaker_sec":300,"unhealthy":{"failures":3,"http_statuses":[500]}} + + + +=== TEST 5: bad break_response_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "api-breaker": { + "break_response_code": 199, + "unhealthy": { + "http_statuses": [500, 503], + "failures": 3 + }, + "healthy": { + "http_statuses": [200, 206], + "successes": 3 + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/api_breaker" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin api-breaker err: property \"break_response_code\" validation failed: expected 199 to be at least 200"} + + + +=== TEST 6: bad max_breaker_sec +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "api-breaker": { + "break_response_code": 200, + "max_breaker_sec": -1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": 
"roundrobin" + }, + "uri": "/api_breaker" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 7: bad unhealthy.http_statuses +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "api-breaker": { + "break_response_code": 200, + "max_breaker_sec": 40, + "unhealthy": { + "http_statuses": [500, 603], + "failures": 3 + }, + "healthy": { + "http_statuses": [200, 206], + "successes": 3 + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/api_breaker" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 8: same http_statuses in healthy +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "api-breaker": { + "break_response_code": 500, + "unhealthy": { + "http_statuses": [500, 503], + "failures": 3 + }, + "healthy": { + "http_statuses": [206, 206], + "successes": 3 + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/api_breaker" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin api-breaker err: property \"healthy\" validation failed: property \"http_statuses\" validation failed: expected unique items but items 1 and 2 are equal"} + + + +=== TEST 9: set route, http_statuses: [500, 503] +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "api-breaker": 
{ + "break_response_code": 599, + "unhealthy": { + "http_statuses": [500, 503], + "failures": 3 + }, + "healthy": { + "http_statuses": [200, 206], + "successes": 3 + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/api_breaker" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: trigger breaker +--- request eval +[ + "GET /api_breaker?code=200", "GET /api_breaker?code=500", + "GET /api_breaker?code=503", "GET /api_breaker?code=500", + "GET /api_breaker?code=500", "GET /api_breaker?code=500" +] +--- error_code eval +[200, 500, 503, 500, 599, 599] + + + +=== TEST 11: trigger reset status +--- request eval +[ + "GET /api_breaker?code=500", "GET /api_breaker?code=500", + + "GET /api_breaker?code=200", "GET /api_breaker?code=200", + "GET /api_breaker?code=200", + + "GET /api_breaker?code=500", "GET /api_breaker?code=500" +] +--- error_code eval +[ + 500, 500, + 200, 200, 200, + 500, 500 +] + + + +=== TEST 12: trigger del healthy numeration +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + + -- trigger to unhealth + for i = 1, 4 do + local code = t('/api_breaker?code=500', ngx.HTTP_GET) + ngx.say("code: ", code) + end + + -- break for 3 seconds + ngx.sleep(3) + + -- make a try + for i = 1, 4 do + local code = t('/api_breaker?code=200', ngx.HTTP_GET) + ngx.say("code: ", code) + end + + for i = 1, 4 do + local code = t('/api_breaker?code=500', ngx.HTTP_GET) + ngx.say("code: ", code) + end + } +} +--- request +GET /t +--- response_body +code: 500 +code: 500 +code: 500 +code: 599 +code: 200 +code: 200 +code: 200 +code: 200 +code: 500 +code: 500 +code: 500 +code: 599 +--- no_error_log +[error] +breaker_time: 4 +--- error_log +breaker_time: 2 + + + +=== TEST 13: add plugin with default config value +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "api-breaker": { + "break_response_code": 502, + "break_response_body": "{\"message\":\"breaker opened.\"}", + "break_response_headers": [{"key":"Content-Type","value":"application/json"},{"key":"Content-Type","value":"application/json+v1"}], + "unhealthy": { + "failures": 3 + }, + "healthy": { + "successes": 3 + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/api_breaker" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: default value +--- request +GET /api_breaker?code=500 +--- error_code: 500 + + + +=== TEST 15: trigger default value of unhealthy.http_statuses breaker +--- request eval +[ + "GET /api_breaker?code=200", "GET /api_breaker?code=500", + "GET /api_breaker?code=503", "GET /api_breaker?code=500", + "GET /api_breaker?code=500", "GET /api_breaker?code=500" +] +--- error_code eval +[200, 500, 503, 500, 500, 502] +--- response_headers eval +["Content-Type: text/plain", "Content-Type: text/html", "Content-Type: text/html", "Content-Type: text/html", "Content-Type: text/html", "Content-Type: application/json+v1"] +--- response_body_like eval +[".*", ".*", ".*", ".*", ".*", "{\"message\":\"breaker opened.\"}"] + + + +=== TEST 16: unhealthy -> timeout -> normal +--- config + location /mysleep { + proxy_pass "http://127.0.0.1:1980/mysleep?seconds=1"; + } +--- request eval +[ + "GET /api_breaker?code=500", + "GET /api_breaker?code=500", + "GET /api_breaker?code=500", + "GET /api_breaker?code=200", + + "GET /mysleep", + "GET /mysleep", + "GET /mysleep", + + "GET /api_breaker?code=200", + "GET /api_breaker?code=200", + "GET /api_breaker?code=200", + "GET /api_breaker?code=200", + "GET /api_breaker?code=200"] +--- error_code eval +[ + 500, 500, 500, 502, + 
200, 200, 200, + 200, 200, 200, 200,200 +] + + + +=== TEST 17: unhealthy -> timeout -> unhealthy +--- config +location /mysleep { + proxy_pass "http://127.0.0.1:1980/mysleep?seconds=1"; +} +--- request eval +[ + "GET /api_breaker?code=500", "GET /api_breaker?code=500", + "GET /api_breaker?code=500", "GET /api_breaker?code=200", + + "GET /mysleep", "GET /mysleep", "GET /mysleep", + + "GET /api_breaker?code=500","GET /api_breaker?code=500", + "GET /api_breaker?code=500","GET /api_breaker?code=500" + ] +--- error_code eval +[ + 500, 500, 500, 502, + 200, 200, 200, + 500,502,502,502 +] + + + +=== TEST 18: enable plugin, unhealthy.failures=1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "api-breaker": { + "break_response_code": 502, + "max_breaker_sec": 10, + "unhealthy": { + "http_statuses": [500, 503], + "failures": 1 + }, + "healthy": { + "successes": 3 + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/api_breaker" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: hit route 20 times, confirm the breaker time +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + + local status_count = {} + for i = 1, 20 do + local code = t('/api_breaker?code=500', ngx.HTTP_GET) + code = tostring(code) + status_count[code] = (status_count[code] or 0) + 1 + ngx.sleep(1) + end + + ngx.say(json.encode(status_count)) + } + } +--- request +GET /t +--- no_error_log +[error] +phase_func(): breaker_time: 16 +--- error_log +phase_func(): breaker_time: 2 +phase_func(): breaker_time: 4 +phase_func(): breaker_time: 8 +phase_func(): breaker_time: 10 +--- response_body +{"500":4,"502":16} +--- timeout: 25 + + + +=== TEST 20: 
reject invalid schema +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, case in ipairs({ + {input = { + break_response_code = 200, + break_response_headers = {{["content-type"] = "application/json"}} + }}, + }) do + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + { + id = "1", + plugins = { + ["api-breaker"] = case.input + } + } + ) + ngx.print(require("toolkit.json").decode(body).error_msg) + end + } + } +--- request +GET /t +--- response_body eval +qr/failed to check the configuration of plugin api-breaker err: property \"break_response_headers\" validation failed: failed to validate item 1: property \"(key|value)\" is required/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/attach-consumer-label.t b/CloudronPackages/APISIX/apisix-source/t/plugin/attach-consumer-label.t new file mode 100644 index 0000000..615b1cf --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/attach-consumer-label.t @@ -0,0 +1,465 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: invalid schema (missing headers) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.attach-consumer-label") + local ok, err = plugin.check_schema({}) + if not ok then + ngx.say(err) + return + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "headers" is required +--- no_error_log +[error] + + + +=== TEST 2: invalid schema (headers is an empty object) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.attach-consumer-label") + local ok, err = plugin.check_schema({ + headers = {} + }) + if not ok then + ngx.say(err) + return + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "headers" validation failed: expect object to have at least 1 properties +--- no_error_log +[error] + + + +=== TEST 3: invalid schema (missing $ prefix) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.attach-consumer-label") + local ok, err = plugin.check_schema({ + headers = { + ["X-Consumer-Department"] = "department", + ["X-Consumer-Company"] = "$company" + } + }) + if not ok then + ngx.say(err) + return + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "headers" validation failed: failed to validate additional property X-Consumer-Department: failed to match pattern "^\\$.*" with "department" +--- no_error_log +[error] + + + +=== TEST 4: valid schema +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.attach-consumer-label") + local ok, err = plugin.check_schema({ + headers = { + ["X-Consumer-Department"] = "$department", + ["X-Consumer-Company"] = "$company" + } + }) + if not ok then + ngx.say(err) + return + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body 
+done +--- no_error_log +[error] + + + +=== TEST 5: add consumer with labels +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "labels": { + "department": "devops", + "company": "api7" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/consumers/jack/credentials/a', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "key": "key-a" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 6: add route with only attach-consumer-label plugin (no key-auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "attach-consumer-label": { + "_meta": { + "disable": false + }, + "headers": { + "X-Consumer-Department": "$department", + "X-Consumer-Company": "$company" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 7: access without auth (should not contain consumer labels) +--- request +GET /echo +--- response_headers +!X-Consumer-Department +!X-Consumer-Company +--- no_error_log +[error] + + + +=== TEST 8: add route with attach-consumer-label plugin (with key-auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "key-auth": {}, + "attach-consumer-label": { + 
"headers": { + "X-Consumer-Department": "$department", + "X-Consumer-Company": "$company", + "X-Consumer-Role": "$role" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 9: access with auth (should contain consumer labels headers, but no x-consumer-role) +--- request +GET /echo +--- more_headers +apikey: key-a +X-Consumer-Role: admin +--- response_headers +X-Consumer-Company: api7 +X-Consumer-Department: devops +!X-Consumer-Role +--- no_error_log +[error] + + + +=== TEST 10: modify consumer without labels +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_PUT, + [[{ + "username": "jack" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 11: access with auth (should not contain headers because consumer has no labels) +--- request +GET /echo +--- more_headers +apikey: key-a +--- response_headers +!X-Consumer-Company +!X-Consumer-Department +--- no_error_log +[error] + + + +=== TEST 12: modify consumer with labels +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "labels": { + "department": "devops", + "company": "api7" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 13: modify route without attach-consumer-label plugin +--- config + location /t { + content_by_lua_block 
{ + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 14: add global rule with attach-consumer-label plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "attach-consumer-label": { + "headers": { + "X-Global-Consumer-Department": "$department", + "X-Global-Consumer-Company": "$company" + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 15: access with auth (should contain expected consumer labels headers) +--- request +GET /echo +--- more_headers +apikey: key-a +--- response_headers +X-Global-Consumer-Company: api7 +X-Global-Consumer-Department: devops +--- no_error_log +[error] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casbin.t b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casbin.t new file mode 100644 index 0000000..37ec3a4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casbin.t @@ -0,0 +1,446 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casbin") + local conf = { + model_path = "/path/to/model.conf", + policy_path = "/path/to/policy.csv", + username = "user" + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: username missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casbin") + local conf = { + model_path = "/path/to/model.conf", + policy_path = "/path/to/policy.csv" + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- request +GET /t +--- response_body +value should match only one schema, but matches none + + + +=== TEST 3: put model and policy text in metadata +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casbin") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/authz-casbin', + ngx.HTTP_PUT, + [[{ + "model": "[request_definition] + r = sub, obj, act + + [policy_definition] + p = sub, obj, act + + [role_definition] + g = _, _ + + [policy_effect] + e = some(where (p.eft == allow)) + + [matchers] + m = (g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act)", + + "policy": 
"p, *, /, GET + p, admin, *, * + g, alice, admin" + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: Enforcer from text without files +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casbin") + local t = require("lib.test_admin").test + + local conf = { + username = "user" + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 5: enable authz-casbin by Admin API +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-casbin": { + "username" : "user" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: no username header passed +--- request +GET /hello +--- error_code: 403 +--- response_body_like eval +qr/"Access Denied"/ + + + +=== TEST 7: username passed but user not authorized +--- request +GET /hello +--- more_headers +user: bob +--- error_code: 403 +--- response_body +{"message":"Access Denied"} + + + +=== TEST 8: authorized user +--- request +GET /hello +--- more_headers +user: admin +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 9: authorized user (rbac) +--- request +GET /hello +--- more_headers +user: alice +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 10: unauthorized user before policy update +--- request +GET /hello +--- more_headers +user: jack +--- error_code: 403 +--- response_body +{"message":"Access Denied"} + + + +=== TEST 11: update model and policy text in metadata +--- config + location /t { + content_by_lua_block { + local plugin = 
require("apisix.plugins.authz-casbin") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/authz-casbin', + ngx.HTTP_PUT, + [[{ + "model": "[request_definition] + r = sub, obj, act + + [policy_definition] + p = sub, obj, act + + [role_definition] + g = _, _ + + [policy_effect] + e = some(where (p.eft == allow)) + + [matchers] + m = (g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act)", + + "policy": "p, *, /, GET + p, admin, *, * + p, jack, /hello, GET + g, alice, admin" + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: authorized user after policy update +--- request +GET /hello +--- more_headers +user: jack +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 13: enable authz-casbin using model/policy files +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-casbin": { + "model_path": "t/plugin/authz-casbin/model.conf", + "policy_path": "t/plugin/authz-casbin/policy.csv", + "username" : "user" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: authorized user as per policy +--- request +GET /hello +--- more_headers +user: alice +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 15: unauthorized user as per policy +--- request +GET /hello +--- more_headers +user: bob +--- error_code: 403 +--- response_body +{"message":"Access Denied"} + + + +=== TEST 16: enable authz-casbin using model/policy text +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + 
ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-casbin": { + "model": " + [request_definition] + r = sub, obj, act + + [policy_definition] + p = sub, obj, act + + [role_definition] + g = _, _ + + [policy_effect] + e = some(where (p.eft == allow)) + + [matchers] + m = (g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act)", + "policy": " + p, *, /, GET + p, admin, *, * + g, jack, admin", + "username" : "user" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: authorized user as per policy +--- request +GET /hello +--- more_headers +user: jack +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 18: unauthorized user as per policy +--- request +GET /hello +--- more_headers +user: bob +--- error_code: 403 +--- response_body +{"message":"Access Denied"} + + + +=== TEST 19: disable authz-casbin by Admin API +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": {}, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casbin/model.conf b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casbin/model.conf new file mode 100644 index 0000000..45d7777 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casbin/model.conf @@ -0,0 +1,14 @@ +[request_definition] +r = sub, obj, act + +[policy_definition] +p = sub, obj, act + +[role_definition] +g = _, _ + +[policy_effect] +e = some(where (p.eft == allow)) + +[matchers] +m = 
(g(r.sub, p.sub) || keyMatch(r.sub, p.sub)) && keyMatch(r.obj, p.obj) && keyMatch(r.act, p.act) diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casbin/policy.csv b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casbin/policy.csv new file mode 100644 index 0000000..aca6053 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casbin/policy.csv @@ -0,0 +1,3 @@ +p, *, /, GET +p, admin, *, * +g, alice, admin diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casdoor.t b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casdoor.t new file mode 100644 index 0000000..aef07fa --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-casdoor.t @@ -0,0 +1,514 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 10420; + location /api/login/oauth/access_token { + content_by_lua_block { + local json_encode = require("toolkit.json").encode + ngx.req.read_body() + local arg = ngx.req.get_post_args()["code"] + + local core = require("apisix.core") + local log = core.log + + if arg == "wrong" then + ngx.status = 200 + ngx.say(json_encode({ access_token = "bbbbbbbbbb", expires_in = 0 })) + return + end + + ngx.status = 200 + ngx.say(json_encode({ access_token = "aaaaaaaaaaaaaaaa", expires_in = 1000000 })) + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local fake_uri = "http://127.0.0.1:" .. ngx.var.server_port + local callback_url = "http://127.0.0.1:" .. ngx.var.server_port .. + "/anything/callback" + local conf = { + callback_url = callback_url, + endpoint_addr = fake_uri, + client_id = "7ceb9b7fda4a9061ec1c", + client_secret = "3416238e1edf915eac08b8fe345b2b95cdba7e04" + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + local conf2 = { + callback_url = callback_url .. "/?code=aaa", + endpoint_addr = fake_uri, + client_id = "7ceb9b7fda4a9061ec1c", + client_secret = "3416238e1edf915eac08b8fe345b2b95cdba7e04" + } + ok, err = plugin.check_schema(conf2) + if ok then + ngx.say("err: shouldn't have passed sanity check") + end + + local conf3 = { + callback_url = callback_url, + endpoint_addr = fake_uri .. 
"/", + client_id = "7ceb9b7fda4a9061ec1c", + client_secret = "3416238e1edf915eac08b8fe345b2b95cdba7e04" + } + ok, err = plugin.check_schema(conf3) + if ok then + ngx.say("err: shouldn't have passed sanity check") + end + + ngx.say("done") + + } + } +--- response_body +done + + + +=== TEST 2: enable plugin test redirect +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local t = require("lib.test_admin").test + + local fake_uri = "http://127.0.0.1:10420" + local callback_url = "http://127.0.0.1:" .. ngx.var.server_port .. + "/anything/callback" + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/anything/*", + "plugins": { + "authz-casdoor": { + "callback_url":"]] .. callback_url .. [[", + "endpoint_addr":"]] .. fake_uri .. [[", + "client_id":"7ceb9b7fda4a9061ec1c", + "client_secret":"3416238e1edf915eac08b8fe345b2b95cdba7e04" + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "test.com:1980": 1 + } + } + }]] + ) + if code >= 300 then + ngx.say("failed to set up routing rule") + end + ngx.say("done") + + } + } +--- response_body +done + + + +=== TEST 3: test redirect +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local t = require("lib.test_admin").test + + local code, body = t('/anything/d?param1=foo¶m2=bar', ngx.HTTP_GET, [[]]) + if code ~= 302 then + ngx.say("should have redirected") + end + + ngx.say("done") + + } + } +--- response_body +done + + + +=== TEST 4: enable fake casdoor +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/api/login/oauth/access_token", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + 
ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: test fake casdoor +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local t = require("lib.test_admin").test + local httpc = require("resty.http").new() + local cjson = require("cjson") + local fake_uri = "http://127.0.0.1:10420/api/login/oauth/access_token" + + local res, err = httpc:request_uri(fake_uri, {method = "GET"}) + if not res then + ngx.say(err) + end + local data = cjson.decode(res.body) + if not data then + ngx.say("invalid res.body") + end + if not data.access_token == "aaaaaaaaaaaaaaaa" then + ngx.say("invalid token") + end + ngx.say("done") + + } + } +--- response_body +done + + + +=== TEST 6: test code handling +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local core = require("apisix.core") + local log = core.log + local t = require("lib.test_admin").test + local cjson = require("cjson") + local fake_uri = "http://127.0.0.1:" .. ngx.var.server_port .. + "/anything/d?param1=foo&param2=bar" + local callback_url = "http://127.0.0.1:" .. ngx.var.server_port .. 
+ "/anything/callback?code=aaa&state=" + + local httpc = require("resty.http").new() + local res1, err1 = httpc:request_uri(fake_uri, {method = "GET"}) + if not res1 then + ngx.say(err1) + end + + local cookie = res1.headers["Set-Cookie"] + local re_url = res1.headers["Location"] + local m, err = ngx.re.match(re_url, "state=([0-9]*)") + if err or not m then + log.error(err) + ngx.exit() + end + local state = m[1] + + local res2, err2 = httpc:request_uri(callback_url..state, { + method = "GET", + headers = {Cookie = cookie} + }) + if not res2 then + ngx.say(err2) + end + if res2.status ~= 302 then + log.error(res2.status) + end + + local cookie2 = res2.headers["Set-Cookie"] + local res3, err3 = httpc:request_uri(fake_uri, { + method = "GET", + headers = {Cookie = cookie2} + + }) + if not res3 then + ngx.say(err3) + end + if res3.status >= 300 then + log.error(res3.status,res3.headers["Location"]) + end + ngx.say("done") + + } + } +--- response_body +done + + + +=== TEST 7: incorrect test code handling +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local t = require("lib.test_admin").test + local cjson = require("cjson") + + local callback_url = "http://127.0.0.1:" .. ngx.var.server_port .. + "/anything/callback?code=aaa&state=bbb" + + local httpc = require("resty.http").new() + local res1, err1 = httpc:request_uri(callback_url, {method = "GET"}) + if res1.status ~= 503 then + ngx.say(res1.status) + end + ngx.say("done") + } + } +--- response_body +done +--- error_log +no session found + + + +=== TEST 8: incorrect state handling +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local core = require("apisix.core") + local log = core.log + local t = require("lib.test_admin").test + local cjson = require("cjson") + local fake_uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
+ "/anything/d?param1=foo¶m2=bar" + local callback_url = "http://127.0.0.1:" .. ngx.var.server_port .. + "/anything/callback?code=aaa&state=" + + local httpc = require("resty.http").new() + local res1, err1 = httpc:request_uri(fake_uri, {method = "GET"}) + if not res1 then + ngx.say(err1) + end + + local cookie = res1.headers["Set-Cookie"] + local re_url = res1.headers["Location"] + local m, err = ngx.re.match(re_url, "state=([0-9]*)") + if err or not m then + log.error(err) + end + local state = m[1]+10 + + local res2, err2 = httpc:request_uri(callback_url..state, { + method = "GET", + headers = {Cookie = cookie} + }) + if not res2 then + ngx.say(err2) + end + if res2.status ~= 302 then + log.error(res2.status) + end + + local cookie2 = res2.headers["Set-Cookie"] + local res3, err3 = httpc:request_uri(fake_uri, { + method = "GET", + headers = {Cookie = cookie2} + }) + if not res3 then + ngx.say(err3) + end + if res3.status ~= 503 then + log.error(res3.status) + end + ngx.say("done") + + } + } +--- response_body +done +--- error_log +invalid state + + + +=== TEST 9: test incorrect access_token +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local core = require("apisix.core") + local log = core.log + local t = require("lib.test_admin").test + local cjson = require("cjson") + local fake_uri = "http://127.0.0.1:" .. ngx.var.server_port .. + "/anything/d?param1=foo¶m2=bar" + local callback_url = "http://127.0.0.1:" .. ngx.var.server_port .. 
+ "/anything/callback?code=wrong&state=" + + local httpc = require("resty.http").new() + local res1, err1 = httpc:request_uri(fake_uri, {method = "GET"}) + if not res1 then + ngx.say(err1) + end + + local cookie = res1.headers["Set-Cookie"] + local re_url = res1.headers["Location"] + local m, err = ngx.re.match(re_url, "state=([0-9]*)") + if err or not m then + log.error(err) + ngx.exit() + end + local state = m[1] + + local res2, err2 = httpc:request_uri(callback_url..state, { + method = "GET", + headers = {Cookie = cookie} + }) + if not res2 then + ngx.say(err2) + end + if res2.status ~= 302 then + log.error(res2.status) + end + + local cookie2 = res2.headers["Set-Cookie"] + local res3, err3 = httpc:request_uri(fake_uri, { + method = "GET", + headers = {Cookie = cookie2} + + }) + if not res3 then + ngx.say(err3) + end + if res3.status ~= 503 then + log.error(res3.status) + end + ngx.say("done") + } + } +--- response_body +done +--- error_log +failed when accessing token: invalid access_token + + + +=== TEST 10: data encryption for client_secret +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local callback_url = "http://127.0.0.1:" .. ngx.var.server_port .. + "/anything/callback" + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/anything/*", + "plugins": { + "authz-casdoor": { + "callback_url":"]] .. callback_url .. 
[[", + "endpoint_addr": "http://127.0.0.1:10420", + "client_id":"7ceb9b7fda4a9061ec1c", + "client_secret":"3416238e1edf915eac08b8fe345b2b95cdba7e04" + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "test.com:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["authz-casdoor"].client_secret) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["authz-casdoor"].client_secret) + } + } +--- response_body +3416238e1edf915eac08b8fe345b2b95cdba7e04 +YUfqAO0kPXjZIoAbPSuryCkUDksEmwSq08UDTIUWolN6KQwEUrh72TazePueo4/S diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak.t b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak.t new file mode 100644 index 0000000..58a6331 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak.t @@ -0,0 +1,647 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: minimal valid configuration w/o discovery +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({ + client_id = "foo", + token_endpoint = "https://host.domain/realms/foo/protocol/openid-connect/token" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: minimal valid configuration with discovery +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({ + client_id = "foo", + discovery = "https://host.domain/realms/foo/.well-known/uma2-configuration" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 3: minimal valid configuration w/o discovery when lazy_load_paths=true +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({ + client_id = "foo", + lazy_load_paths = true, + token_endpoint = "https://host.domain/realms/foo/protocol/openid-connect/token", + resource_registration_endpoint = "https://host.domain/realms/foo/authz/protection/resource_set" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== 
TEST 4: minimal valid configuration with discovery when lazy_load_paths=true +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({ + client_id = "foo", + lazy_load_paths = true, + discovery = "https://host.domain/realms/foo/.well-known/uma2-configuration" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 5: full schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({ + discovery = "https://host.domain/realms/foo/.well-known/uma2-configuration", + token_endpoint = "https://host.domain/realms/foo/protocol/openid-connect/token", + resource_registration_endpoint = "https://host.domain/realms/foo/authz/protection/resource_set", + client_id = "University", + client_secret = "secret", + grant_type = "urn:ietf:params:oauth:grant-type:uma-ticket", + policy_enforcement_mode = "ENFORCING", + permissions = {"res:customer#scopes:view"}, + lazy_load_paths = false, + http_method_as_scope = false, + timeout = 1000, + ssl_verify = false, + cache_ttl_seconds = 1000, + keepalive = true, + keepalive_timeout = 10000, + keepalive_pool = 5, + access_token_expires_in = 300, + access_token_expires_leeway = 0, + refresh_token_expires_in = 3600, + refresh_token_expires_leeway = 0, + password_grant_token_generation_incoming_uri = "/api/token", + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 6: token_endpoint and discovery both missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({client_id = "foo"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +allOf 1 failed: 
object matches none of the required: ["discovery"] or ["token_endpoint"] +done + + + +=== TEST 7: client_id missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({discovery = "https://host.domain/realms/foo/.well-known/uma2-configuration"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "client_id" is required +done + + + +=== TEST 8: resource_registration_endpoint and discovery both missing and lazy_load_paths is true +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({ + client_id = "foo", + token_endpoint = "https://host.domain/realms/foo/protocol/openid-connect/token", + lazy_load_paths = true + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +allOf 2 failed: object matches none of the required +done + + + +=== TEST 9: Add https endpoint with ssl_verify true (default) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#delete"], + "client_id": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: TEST with fake token and https endpoint +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + 
local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. "fake access token", + } + }) + + ngx.status = res.status + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +false +--- error_log +Error while sending authz request to https://127.0.0.1:8443/realms/University/protocol/openid-connect/token: 18 +--- error_code: 503 + + + +=== TEST 11: Add https endpoint with ssl_verify false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#delete"], + "client_id": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: TEST for https based token verification with ssl_verify false +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. "fake access token", + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +false +--- error_log +Request denied: HTTP 401 Unauthorized. 
Body: {"error":"HTTP 401 Unauthorized"} + + + +=== TEST 13: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "client_id": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "policy_enforcement_mode": "ENFORCING", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: test for permission is empty and enforcement mode is "ENFORCING". +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
"fake access token", + } + }) + + ngx.say(res.body) + } + } +--- request +GET /t +--- response_body +{"error":"access_denied","error_description":"not_authorized"} +--- no_error_log + + + +=== TEST 15: set enforcement mode is "ENFORCING", lazy_load_paths and permissions use default values , access_denied_redirect_uri is "http://127.0.0.1/test" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "client_id": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "policy_enforcement_mode": "ENFORCING", + "timeout": 3000, + "access_denied_redirect_uri": "http://127.0.0.1/test" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: test for permission is empty and enforcement mode is "ENFORCING" , access_denied_redirect_uri is "http://127.0.0.1/test". +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
"fake access token", + } + }) + if res.status >= 300 then + ngx.status = res.status + ngx.header["Location"] = res.headers["Location"] + end + } + } +--- request +GET /t +--- response_headers +Location: http://127.0.0.1/test +--- error_code: 307 + + + +=== TEST 17: Add https endpoint with password_grant_token_generation_incoming_uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#view"], + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false, + "password_grant_token_generation_incoming_uri": "/api/token" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/api/token" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/api/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + }, + + body = ngx.encode_args({ + username = "teacher@gmail.com", + password = "123456", + }), + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + local refreshToken = body["refresh_token"] + + if accessToken and refreshToken then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true + + + +=== TEST 18: no username or password +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#view"], + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false, + "password_grant_token_generation_incoming_uri": "/api/token" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/api/token" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/api/token" + local headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + } + + -- no username + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = headers, + body = ngx.encode_args({ + password = "123456", + }), + }) + ngx.print(res.body) + + -- no password + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = headers, + body = ngx.encode_args({ + username = "teacher@gmail.com", + }), + }) + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +{"message":"username is missing."} +{"message":"password is missing."} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak2.t new file mode 100644 index 0000000..a8ced01 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak2.t @@ -0,0 +1,743 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: add plugin with view course permissions (using token endpoint) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#view"], + "client_id": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: Get access token for teacher and access view course route +--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
accessToken, + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(res.status) + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true + + + +=== TEST 3: invalid access token +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer wrong_token", + } + }) + if res.status == 401 then + ngx.say(true) + end + } + } +--- request +GET /t +--- response_body +true +--- error_log +Invalid bearer token + + + +=== TEST 4: add plugin with view course permissions (using discovery) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/uma2-configuration", + "permissions": ["course_resource#view"], + "client_id": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: Get access token for teacher and access view course route +--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = 
"grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. accessToken, + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true + + + +=== TEST 6: invalid access token +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer wrong_token", + } + }) + if res.status == 401 then + ngx.say(true) + end + } + } +--- request +GET /t +--- response_body +true +--- error_log +Invalid bearer token + + + +=== TEST 7: add plugin for delete course route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#delete"], + "client_id": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: Get access token for student 
and delete course +--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. accessToken, + } + }) + + if res.status == 403 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true +--- error_log +{"error":"access_denied","error_description":"not_authorized"} + + + +=== TEST 9: add plugin with lazy_load_paths and http_method_as_scope +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/uma2-configuration", + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "lazy_load_paths": true, + "http_method_as_scope": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/course/foo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: Get access token for teacher and access view course route. 
+--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/course/foo" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. accessToken, + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true + + + +=== TEST 11: Get access token for student and access view course route. +--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/course/foo" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
accessToken, + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true + + + +=== TEST 12: Get access token for teacher and delete course. +--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/course/foo" + local res, err = httpc:request_uri(uri, { + method = "DELETE", + headers = { + ["Authorization"] = "Bearer " .. accessToken, + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true + + + +=== TEST 13: Get access token for student and try to delete course. Should fail. 
+--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/course/foo" + local res, err = httpc:request_uri(uri, { + method = "DELETE", + headers = { + ["Authorization"] = "Bearer " .. accessToken, + } + }) + + if res.status == 403 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true +--- error_log +{"error":"access_denied","error_description":"not_authorized"} + + + +=== TEST 14: Get access token for teacher and access view course route. +--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/course/foo" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. accessToken, + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true + + + +=== TEST 15: Get access token for student and access view course route. +--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/course/foo" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
accessToken, + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true + + + +=== TEST 16: add plugin with lazy_load_paths when resource_registration_endpoint is neither in config nor in the discovery doc +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "lazy_load_paths": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/course/foo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: Get access token for student and access view course route. +--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/course/foo" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
accessToken, + } + }) + + if res.status == 503 then + ngx.say(true) + else + ngx.say(false) + end + else + ngx.say(false) + end + } + } +--- request +GET /t +--- response_body +true +--- error_log +Unable to determine registration endpoint. diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak3.t new file mode 100644 index 0000000..e467705 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak3.t @@ -0,0 +1,178 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: access_denied_redirect_uri works with request denied in token_endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token", + "access_denied_redirect_uri": "http://127.0.0.1/test", + "permissions": ["course_resource#delete"], + "client_id": "course_management", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- config + location /t { + content_by_lua_block { + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + if res.status == 200 then + local body = json_decode(res.body) + local accessToken = body["access_token"] + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
accessToken, + } + }) + + ngx.status = res.status + ngx.header["Location"] = res.headers["Location"] + end + } + } +--- error_code: 307 +--- response_headers +Location: http://127.0.0.1/test + + + +=== TEST 3: data encryption for client_secret +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource#view"], + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false, + "password_grant_token_generation_incoming_uri": "/api/token" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/api/token" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["authz-keycloak"].client_secret) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["authz-keycloak"].client_secret) + } + } +--- response_body +d1ec69e9-55d2-4109-a3ea-befa071579d5 +Fz1juZEEvh9PPXOmWFdMMJkREt3ZSzEVWcUZPxNP6achk3fosEvn37oN0qH4YgKB diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak4.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak4.t new file mode 100644 index 0000000..60aea47 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/authz-keycloak4.t @@ -0,0 +1,245 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; + $ENV{CLIENT_SECRET} = "d1ec69e9-55d2-4109-a3ea-befa071579d5"; +} + +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/foo client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5 +--- response_body +Success! 
Data written to: kv/apisix/foo + + + +=== TEST 2: set client_secret as a reference to secret +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "root" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource"], + "client_id": "course_management", + "client_secret": "$secret://vault/test1/foo/client_secret", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false, + "password_grant_token_generation_incoming_uri": "/api/token" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/api/token" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/api/token" + local headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + } + + -- no username + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = headers, + body = ngx.encode_args({ + username = "teacher@gmail.com", + password = "123456", + }), + }) + if res.status == 200 then + ngx.print("success\n") + end + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 3: set client_secret as a reference to env variable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource"], + "client_id": "course_management", + "client_secret": "$env://CLIENT_SECRET", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false, + "password_grant_token_generation_incoming_uri": "/api/token" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/api/token" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/api/token" + local headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + } + + -- no username + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = headers, + body = ngx.encode_args({ + username = "teacher@gmail.com", + password = "123456", + }), + }) + if res.status == 200 then + ngx.print("success\n") + end + } + } +--- request +GET /t +--- response_body +success + + + +=== TEST 4: set invalid client_secret as a reference to env variable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "authz-keycloak": { + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", + "permissions": ["course_resource"], + "client_id": "course_management", + "client_secret": "$env://INVALID_CLIENT_SECRET", + "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", + "timeout": 3000, + "ssl_verify": false, + "password_grant_token_generation_incoming_uri": "/api/token" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/api/token" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/api/token" + local headers = { + ["Content-Type"] = "application/x-www-form-urlencoded", + } + + -- no username + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = headers, + body = ngx.encode_args({ + username = "teacher@gmail.com", + password = "123456", + }), + }) + if res.status == 200 then + ngx.print("success\n") + end + } + } +--- request +GET /t +--- grep_error_log eval +qr/Invalid client secret/ +--- grep_error_log_out +Invalid client secret +Invalid client secret diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/aws-lambda.t b/CloudronPackages/APISIX/apisix-source/t/plugin/aws-lambda.t new file mode 100644 index 0000000..e941d84 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/aws-lambda.t @@ -0,0 +1,277 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $inside_lua_block = $block->inside_lua_block // ""; + chomp($inside_lua_block); + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 8765; + + location /httptrigger { + content_by_lua_block { + ngx.req.read_body() + local msg = "aws lambda invoked" + ngx.header['Content-Length'] = #msg + 1 + ngx.header['Connection'] = "Keep-Alive" + ngx.say(msg) + } + } + + location /generic { + content_by_lua_block { + $inside_lua_block + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: checking iam schema +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.aws-lambda") + local ok, err = plugin.check_schema({ + function_uri = "https://api.amazonaws.com", + authorization = { + iam = { + accesskey = "key1", + secretkey = "key2" + } + } + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- response_body +done + + + +=== TEST 2: missing fields in iam schema +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.aws-lambda") + local ok, err = plugin.check_schema({ + function_uri = "https://api.amazonaws.com", + authorization = { + iam = { + secretkey = "key2" + } + } + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- response_body +property "authorization" validation failed: property "iam" validation failed: property "accesskey" is required + + + +=== TEST 3: create route with aws plugin enabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "aws-lambda": { + "function_uri": "http://localhost:8765/httptrigger", + "authorization": { + "apikey" : "testkey" + } + } + }, + "uri": "/aws" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: test plugin endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local core = require("apisix.core") + + local code, _, body, headers = t("/aws", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + -- headers proxied 2 times -- one by plugin, another by this test case + core.response.set_header(headers) + ngx.print(body) + } + } +--- response_body +aws lambda invoked +--- response_headers +Content-Length: 19 + + + +=== TEST 5: check authz header - apikey +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- passing an apikey + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "aws-lambda": { + "function_uri": "http://localhost:8765/generic", + "authorization": { + "apikey": "test_key" + } + } + }, + "uri": "/aws" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + + local code, _, body = t("/aws", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.print(body) + } + } +--- inside_lua_block +local headers = ngx.req.get_headers() or {} +ngx.say("Authz-Header - " .. 
headers["x-api-key"] or "") + +--- response_body +passed +Authz-Header - test_key + + + +=== TEST 6: check authz header - IAM v4 signing +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- passing the iam access and secret keys + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "aws-lambda": { + "function_uri": "http://localhost:8765/generic", + "authorization": { + "iam": { + "accesskey": "KEY1", + "secretkey": "KeySecret" + } + } + } + }, + "uri": "/aws" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + + local code, _, body, headers = t("/aws", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.print(body) + } + } +--- inside_lua_block +local headers = ngx.req.get_headers() or {} +ngx.say("Authz-Header - " .. headers["Authorization"] or "") +ngx.say("AMZ-Date - " .. headers["X-Amz-Date"] or "") +ngx.print("invoked") + +--- response_body eval +qr/passed +Authz-Header - AWS4-HMAC-SHA256 [ -~]* +AMZ-Date - [\d]+T[\d]+Z +invoked/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/azure-functions.t b/CloudronPackages/APISIX/apisix-source/t/plugin/azure-functions.t new file mode 100644 index 0000000..72f9bbc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/azure-functions.t @@ -0,0 +1,510 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $inside_lua_block = $block->inside_lua_block // ""; + chomp($inside_lua_block); + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 8765; + + location /httptrigger { + content_by_lua_block { + ngx.req.read_body() + local msg = "faas invoked" + ngx.header['Content-Length'] = #msg + 1 + ngx.header['X-Extra-Header'] = "MUST" + ngx.header['Connection'] = "Keep-Alive" + ngx.say(msg) + } + } + + location /api { + content_by_lua_block { + ngx.say("invocation /api successful") + } + } + + location /api/httptrigger { + content_by_lua_block { + ngx.say("invocation /api/httptrigger successful") + } + } + + location /api/http/trigger { + content_by_lua_block { + ngx.say("invocation /api/http/trigger successful") + } + } + + location /azure-demo { + content_by_lua_block { + $inside_lua_block + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.azure-functions") + local conf = { + function_uri = "http://some-url.com" + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body 
+done + + + +=== TEST 2: function_uri missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.azure-functions") + local ok, err = plugin.check_schema({}) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- response_body +property "function_uri" is required + + + +=== TEST 3: create route with azure-function plugin enabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "azure-functions": { + "function_uri": "http://localhost:8765/httptrigger" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/azure" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: Test plugin endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local core = require("apisix.core") + + local code, _, body, headers = t("/azure", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + -- headers proxied 2 times -- one by plugin, another by this test case + core.response.set_header(headers) + ngx.print(body) + } + } +--- response_body +faas invoked +--- response_headers +Content-Length: 13 +X-Extra-Header: MUST + + + +=== TEST 5: http2 check response body and headers +--- http2 +--- request +GET /azure +--- more_headers +Content-Length: 0 +--- response_body +faas invoked + + + +=== TEST 6: check HTTP/2 response headers (must not contain any connection specific info) +First fetch the header from curl with -I then check the count of Connection +The full header looks like the format shown below + +HTTP/2 200 +content-type: text/plain +x-extra-header: MUST +content-length: 13 +date: Wed, 17 Nov 2021 13:53:08 GMT +server: APISIX/2.10.2 + +--- http2 +--- request 
+HEAD /azure +--- more_headers +Content-Length: 0 +--- response_headers +Connection: +Upgrade: +Keep-Alive: +content-type: text/plain +x-extra-header: MUST +content-length: 13 + + + +=== TEST 7: check authz header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- passing an apikey + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "azure-functions": { + "function_uri": "http://localhost:8765/azure-demo", + "authorization": { + "apikey": "test_key" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/azure" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + + local code, _, body = t("/azure", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.print(body) + } + } +--- inside_lua_block +local headers = ngx.req.get_headers() or {} +ngx.say("Authz-Header - " .. headers["x-functions-key"] or "") + +--- response_body +passed +Authz-Header - test_key + + + +=== TEST 8: check if apikey doesn't get overridden passed by client to the gateway +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local header = {} + header["x-functions-key"] = "must_not_be_overrided" + + -- plugin schema already contains apikey with value "test_key" which won't be respected + local code, _, body = t("/azure", "GET", nil, nil, header) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.print(body) + } + } +--- inside_lua_block +local headers = ngx.req.get_headers() or {} +ngx.say("Authz-Header - " .. 
headers["x-functions-key"] or "") + +--- response_body +Authz-Header - must_not_be_overrided + + + +=== TEST 9: fall back to metadata master key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, meta_body = t('/apisix/admin/plugin_metadata/azure-functions', + ngx.HTTP_PUT, + [[{ + "master_apikey":"metadata_key" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(meta_body) + + -- update plugin attribute + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "azure-functions": { + "function_uri": "http://localhost:8765/azure-demo" + } + }, + "uri": "/azure" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + + -- plugin schema already contains apikey with value "test_key" which won't be respected + local code, _, body = t("/azure", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.print(body) + } + } +--- inside_lua_block +local headers = ngx.req.get_headers() or {} +ngx.say("Authz-Header - " .. 
headers["x-functions-key"] or "") + +--- response_body +passed +passed +Authz-Header - metadata_key + + + +=== TEST 10: check if url path being forwarded correctly by creating a semi correct path uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- creating a semi path route + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "azure-functions": { + "function_uri": "http://localhost:8765/api" + } + }, + "uri": "/azure/*" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + + local code, _, body = t("/azure/httptrigger", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.print(body) + } + } +--- response_body +passed +invocation /api/httptrigger successful + + + +=== TEST 11: check multilevel url path forwarding +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, _, body = t("/azure/http/trigger", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.print(body) + } + } +--- response_body +invocation /api/http/trigger successful + + + +=== TEST 12: check url path forwarding containing multiple slashes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, _, body = t("/azure///http////trigger", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.print(body) + } + } +--- response_body +invocation /api/http/trigger successful + + + +=== TEST 13: check url path forwarding with no excess path +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, _, body = t("/azure/", "GET") + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.print(body) + } + } +--- response_body +invocation /api successful + + + +=== TEST 14: create route with 
azure-function plugin enabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "azure-functions": { + "function_uri": "http://localhost:8765/httptrigger" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/azure" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: http2 failed to check response body and headers +--- http2 +--- request +GET /azure +--- error_code: 400 +--- error_log +HTTP2/HTTP3 request without a Content-Length header, diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/basic-auth-anonymous-consumer.t b/CloudronPackages/APISIX/apisix-source/t/plugin/basic-auth-anonymous-consumer.t new file mode 100644 index 0000000..64a84f0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/basic-auth-anonymous-consumer.t @@ -0,0 +1,224 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + + +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +apisix: + data_encryption: + enable_encrypt_fields: false +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); +}); + + +run_tests; + +__DATA__ + +=== TEST 1: add consumer jack and anonymous +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + }, + "limit-count": { + "count": 4, + "time_window": 60 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +passed + + + +=== TEST 2: add basic auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: normal consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for i = 1, 5, 1 do + local code, body = t('/hello', + ngx.HTTP_GET, + nil, + nil, + { + Authorization = "Basic Zm9vOmJhcg==" + } + ) + + if code >= 300 then + ngx.say("failed" .. code) + return + end + ngx.say(body .. 
i) + end + } + } +--- request +GET /t +--- response_body +passed1 +passed2 +passed3 +passed4 +failed503 + + + +=== TEST 4: request without basic-auth header will be from anonymous consumer and it will pass +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: request without basic-auth header will be from anonymous consumer and different rate limit will apply +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 503, 503, 503] + + + +=== TEST 6: add basic auth plugin with non-existent anonymous_consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": { + "anonymous_consumer": "not-found-anonymous" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: anonymous-consumer configured in the route should not be found +--- request +GET /hello +--- error_code: 401 +--- error_log +failed to get anonymous consumer not-found-anonymous +--- response_body +{"message":"Invalid user authorization"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/basic-auth.t b/CloudronPackages/APISIX/apisix-source/t/plugin/basic-auth.t new file mode 100644 index 0000000..4a19cf1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/basic-auth.t @@ -0,0 +1,622 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; +} + +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.basic-auth") + local ok, err = plugin.check_schema({username = 'foo', password = 'bar'}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: wrong type of string +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.basic-auth") + local ok, err = plugin.check_schema({username = 123, password = "bar"}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "username" validation failed: wrong type: expected string, got number +done + + + +=== TEST 3: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: enable basic auth plugin using admin api +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: verify, missing authorization +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authorization in request"} + + + +=== TEST 6: verify, invalid basic authorization header +--- request +GET /hello +--- more_headers +Authorization: Bad_header YmFyOmJhcgo= +--- error_code: 401 +--- response_body +{"message":"Invalid authorization in request"} +--- grep_error_log eval +qr/Invalid authorization header format/ +--- grep_error_log_out +Invalid authorization header format + + + +=== TEST 7: verify, invalid authorization value (bad base64 str) +--- request +GET /hello +--- more_headers +Authorization: Basic aca_a +--- error_code: 401 +--- response_body +{"message":"Invalid authorization in request"} +--- grep_error_log eval +qr/Failed to decode authentication header: aca_a/ +--- grep_error_log_out +Failed to decode authentication header: aca_a + + + +=== TEST 8: verify, invalid authorization value (no password) +--- request +GET /hello +--- more_headers +Authorization: Basic YmFy +--- error_code: 401 +--- response_body +{"message":"Invalid authorization in request"} +--- grep_error_log eval +qr/Split authorization err: invalid decoded data: bar/ +--- grep_error_log_out +Split authorization err: invalid decoded data: bar + + + +=== TEST 9: verify, invalid username +--- request +GET /hello +--- more_headers +Authorization: Basic YmFyOmJhcgo= +--- error_code: 401 +--- response_body +{"message":"Invalid user authorization"} + + + +=== TEST 10: verify, invalid password +--- request +GET /hello +--- more_headers 
+Authorization: Basic Zm9vOmZvbwo= +--- error_code: 401 +--- response_body +{"message":"Invalid user authorization"} + + + +=== TEST 11: verify +--- request +GET /hello +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_body +hello world +--- error_log +find consumer foo + + + +=== TEST 12: invalid schema, only one field `username` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo" + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin basic-auth err: property \"password\" is required"} + + + +=== TEST 13: invalid schema, not field given +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/\{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin basic-auth err: property \\"(username|password)\\" is required"\}/ + + + +=== TEST 14: invalid schema, not a table +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": "blah" + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid plugins configuration: invalid plugin conf \"blah\" for plugin [basic-auth]"} + + + +=== TEST 15: get the default schema +--- config + location 
/t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/basic-auth', + ngx.HTTP_GET, + nil, + [[ +{"properties":{},"title":"work with route or service object","type":"object"} + ]] + ) + ngx.status = code + } + } +--- request +GET /t + + + +=== TEST 16: get the schema by schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/basic-auth?schema_type=consumer', + ngx.HTTP_GET, + nil, + [[ +{"title":"work with consumer object","required":["username","password"],"properties":{"username":{"type":"string"},"password":{"type":"string"}},"type":"object"} + ]] + ) + ngx.status = code + } + } +--- request +GET /t + + + +=== TEST 17: get the schema by error schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/basic-auth?schema_type=consumer123123', + ngx.HTTP_GET, + nil, + [[ +{"properties":{},"title":"work with route or service object","type":"object"} + ]] + ) + ngx.status = code + } + } +--- request +GET /t + + + +=== TEST 18: enable basic auth plugin using admin api, set hide_credentials = true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": { + "hide_credentials": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: verify Authorization request header is hidden +--- request +GET /echo +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_headers +!Authorization + + + +=== TEST 20: enable basic auth plugin using admin api, 
hide_credentials = false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": { + "hide_credentials": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: verify Authorization request header should not hidden +--- request +GET /echo +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_headers +Authorization: Basic Zm9vOmJhcg== + + + +=== TEST 22: set basic-auth conf: password uses secret ref +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "root" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- change consumer with secrets ref: vault + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "$secret://vault/test1/foo/passwd" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- set route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": { + "hide_credentials": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/foo passwd=bar +--- 
response_body +Success! Data written to: kv/apisix/foo + + + +=== TEST 24: verify Authorization with foo/bar, request header should not hidden +--- request +GET /echo +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_headers +Authorization: Basic Zm9vOmJhcg== + + + +=== TEST 25: set basic-auth conf with the token in an env var: password uses secret ref +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "$ENV://VAULT_TOKEN" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- change consumer with secrets ref: vault + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "$secret://vault/test1/foo/passwd" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- set route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "basic-auth": { + "hide_credentials": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 26: verify Authorization with foo/bar, request header should not hidden +--- request +GET /echo +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_headers +Authorization: Basic Zm9vOmJhcg== diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/batch-requests-grpc.t b/CloudronPackages/APISIX/apisix-source/t/plugin/batch-requests-grpc.t new file mode 100644 index 0000000..28b9d39 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/batch-requests-grpc.t @@ -0,0 +1,205 @@ +# +# Licensed 
to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + my $extra_yaml_config = <<_EOC_; +plugins: + - grpc-transcode + - public-api + - batch-requests +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/batch-requests" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: set proto(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package 
helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: set routes(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit route +--- request +GET /grpctest?name=world +--- response_body eval +qr/\{"message":"Hello world"\}/ + + + +=== TEST 5: successful batch-requests for both grpc and http +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "headers": { + "Content-Type":"application/json" + }, + "pipeline":[ + { + "method":"GET", + "path":"/grpctest" + }, + { + "method":"GET", + "path":"/get" + } + ] + }]=], + [=[[ + { + "status": 200, + "body":"{\"message\":\"Hello \"}" + }, + { + "status": 200, + "body":"hello" + } + ]]=]) + + ngx.status = code + ngx.say(body) + } + } + + location = /get { + content_by_lua_block { + ngx.print("hello") + } + } +--- request +GET /aggregate +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/batch-requests.t b/CloudronPackages/APISIX/apisix-source/t/plugin/batch-requests.t new file mode 100644 index 
0000000..3a98e6a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/batch-requests.t @@ -0,0 +1,1021 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("debug"); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $extra_yaml_config = <<_EOC_; +plugins: + - public-api + - batch-requests +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/batch-requests" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: sanity +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "query": { + "base": "base_query", + "conflict": "query_value" + }, + "headers": { 
+ "Base-Header": "base", + "ConflictHeader": "header_value", + "OuterConflict": "common_value" + }, + "pipeline":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world", + "ConflictHeader": "b-header-value" + } + },{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d", + "query": { + "one": "thing", + "conflict": "d_value" + } + }] + }]=], + [=[[ + { + "status": 200, + "body":"B", + "headers": { + "Client-IP": "127.0.0.1", + "Base-Header": "base", + "Base-Query": "base_query", + "X-Res": "B", + "X-Header1": "hello", + "X-Header2": "world", + "X-Conflict-Header": "b-header-value", + "X-OuterConflict": "common_value" + } + }, + { + "status": 201, + "body":"C", + "headers": { + "Client-IP-From-Hdr": "127.0.0.1", + "Base-Header": "base", + "Base-Query": "base_query", + "X-Res": "C", + "X-Method": "PUT" + } + }, + { + "status": 202, + "body":"D", + "headers": { + "Base-Header": "base", + "Base-Query": "base_query", + "X-Res": "D", + "X-Query-One": "thing", + "X-Query-Conflict": "d_value" + } + } + ]]=], + { + ConflictHeader = "outer_header", + OuterConflict = "outer_conflict" + }) + + ngx.status = code + ngx.say(body) + } + } + + location = /b { + content_by_lua_block { + ngx.status = 200 + ngx.header["Client-IP"] = ngx.var.remote_addr + ngx.header["Base-Header"] = ngx.req.get_headers()["Base-Header"] + ngx.header["Base-Query"] = ngx.var.arg_base + ngx.header["X-Header1"] = ngx.req.get_headers()["Header1"] + ngx.header["X-Header2"] = ngx.req.get_headers()["Header2"] + ngx.header["X-Conflict-Header"] = ngx.req.get_headers()["ConflictHeader"] + ngx.header["X-OuterConflict"] = ngx.req.get_headers()["OuterConflict"] + ngx.header["X-Res"] = "B" + ngx.print("B") + } + } + location = /c { + content_by_lua_block { + ngx.status = 201 + ngx.header["Client-IP-From-Hdr"] = ngx.req.get_headers()["X-Real-IP"] + ngx.header["Base-Header"] = ngx.req.get_headers()["Base-Header"] + ngx.header["Base-Query"] = ngx.var.arg_base + ngx.header["X-Res"] = "C" + 
ngx.header["X-Method"] = ngx.req.get_method() + ngx.print("C") + } + } + location = /d { + content_by_lua_block { + ngx.status = 202 + ngx.header["Base-Header"] = ngx.req.get_headers()["Base-Header"] + ngx.header["Base-Query"] = ngx.var.arg_base + ngx.header["X-Query-One"] = ngx.var.arg_one + ngx.header["X-Query-Conflict"] = ngx.var.arg_conflict + ngx.header["X-Res"] = "D" + ngx.print("D") + } + } +--- request +GET /aggregate +--- response_body +passed + + + +=== TEST 3: missing pipeline +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "pipeline1":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world" + } + },{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d" + }] + }]=] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /aggregate +--- error_code: 400 +--- response_body +{"error_msg":"bad request body: object matches none of the required: [\"pipeline\"]"} + + + +=== TEST 4: timeout is not number +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": "200", + "pipeline":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world" + } + },{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d" + }] + }]=] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /aggregate +--- error_code: 400 +--- response_body +{"error_msg":"bad request body: property \"timeout\" validation failed: wrong type: expected integer, got string"} + + + +=== TEST 5: different response time +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": 2000, + "pipeline":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world" + } + },{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d" + }] + }]=], + [=[[ + { + "status": 200 + }, + { + "status": 201 + }, + { + "status": 202 + } + ]]=] + ) + + ngx.status = code + ngx.say(body) + } + } + + location = /b { + content_by_lua_block { + ngx.sleep(0.02) + ngx.status = 200 + } + } + location = /c { + content_by_lua_block { + ngx.sleep(0.05) + ngx.status = 201 + } + } + location = /d { + content_by_lua_block { + ngx.sleep(1) + ngx.status = 202 + } + } +--- request +GET /aggregate +--- response_body +passed + + + +=== TEST 6: last request timeout +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": 100, + "pipeline":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world" + } + },{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d" + }] + }]=], + [=[[ + { + "status": 200 + }, + { + "status": 201 + }, + { + "status": 504, + "reason": "upstream timeout" + } + ]]=] + ) + + ngx.status = code + ngx.say(body) + } + } + + location = /b { + content_by_lua_block { + ngx.status = 200 + } + } + location = /c { + content_by_lua_block { + ngx.status = 201 + } + } + location = /d { + content_by_lua_block { + ngx.sleep(1) + ngx.status = 202 + } + } +--- request +GET /aggregate +--- response_body +passed +--- error_log +timeout + + + +=== TEST 7: first request timeout +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": 100, + "pipeline":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world" + } + 
},{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d" + }] + }]=], + [=[[ + { + "status": 504, + "reason": "upstream timeout" + } + ]]=] + ) + + ngx.status = code + ngx.say(body) + } + } + + location = /b { + content_by_lua_block { + ngx.sleep(1) + ngx.status = 200 + } + } + location = /c { + content_by_lua_block { + ngx.status = 201 + } + } + location = /d { + content_by_lua_block { + ngx.status = 202 + } + } +--- request +GET /aggregate +--- response_body +passed +--- error_log +timeout + + + +=== TEST 8: no body in request +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + nil, + nil + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /aggregate +--- error_code: 400 +--- response_body +{"error_msg":"no request body, you should give at least one pipeline setting"} + + + +=== TEST 9: invalid body +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + "invalid json string" + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /aggregate +--- error_code: 400 +--- response_body +{"error_msg":"invalid request body: invalid json string, err: Expected value but found invalid token at character 1"} + + + +=== TEST 10: invalid pipeline's path +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "pipeline":[ + { + "path": "" + }] + }]=] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /aggregate +--- error_code: 400 +--- response_body +{"error_msg":"bad request body: property \"pipeline\" validation failed: failed to validate item 1: 
property \"path\" validation failed: string too short, expected at least 1, got 0"} + + + +=== TEST 11: invalid pipeline's method +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "pipeline":[{ + "path": "/c", + "method": "put" + }] + }]=] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /aggregate +--- error_code: 400 +--- response_body +{"error_msg":"bad request body: property \"pipeline\" validation failed: failed to validate item 1: property \"method\" validation failed: matches none of the enum values"} + + + +=== TEST 12: invalid pipeline's version +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "pipeline":[{ + "path": "/d", + "version":1.2 + }] + }]=] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /aggregate +--- error_code: 400 +--- response_body +{"error_msg":"bad request body: property \"pipeline\" validation failed: failed to validate item 1: property \"version\" validation failed: matches none of the enum values"} + + + +=== TEST 13: invalid pipeline's ssl +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "pipeline":[{ + "path": "/d", + "ssl_verify":1.2 + }] + }]=] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /aggregate +--- error_code: 400 +--- response_body +{"error_msg":"bad request body: property \"pipeline\" validation failed: failed to validate item 1: property \"ssl_verify\" validation failed: wrong type: expected boolean, got number"} + + + +=== TEST 14: invalid pipeline's 
number +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "pipeline":[] + }]=] + ) + ngx.status = code + ngx.print(body) + } + } +--- request +GET /aggregate +--- error_code: 400 +--- response_body +{"error_msg":"bad request body: property \"pipeline\" validation failed: expect array to have at least 1 items"} + + + +=== TEST 15: when client body has been wrote to temp file +--- config + client_body_in_file_only on; + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": 100, + "pipeline":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world" + } + },{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d" + }] + }]=], + [=[[ + { + "status": 200 + }, + { + "status": 201 + }, + { + "status": 202 + } + ]]=] + ) + + ngx.status = code + ngx.say(body) + } + } + + location = /b { + content_by_lua_block { + ngx.status = 200 + } + } + location = /c { + content_by_lua_block { + ngx.status = 201 + } + } + location = /d { + content_by_lua_block { + ngx.status = 202 + } + } +--- request +GET /aggregate +--- response_body +passed + + + +=== TEST 16: copy all header to every request except content +--- config + client_body_in_file_only on; + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": 1000, + "pipeline":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world" + } + },{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d" + }] + }]=], + [=[[ + { + "status": 200, + "headers": { + "X-Cookie": "request-cookies-b", + "X-HeaderB": 
"request-header-b" + } + }, + { + "status": 201, + "headers": { + "X-Cookie": "request-cookies-c", + "X-HeaderC": "request-header-c" + } + }, + { + "status": 202, + "headers": { + "X-Cookie": "request-cookies-d", + "X-HeaderD": "request-header-d" + } + } + ]]=], + { + Cookie = "request-cookies", + OuterHeader = "request-header" + }) + + ngx.status = code + ngx.say(body) + } + } + + location = /b { + content_by_lua_block { + ngx.status = 200 + ngx.header["X-Cookie"] = ngx.req.get_headers()["Cookie"] .. "-b" + ngx.header["X-HeaderB"] = ngx.req.get_headers()["OuterHeader"] .. "-b" + } + } + location = /c { + content_by_lua_block { + ngx.status = 201 + ngx.header["X-Cookie"] = ngx.req.get_headers()["Cookie"] .. "-c" + ngx.header["X-HeaderC"] = ngx.req.get_headers()["OuterHeader"] .. "-c" + } + } + location = /d { + content_by_lua_block { + ngx.status = 202 + ngx.header["X-Cookie"] = ngx.req.get_headers()["Cookie"] .. "-d" + ngx.header["X-HeaderD"] = ngx.req.get_headers()["OuterHeader"] .. "-d" + } + } +--- request +GET /aggregate +--- response_body +passed + + + +=== TEST 17: exceed default body limit size (check header) +--- config + location = /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + ("1234"):rep(1024 * 1024) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 413 +--- response_body eval +qr/\{"error_msg":"request size 4194304 is greater than the maximum size 1048576 allowed"\}/ + + + +=== TEST 18: exceed default body limit size (check file size) +--- request eval +"POST /apisix/batch-requests +" . ("1000\r +" . ("11111111" x 512) . "\r +") x 257 . 
"0\r +\r +" +--- more_headers +Transfer-Encoding: chunked +--- error_code: 413 +--- response_body eval +qr/\{"error_msg":"request size 1052672 is greater than the maximum size 1048576 allowed"\}/ +--- error_log +attempt to read body from file + + + +=== TEST 19: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/batch-requests', + ngx.HTTP_PUT, + [[{ + "max_body_size": 2048 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: exceed body limit size +--- config + location = /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + ("1234"):rep(1024) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 413 +--- response_body eval +qr/\{"error_msg":"request size 4096 is greater than the maximum size 2048 allowed"\}/ + + + +=== TEST 21: exceed body limit size (expected) +--- config + location = /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body, res_data = t('/apisix/batch-requests', + ngx.HTTP_POST, + ("1234"):rep(1024), + nil, + {EXPECT = "100-CONTINUE", ["content-length"] = 4096} + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 413 +--- response_body eval +qr/\{"error_msg":"request size 4096 is greater than the maximum size 2048 allowed"\}/ + + + +=== TEST 22: don't exceed body limit size +--- config + location = /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "headers": { + "Base-Header": "base" + }, + "pipeline":[ + { + "path": "/a", + "headers": { + "Header1": 
"hello", + "Header2": "world" + } + } + ] + }]=], + [=[[ + { + "status": 200, + "body":"A", + "headers": { + "Base-Header": "base", + "X-Res": "a", + "X-Header1": "hello", + "X-Header2": "world" + } + } + ]]=]) + + ngx.status = code + ngx.say(body) + } + } + location = /a { + content_by_lua_block { + ngx.status = 200 + ngx.header["Base-Header"] = ngx.req.get_headers()["Base-Header"] + ngx.header["X-Header1"] = ngx.req.get_headers()["Header1"] + ngx.header["X-Header2"] = ngx.req.get_headers()["Header2"] + ngx.header["X-Res"] = "a" + ngx.print("A") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 23: invalid body size +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/batch-requests', + ngx.HTTP_PUT, + [[{ + "max_body_size": 0 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid configuration: property \\"max_body_size\\" validation failed: expected 0 to be greater than 0"\}/ + + + +=== TEST 24: keep environment clean +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/batch-requests', + ngx.HTTP_PUT, + [[{ + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/batch-requests2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/batch-requests2.t new file mode 100644 index 0000000..b7ac1c6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/batch-requests2.t @@ -0,0 +1,446 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + my $extra_yaml_config = <<_EOC_; +plugins: + - public-api + - batch-requests +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/batch-requests" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: customize uri, not found +--- yaml_config +plugin_attr: + batch-requests: + uri: "/foo/bar" +--- config + location = /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": 100, + "pipeline":[ + { + "path": "/a" + }] + }]=], + [=[[ + { + "status": 
200 + } + ]]=] + ) + + ngx.status = code + ngx.say(body) + } + } + + location = /a { + content_by_lua_block { + ngx.status = 200 + } + } +--- error_code: 404 + + + +=== TEST 3: create public API route for custom uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/foo/bar" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: customize uri, found +--- yaml_config +plugin_attr: + batch-requests: + uri: "/foo/bar" +--- config + location = /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/foo/bar', + ngx.HTTP_POST, + [=[{ + "timeout": 100, + "pipeline":[ + { + "path": "/b", + "headers": { + "Header1": "hello", + "Header2": "world" + } + },{ + "path": "/c", + "method": "PUT" + },{ + "path": "/d" + }] + }]=], + [=[[ + { + "status": 200 + }, + { + "status": 201 + }, + { + "status": 202 + } + ]]=] + ) + + ngx.status = code + ngx.say(body) + } + } + + location = /b { + content_by_lua_block { + ngx.status = 200 + } + } + location = /c { + content_by_lua_block { + ngx.status = 201 + } + } + location = /d { + content_by_lua_block { + ngx.status = 202 + } + } + + + +=== TEST 5: customize uri, missing plugin, use default +--- yaml_config +plugin_attr: + x: + uri: "/foo/bar" +--- config + location = /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": 100, + "pipeline":[ + { + "path": "/a" + }] + }]=], + [=[[ + { + "status": 200 + } + ]]=] + ) + + ngx.status = code + ngx.say(body) + } + } + + location = /a { + content_by_lua_block { + ngx.status = 200 + } + } + + + +=== TEST 6: customize uri, missing attr, use 
default +--- yaml_config +plugin_attr: + batch-requests: + xyz: "/foo/bar" +--- config + location = /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "timeout": 100, + "pipeline":[ + { + "path": "/a" + }] + }]=], + [=[[ + { + "status": 200 + } + ]]=] + ) + + ngx.status = code + ngx.say(body) + } + } + + location = /a { + content_by_lua_block { + ngx.status = 200 + } + } + + + +=== TEST 7: ensure real ip header is overridden +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "headers": { + "x-real-ip": "127.0.0.2" + }, + "pipeline":[ + { + "path": "/c", + "method": "PUT" + }] + }]=], + [=[[ + { + "status": 201, + "body":"C", + "headers": { + "Client-IP": "127.0.0.1", + "Client-IP-From-Hdr": "127.0.0.1" + } + } + ]]=]) + + ngx.status = code + ngx.say(body) + } + } + + location = /c { + content_by_lua_block { + ngx.status = 201 + ngx.header["Client-IP"] = ngx.var.remote_addr + ngx.header["Client-IP-From-Hdr"] = ngx.req.get_headers()["x-real-ip"] + ngx.print("C") + } + } +--- request +GET /aggregate +--- response_body +passed + + + +=== TEST 8: ensure real ip header is overridden, header from the pipeline +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "headers": { + }, + "pipeline":[ + { + "path": "/c", + "headers": { + "x-real-ip": "127.0.0.2" + }, + "method": "PUT" + }] + }]=], + [=[[ + { + "status": 201, + "body":"C", + "headers": { + "Client-IP": "127.0.0.1", + "Client-IP-From-Hdr": "127.0.0.1" + } + } + ]]=]) + + ngx.status = code + ngx.say(body) + } + } + + location = /c { + content_by_lua_block 
{ + ngx.status = 201 + ngx.header["Client-IP"] = ngx.var.remote_addr + ngx.header["Client-IP-From-Hdr"] = ngx.req.get_headers()["x-real-ip"] + ngx.print("C") + } + } +--- request +GET /aggregate +--- response_body +passed + + + +=== TEST 9: ensure real ip header is overridden, header has underscore +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "headers": { + }, + "pipeline":[ + { + "path": "/c", + "headers": { + "x_real-ip": "127.0.0.2" + }, + "method": "PUT" + }] + }]=], + [=[[ + { + "status": 201, + "body":"C", + "headers": { + "Client-IP": "127.0.0.1", + "Client-IP-From-Hdr": "127.0.0.1" + } + } + ]]=]) + + ngx.status = code + ngx.say(body) + } + } + + location = /c { + content_by_lua_block { + ngx.status = 201 + ngx.header["Client-IP"] = ngx.var.remote_addr + ngx.header["Client-IP-From-Hdr"] = ngx.req.get_headers()["x-real-ip"] + ngx.print("C") + } + } +--- request +GET /aggregate +--- response_body +passed + + + +=== TEST 10: ensure the content-type is correct +--- request +POST /apisix/batch-requests +{ + "headers": { + }, + "pipeline":[ + { + "path": "/c", + "method": "PUT" + } + ] +} +--- response_headers +Content-Type: application/json diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/body-transformer-multipart.t b/CloudronPackages/APISIX/apisix-source/t/plugin/body-transformer-multipart.t new file mode 100644 index 0000000..9bf2282 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/body-transformer-multipart.t @@ -0,0 +1,269 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: multipart request body to json request body conversion +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "body-transformer": { + "request": { + "template": "{\"foo\":\"{{name .. \" world\"}}\",\"bar\":{{age+10}}}" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/echo" + + local body = ([[ +--AaB03x +Content-Disposition: form-data; name="name" + +Larry +--AaB03x +Content-Disposition: form-data; name="age" + +10 +--AaB03x--]]) + + local opt = {method = "POST", body = body, headers = {["Content-Type"] = "multipart/related; boundary=AaB03x"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + + ngx.status = res.status + ngx.say(res.body or res.reason) + } + } +--- response_body +{"foo":"Larry world","bar":20} + + + +=== TEST 2: multipart response body to json response body conversion +--- config + location /demo { + content_by_lua_block { + ngx.header["Content-Type"] = "multipart/related; boundary=AaB03x" + ngx.say([[ +--AaB03x +Content-Disposition: form-data; name="name" + +Larry +--AaB03x +Content-Disposition: form-data; name="age" + +10 +--AaB03x--]]) + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "response": { + "template": "{\"foo\":\"{{name .. \" world\"}}\",\"bar\":{{age+10}}}" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1984": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + + local opt = {method = "GET"} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + + ngx.status = res.status + ngx.say(res.body or res.reason) + } + } +--- response_body +{"foo":"Larry world","bar":20} + + + +=== TEST 3: multipart parse result accessible to template renderer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + + local req_template = ngx.encode_base64[[ + {% + local core = require 'apisix.core' + local cjson = require 'cjson' + if tonumber(context.age) > 18 then + context._multipart:set_simple("status", "major") + else + context._multipart:set_simple("status", "minor") + end + local body = context._multipart:tostring() + %}{* body *} + ]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/echo", + "plugins": { + "body-transformer": { + "response": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]], req_template) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + ------------------------#######################------------------- + + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/echo" + + local body_minor = ([[ +--AaB03x +Content-Disposition: form-data; name="name" + +Larry +--AaB03x +Content-Disposition: form-data; name="age" + +10 +--AaB03x--]]) + + + local opt = {method = "POST", body = body_minor, headers = {["Content-Type"] = "multipart/related; boundary=AaB03x"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + + ngx.say(res.body) + + } + } +--- response_body eval +qr/.*Content-Disposition: form-data; name=\"status\"\r\n\r\nminor.*/ + + + +=== TEST 4: multipart parse response accessible to template renderer (test with age == 19) +--- config + location /t { + content_by_lua_block { + + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/echo" + + local body_major = ([[ +--AaB03x +Content-Disposition: form-data; name="name" + +Larry +--AaB03x +Content-Disposition: form-data; name="age" + +19 +--AaB03x--]]) + + + local opt = {method = "POST", body = body_major, headers = {["Content-Type"] = "multipart/related; boundary=AaB03x"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + + ngx.say(res.body) + + } + } +--- response_body eval +qr/.*Content-Disposition: form-data; name=\"status\"\r\n\r\nmajor.*/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/body-transformer.t b/CloudronPackages/APISIX/apisix-source/t/plugin/body-transformer.t new file mode 100644 index 0000000..b6a266c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/body-transformer.t @@ -0,0 +1,1129 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: simulate simple SOAP proxy +--- config +location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local xml2lua = require("xml2lua") + local xmlhandler = require("xmlhandler.tree") + local handler = xmlhandler:new() + local parser = xml2lua.parser(handler) + parser:parse(body) + + ngx.print(string.format([[ + + + + + + %s + 46704314 + Madrid + EUR + + + + + ]], handler.root["soap-env:Envelope"]["soap-env:Body"]["ns0:getCountryRequest"]["ns0:name"])) + } +} + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local req_template = ngx.encode_base64[[ + + + + + {{_escape_xml(name)}} + + + + ]] + + local rsp_template = ngx.encode_base64[[ +{% if Envelope.Body.Fault == nil then %} +{ + "status":"{{_ctx.var.status}}", + "currency":"{{Envelope.Body.getCountryResponse.country.currency}}", + "population":{{Envelope.Body.getCountryResponse.country.population}}, + "capital":"{{Envelope.Body.getCountryResponse.country.capital}}", + "name":"{{Envelope.Body.getCountryResponse.country.name}}" +} +{% else %} +{ + "message":{*_escape_json(Envelope.Body.Fault.faultstring[1])*}, + "code":"{{Envelope.Body.Fault.faultcode}}" + {% if Envelope.Body.Fault.faultactor ~= nil then %} + , "actor":"{{Envelope.Body.Fault.faultactor}}" + {% end 
%} +} +{% end %} + ]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/ws", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "request": { + "template": "%s" + }, + "response": { + "input_format": "xml", + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template, rsp_template, ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/ws" + local body = [[{"name": "Spain"}]] + local opt = {method = "POST", body = body, headers = {["Content-Type"] = "application/json"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + local data1 = core.json.decode(res.body) + local data2 = core.json.decode[[{"status":"200","currency":"EUR","population":46704314,"capital":"Madrid","name":"Spain"}]] + assert(core.json.stably_encode(data1) == core.json.stably_encode(data2)) + } + } + + + +=== TEST 2: test JSON-to-JSON +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local data = core.json.decode(body) + assert(data.foo == "hello world" and data.bar == 30) + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = [[{"foo":"{{name .. 
" world"}}","bar":{{age+10}}}]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "request": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/foobar" + local body = [[{"name":"hello","age":20}]] + local opt = {method = "POST", body = body, headers = {["Content-Type"] = "application/json"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + } + } + + + +=== TEST 3: specify wrong input_format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = [[{"foo":"{{name .. " world"}}","bar":{{age+10}}}]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "request": { + "input_format": "xml", + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/foobar" + local body = [[{"name":"hello","age":20}]] + local opt = {method = "POST", body = body, headers = {["Content-Type"] = "application/json"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 400) + } + } +--- error_log +Error Parsing XML + + + +=== TEST 4: invalid reference in template +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = [[{"foo":"{{name() .. " world"}}","bar":{{age+10}}}]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "request": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/foobar" + local body = [[{"name":"hello","age":20}]] + local opt = {method = "POST", body = body, headers = {["Content-Type"] = "application/json"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 503) + } + } +--- grep_error_log eval +qr/transform\(\): request template rendering:.*/ +--- grep_error_log_out eval +qr/attempt to call global 'name' \(a string value\)/ + + + +=== TEST 5: generate request body from scratch +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local data = core.json.decode(body) + assert(data.foo == "hello world") + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = [[{ + "foo":"{{_ctx.var.arg_name .. " world"}}" + }]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo", + "method": "POST" + }, + "body-transformer": { + "request": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/foobar?name=hello" + local opt = {method = "GET"} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + } + } + + + +=== TEST 6: html escape in template +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local data = core.json.decode(body) + if (data == nil) or (data.agent:find("ngx_lua/", 0, true) == nil) then + return ngx.exit(400) + end + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + -- html escape would escape '/' to '/' in string, which may be unexpected. + -- 'lua-resty-http/0.16.1 (Lua) ngx_lua/10021' + -- would be escaped into + -- 'lua-resty-http/0.16.1 (Lua) ngx_lua/10021' + local req_template = [[{ + "agent":"{{_ctx.var.http_user_agent}}" + }]] + local admin_body = [[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo", + "method": "POST" + }, + "body-transformer": { + "request": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format(admin_body, req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/foobar?name=hello" + local opt = {method = "GET"} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 400) + + -- disable html escape, now it's ok + local req_template = [[{"agent":"{*_ctx.var.http_user_agent*}"}]] + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format(admin_body, req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + } + } + + + +=== TEST 7: parse body in yaml format +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local data = core.json.decode(body) + if data == nil or data.foobar ~= "hello world" then + return ngx.exit(400) + end + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = [[ + {% + local yaml = require("lyaml") + local body = yaml.load(_body) + %} + {"foobar":"{{body.foobar.foo .. " " .. body.foobar.bar}}"} + ]] + local admin_body = [[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo", + "method": "POST" + }, + "body-transformer": { + "request": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format(admin_body, req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local body = [[ +foobar: + foo: hello + bar: world + ]] + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/foobar" + local opt = {method = "POST", body = body} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + } + } + + + +=== TEST 8: test _escape_json +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local data = core.json.decode(body) + if data == nil or data.foobar ~= [[hello "world"]] then + return ngx.exit(400) + end + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = [[{"foobar":{*_escape_json(name)*}}]] + local admin_body = [[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo", + "method": "POST" + }, + "body-transformer": { + "request": { + "input_format": "json", + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format(admin_body, req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local body = [[{"name":"hello \"world\""}]] + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/foobar" + local opt = {method = "POST", body = body} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + } + } + + + +=== TEST 9: test _escape_xml +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local xml2lua = require("xml2lua") + local xmlhandler = require("xmlhandler.tree") + local handler = xmlhandler:new() + local parser = xml2lua.parser(handler) + parser:parse(body) + assert(handler.root.foobar == "") + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = [[{*_escape_xml(name)*}]] + local admin_body = [[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo", + "method": "POST" + }, + "body-transformer": { + "request": { + "input_format": "json", + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format(admin_body, req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local body = [[{"name":""}]] + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/foobar" + local opt = {method = "POST", body = body} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + } + } + + + +=== TEST 10: cooperation of proxy-cache plugin +--- http_config +lua_shared_dict memory_cache 50m; +--- config +location /demo { + content_by_lua_block { + ngx.say([[ + + + + + hello + + + + ]]) + } +} + + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local req_template = ngx.encode_base64[[ + + + + + {{_escape_xml(country)}} + + + + ]] + + local rsp_template = ngx.encode_base64[[ + {"result": {*_escape_json(Envelope.Body.CapitalCityResponse.CapitalCityResult)*}} + ]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/capital", + "plugins": { + "proxy-rewrite": { + "set": { + "Accept-Encoding": "identity", + "Content-Type": "text/xml" + }, + "uri": "/demo" + }, + "proxy-cache":{ + "cache_strategy": "memory", + "cache_bypass": ["$arg_bypass"], + "cache_http_status": [200], + "cache_key": ["$uri", "-cache-id"], + "cache_method": ["POST"], + "hide_cache_headers": true, + "no_cache": ["$arg_test"], + "cache_zone": "memory_cache" + }, + "body-transformer": { + "request": { + "input_format": "json", + "template": "%s" + }, + "response": { + "input_format": "xml", + "template": "%s" + } + }, + "response-rewrite":{ + "headers": { + "set": { + "Content-Type": "application/json" + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template, rsp_template, ngx.var.server_port) + ) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/capital" + local body = [[{"country": "foo"}]] + local opt = {method = "POST", body = body} + local httpc = http.new() + + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + local data = core.json.decode(res.body) + assert(data.result == "hello") + assert(res.headers["Apisix-Cache-Status"] == "MISS") + + local res2 = httpc:request_uri(uri, opt) + assert(res2.status == 200) + local data2 = core.json.decode(res2.body) + assert(data2.result == "hello") + assert(res2.headers["Apisix-Cache-Status"] == "HIT") + } + } + + + +=== TEST 11: return raw body with _body anytime +--- http_config +--- config + location /demo { + content_by_lua_block { + ngx.header.content_type = "application/json" + ngx.print('{"result": "hello world"}') + } + } + + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local rsp_template = ngx.encode_base64[[ + {"raw_body": {*_escape_json(_body)*}, "result": {*_escape_json(result)*}} + ]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/capital", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "response": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], rsp_template, ngx.var.server_port) + ) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/capital" + local opt = {method = "GET", headers = {["Content-Type"] = "application/json"}} + local httpc = http.new() + + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + local data = core.json.decode(res.body) + assert(data.result == "hello world") + assert(data.raw_body == '{"result": "hello world"}') + } + } + + + +=== TEST 12: empty xml value should be rendered as empty string +--- config + location /demo { + content_by_lua_block { + ngx.print([[ + + + + 33333333333 + + 33333333333 + + + + + + ]]) + } + } + + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local rsp_template = ngx.encode_base64[[ +{ "KOVKood":"{{Envelope.Body.RR58isikEpiletResponse.response.KOVKood}}" } + ]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/ws", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "response": { + "input_format": "xml", + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], rsp_template, ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/ws" + local opt = {method = "GET"} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + local data1 = core.json.decode(res.body) + local data2 = core.json.decode[[{"KOVKood":""}]] + assert(core.json.stably_encode(data1) == core.json.stably_encode(data2)) + } + } + + + +=== TEST 13: test x-www-form-urlencoded to JSON +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local data = core.json.decode(body) + assert(data.foo == "hello world" and data.bar == 30) + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = [[{"foo":"{{name .. " world"}}","bar":{{age+10}}}]] + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "request": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/foobar" + local data = {name = "hello", age = 20} + local body = ngx.encode_args(data) + local opt = {method = "POST", body = body, headers = {["Content-Type"] = "application/x-www-form-urlencoded"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + } + } + + + +=== TEST 14: test get request to JSON +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local data = core.json.decode(body) + assert(data.foo == "hello world" and data.bar == 30) + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = [[{"foo":"{{name .. " world"}}","bar":{{age+10}}}]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "request": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/foobar" .. 
"?name=hello&age=20" + local opt = {method = "GET"} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + } + } + + + +=== TEST 15: test input is in base64-encoded urlencoded format +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + local data = ngx.decode_args(body) + assert(data.foo == "hello world" and data.bar == "30") + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local req_template = ngx.encode_base64[[foo={{name .. " world"}}&bar={{age+10}}]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "request": { + "template_is_base64": true, + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template:gsub('"', '\\"'), ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/foobar" + local data = {name = "hello", age = 20} + local body = ngx.encode_args(data) + local opt = {method = "POST", body = body, headers = {["Content-Type"] = "application/x-www-form-urlencoded"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + } + } + + + +=== TEST 16: test for missing Content-Type and skip body parsing +--- config + location /demo { + content_by_lua_block { + local core = require("apisix.core") + local body = core.request.get_body() + assert(body == "{\"message\": \"actually json\"}") + } + } + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/foobar", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "request": { + "input_format": "plain", + "template": "{\"message\": \"{* string.gsub(_body, 'not ', '') *}\"}" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local http = require("resty.http") + local httpc = http.new() + local res, err = httpc:request_uri("http://127.0.0.1:" .. ngx.var.server_port .. "/foobar", { + method = "POST", + body = "not actually json", + }) + assert(res.status == 200) + } + } +--- no_error_log +no input format to parse diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/body-transformer2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/body-transformer2.t new file mode 100644 index 0000000..1db206c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/body-transformer2.t @@ -0,0 +1,134 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: body transformer with decoded body (keyword: context) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + + local req_template = ngx.encode_base64[[ + {% + local core = require 'apisix.core' + local cjson = require 'cjson' + context.name = "bar" + context.address = nil + context.age = context.age + 1 + local body = core.json.encode(context) + %}{* body *} + ]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/echo", + "plugins": { + "body-transformer": { + "request": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]], req_template) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: verify the transformed body +--- request +POST /echo +{"name": "foo", "address":"LA", "age": 18} +--- response_body +{"name": "bar", "age": 19} + + + +=== TEST 3: body transformer plugin with 
key-auth that fails +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local core = require("apisix.core") + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/foobar", + "plugins": { + "body-transformer": { + "request": { + "template": "some-template" + } + }, + "key-auth": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/foobar" + local opt = {method = "POST", body = "body", headers = {["Content-Type"] = "application/json"}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 401) + ngx.say(res.reason) + } + } +--- response_body +Unauthorized diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/brotli.t b/CloudronPackages/APISIX/apisix-source/t/plugin/brotli.t new file mode 100644 index 0000000..f0f6931 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/brotli.t @@ -0,0 +1,785 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + my $extra_yaml_config = <<_EOC_; +plugins: + - brotli +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: hit, single Accept-Encoding +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: br +Content-Type: text/html +--- response_headers +Content-Encoding: br +Vary: + + + +=== TEST 3: hit, single wildcard Accept-Encoding +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: * +Content-Type: text/html +--- response_headers +Content-Encoding: br +Vary: + + + +=== TEST 4: not hit, single Accept-Encoding +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Content-Type: text/html +--- response_headers +Vary: + + + +=== TEST 5: hit, br in multi Accept-Encoding +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip, br +Content-Type: text/html +--- response_headers +Content-Encoding: br +Vary: + + + +=== TEST 6: hit, no br in multi Accept-Encoding, but wildcard +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip, * +Content-Type: text/html +--- response_headers +Content-Encoding: br +Vary: + + + +=== TEST 7: not hit, no br in multi Accept-Encoding +--- request +POST /echo 
+0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip, deflate +Content-Type: text/html +--- response_headers +Vary: + + + +=== TEST 8: hit, multi Accept-Encoding with quality +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip;q=0.5, br;q=0.6 +Content-Type: text/html +--- response_headers +Content-Encoding: br +Vary: + + + +=== TEST 9: not hit, multi Accept-Encoding with quality and disable br +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip;q=0.5, br;q=0 +Content-Type: text/html +--- response_headers +Vary: + + + +=== TEST 10: hit, multi Accept-Encoding with quality and wildcard +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip;q=0.8, deflate, sdch;q=0.6, *;q=0.1 +Content-Type: text/html +--- response_headers +Content-Encoding: br +Vary: + + + +=== TEST 11: default buffers and compress level +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.brotli") + local core = require("apisix.core") + local json = require("toolkit.json") + + for _, conf in ipairs({ + {}, + {mode = 1}, + {comp_level = 5}, + {comp_level = 5, lgwin = 12}, + {comp_level = 5, lgwin = 12, vary = true}, + {comp_level = 5, lgwin = 12, lgblock = 16, vary = true}, + {mode = 2, comp_level = 5, lgwin = 12, lgblock = 16, vary = true}, + }) do + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + return + end + ngx.say(json.encode(conf)) + end + } + } +--- response_body +{"comp_level":6,"http_version":1.1,"lgblock":0,"lgwin":19,"min_length":20,"mode":0,"types":["text/html"]} +{"comp_level":6,"http_version":1.1,"lgblock":0,"lgwin":19,"min_length":20,"mode":1,"types":["text/html"]} +{"comp_level":5,"http_version":1.1,"lgblock":0,"lgwin":19,"min_length":20,"mode":0,"types":["text/html"]} +{"comp_level":5,"http_version":1.1,"lgblock":0,"lgwin":12,"min_length":20,"mode":0,"types":["text/html"]} 
+{"comp_level":5,"http_version":1.1,"lgblock":0,"lgwin":12,"min_length":20,"mode":0,"types":["text/html"],"vary":true} +{"comp_level":5,"http_version":1.1,"lgblock":16,"lgwin":12,"min_length":20,"mode":0,"types":["text/html"],"vary":true} +{"comp_level":5,"http_version":1.1,"lgblock":16,"lgwin":12,"min_length":20,"mode":2,"types":["text/html"],"vary":true} + + + +=== TEST 12: compress level +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/echo", + "vars": [["http_x", "==", "1"]], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + "comp_level": 0 + } + } + }]=] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/echo", + "vars": [["http_x", "==", "2"]], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + "comp_level": 11 + } + } + }]=] + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 13: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/echo" + local httpc = http.new() + local res, err = httpc:request_uri(uri, + {method = "POST", headers = {x = "1"}, body = ("0123"):rep(1024)}) + if not res then + ngx.say(err) + return + end + local less_compressed = res.body + local res, err = httpc:request_uri(uri, + {method = "POST", headers = {x = "2"}, body = ("0123"):rep(1024)}) + if not res then + ngx.say(err) + return + end + if #less_compressed < 4096 and #less_compressed < #res.body then + ngx.say("ok") + end + } + } +--- response_body +ok + + + +=== TEST 14: min length +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + "min_length": 21 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 15: not hit +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: br +Content-Type: text/html +--- response_headers +Content-Encoding: + + + +=== TEST 16: http version +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + "http_version": 1.1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 17: not hit +--- request +POST /echo HTTP/1.0 +0123456789 +012345678 +--- more_headers +Accept-Encoding: br +Content-Type: text/html +--- response_headers +Content-Encoding: + + + +=== TEST 18: hit again +--- request +POST /echo HTTP/1.1 +0123456789 +012345678 +--- more_headers +Accept-Encoding: br +Content-Type: text/html +--- response_headers 
+Content-Encoding: br + + + +=== TEST 19: types +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + "types": ["text/plain", "text/xml"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 20: not hit +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: br +Content-Type: text/html +--- response_headers +Content-Encoding: + + + +=== TEST 21: hit again +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: br +Content-Type: text/xml +--- response_headers +Content-Encoding: br + + + +=== TEST 22: hit with charset +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: br +Content-Type: text/plain; charset=UTF-8 +--- response_headers +Content-Encoding: br + + + +=== TEST 23: match all types +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + "types": "*" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 24: hit +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: br +Content-Type: video/3gpp +--- response_headers +Content-Encoding: br + + + +=== TEST 25: vary +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + 
"127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + "vary": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 26: hit +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: br +Vary: upstream +Content-Type: text/html +--- response_headers +Content-Encoding: br +Vary: upstream, Accept-Encoding + + + +=== TEST 27: schema check +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, case in ipairs({ + {input = { + types = {} + }}, + {input = { + min_length = 0 + }}, + {input = { + mode = 4 + }}, + {input = { + comp_level = 12 + }}, + {input = { + http_version = 2 + }}, + {input = { + lgwin = 100 + }}, + {input = { + lgblock = 8 + }}, + {input = { + vary = 0 + }} + }) do + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + { + id = "1", + plugins = { + ["brotli"] = case.input + } + } + ) + ngx.print(body) + end + } +} +--- response_body +{"error_msg":"failed to check the configuration of plugin brotli err: property \"types\" validation failed: object matches none of the required"} +{"error_msg":"failed to check the configuration of plugin brotli err: property \"min_length\" validation failed: expected 0 to be at least 1"} +{"error_msg":"failed to check the configuration of plugin brotli err: property \"mode\" validation failed: expected 4 to be at most 2"} +{"error_msg":"failed to check the configuration of plugin brotli err: property \"comp_level\" validation failed: expected 12 to be at most 11"} +{"error_msg":"failed to check the configuration of plugin brotli err: property \"http_version\" validation failed: matches none of the enum values"} +{"error_msg":"failed to check the configuration of plugin brotli err: property \"lgwin\" validation failed: matches none of the enum values"} +{"error_msg":"failed to check the configuration of plugin brotli err: property \"lgblock\" 
validation failed: matches none of the enum values"} +{"error_msg":"failed to check the configuration of plugin brotli err: property \"vary\" validation failed: wrong type: expected boolean, got number"} + + + +=== TEST 28: body checksum +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + "types": "*" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 29: hit - decompressed response body same as request body +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/echo" + local httpc = http.new() + local req_body = ("abcdf01234"):rep(1024) + local res, err = httpc:request_uri(uri, + {method = "POST", headers = {["Accept-Encoding"] = "br"}, body = req_body}) + if not res then + ngx.say(err) + return + end + + local brotli = require "brotli" + local decompressor = brotli.decompressor:new() + local chunk = decompressor:decompress(res.body) + local chunk_fin = decompressor:finish() + local chunks = chunk .. 
chunk_fin + if #chunks == #req_body then + ngx.say("ok") + end + } + } +--- response_body +ok + + + +=== TEST 30: mock upstream compressed response +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/mock_compressed_upstream_response", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "brotli": { + "types": "*" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 31: hit - skip brotli compression of compressed upstream response +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/mock_compressed_upstream_response" + local httpc = http.new() + local req_body = ("abcdf01234"):rep(1024) + local res, err = httpc:request_uri(uri, + {method = "POST", headers = {["Accept-Encoding"] = "gzip, br"}, body = req_body}) + if not res then + ngx.say(err) + return + end + if res.headers["Content-Encoding"] == 'gzip' then + ngx.say("ok") + end + } + } +--- request +GET /t +--- more_headers +Accept-Encoding: gzip, br +Vary: upstream +Content-Type: text/html +--- response_body +ok diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/cas-auth.t b/CloudronPackages/APISIX/apisix-source/t/plugin/cas-auth.t new file mode 100644 index 0000000..4a2bfe7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/cas-auth.t @@ -0,0 +1,223 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('warn'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: Add route for sp1 +--- config + location /t { + content_by_lua_block { + local kc = require("lib.keycloak_cas") + local core = require("apisix.core") + + local default_opts = kc.get_default_opts() + local opts = core.table.deepcopy(default_opts) + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/cas1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "host" : "127.0.0.1", + "plugins": { + "cas-auth": ]] .. core.json.encode(opts) .. [[ + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: login and logout ok +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local kc = require "lib.keycloak_cas" + + local path = "/uri" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + local username = "test" + local password = "test" + + local res, err, cas_cookie, keycloak_cookie = kc.login_keycloak(uri .. path, username, password) + if err or res.headers['Location'] ~= path then + ngx.log(ngx.ERR, err) + ngx.exit(500) + end + res, err = httpc:request_uri(uri .. 
res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cas_cookie + } + }) + assert(res.status == 200) + ngx.say(res.body) + + res, err = kc.logout_keycloak(uri .. "/logout", cas_cookie, keycloak_cookie) + assert(res.status == 200) + } + } +--- response_body_like +uri: /uri +cookie: .* +host: 127.0.0.1:1984 +user-agent: .* +x-real-ip: 127.0.0.1 + + + +=== TEST 3: Add route for sp2 +--- config + location /t { + content_by_lua_block { + local kc = require("lib.keycloak_cas") + local core = require("apisix.core") + + local default_opts = kc.get_default_opts() + local opts = core.table.deepcopy(default_opts) + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/cas2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "host" : "127.0.0.2", + "plugins": { + "cas-auth": ]] .. core.json.encode(opts) .. [[ + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: login sp1 and sp2, then do single logout +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local kc = require "lib.keycloak_cas" + + local path = "/uri" + + -- login to sp1 + local uri = "http://127.0.0.1:" .. ngx.var.server_port + local username = "test" + local password = "test" + + local res, err, cas_cookie, keycloak_cookie = kc.login_keycloak(uri .. path, username, password) + if err or res.headers['Location'] ~= path then + ngx.log(ngx.ERR, err) + ngx.exit(500) + end + res, err = httpc:request_uri(uri .. res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cas_cookie + } + }) + assert(res.status == 200) + + -- login to sp2, which would skip login at keycloak side + local uri2 = "http://127.0.0.2:" .. ngx.var.server_port + + local res, err, cas_cookie2 = kc.login_keycloak_for_second_sp(uri2 .. 
path, keycloak_cookie) + if err or res.headers['Location'] ~= path then + ngx.log(ngx.ERR, err) + ngx.exit(500) + end + res, err = httpc:request_uri(uri2 .. res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cas_cookie2 + } + }) + assert(res.status == 200) + + -- SLO (single logout) + res, err = kc.logout_keycloak(uri .. "/logout", cas_cookie, keycloak_cookie) + assert(res.status == 200) + + -- login to sp2, which would do normal login process at keycloak side + local res, err, cas_cookie2, keycloak_cookie = kc.login_keycloak(uri2 .. path, username, password) + if err or res.headers['Location'] ~= path then + ngx.log(ngx.ERR, err) + ngx.exit(500) + end + res, err = httpc:request_uri(uri .. res.headers['Location'], { + method = "GET", + headers = { + ["Cookie"] = cas_cookie2 + } + }) + assert(res.status == 200) + + -- logout sp2 + res, err = kc.logout_keycloak(uri2 .. "/logout", cas_cookie2, keycloak_cookie) + assert(res.status == 200) + } + } diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/chaitin-waf-reject.t b/CloudronPackages/APISIX/apisix-source/t/plugin/chaitin-waf-reject.t new file mode 100644 index 0000000..fcef07c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/chaitin-waf-reject.t @@ -0,0 +1,212 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $stream_default_server = <<_EOC_; + server { + listen 8088; + listen 8089; + content_by_lua_block { + require("lib.chaitin_waf_server").reject() + } + } +_EOC_ + + $block->set_value("extra_stream_config", $stream_default_server); + $block->set_value("stream_conf_enable", 1); + + # setup default conf.yaml + my $extra_yaml_config = $block->extra_yaml_config // <<_EOC_; +apisix: + stream_proxy: # TCP/UDP L4 proxy + only: true # Enable L4 proxy only without L7 proxy. + tcp: + - addr: 9100 # Set the TCP proxy listening ports. + tls: true + - addr: "127.0.0.1:9101" + udp: # Set the UDP proxy listening ports. + - 9200 + - "127.0.0.1:9201" +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + # use /do instead of /t because stream server will inject a default /t location + $block->set_value("request", "GET /do"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "host": "127.0.0.1", + "port": 8088 + }, + { + "host": "127.0.0.1", + "port": 8089 + } + ] + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "upstream": { + "servers": ["httpbun.org"] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 
then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: pass +--- request +GET /hello +--- error_code: 403 +--- response_body +{"code": 403, "success":false, "message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "b3c6ce574dc24f09a01f634a39dca83b"} +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-STATUS: 403 +X-APISIX-CHAITIN-WAF-ACTION: reject +--- response_headers_like +X-APISIX-CHAITIN-WAF-TIME: + + + +=== TEST 3: plugin mode monitor prepare +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "host": "127.0.0.1", + "port": 8089 + } + ] + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "mode": "monitor", + "match": [ + { + "vars": [ + ["http_waf", "==", "true"] + ] + } + ] + } + }, + "uri": "/*", + "upstream": { + "nodes": { "127.0.0.1:1980": 1 }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 4: plugin mode monitor +--- request +GET /hello +--- more_headers +waf: true +trigger: block +--- error_code: 200 +--- response_body +hello world +--- response_headers +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-STATUS: 403 +X-APISIX-CHAITIN-WAF-ACTION: reject diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/chaitin-waf-timeout.t b/CloudronPackages/APISIX/apisix-source/t/plugin/chaitin-waf-timeout.t new file mode 100644 index 0000000..063f1bc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/chaitin-waf-timeout.t @@ -0,0 +1,139 @@ +# +# Licensed to the Apache Software Foundation (ASF) under 
one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $stream_default_server = <<_EOC_; + server { + listen 8088; + listen 8089; + content_by_lua_block { + require("lib.chaitin_waf_server").timeout() + } + } +_EOC_ + + $block->set_value("extra_stream_config", $stream_default_server); + $block->set_value("stream_conf_enable", 1); + + # setup default conf.yaml + my $extra_yaml_config = $block->extra_yaml_config // <<_EOC_; +apisix: + stream_proxy: # TCP/UDP L4 proxy + only: true # Enable L4 proxy only without L7 proxy. + tcp: + - addr: 9100 # Set the TCP proxy listening ports. + tls: true + - addr: "127.0.0.1:9101" + udp: # Set the UDP proxy listening ports. 
+ - 9200 + - "127.0.0.1:9201" +plugins: + - chaitin-waf +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + # use /do instead of /t because stream server will inject a default /t location + $block->set_value("request", "GET /do"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "host": "127.0.0.1", + "port": 8088 + }, + { + "host": "127.0.0.1", + "port": 8089 + } + ] + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "upstream": { + "servers": ["httpbun.org"] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: timeout +--- request +GET /hello +--- error_code: 200 +--- response_body +hello world +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: timeout +--- response_headers_like +X-APISIX-CHAITIN-WAF-TIME: diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/chaitin-waf.t b/CloudronPackages/APISIX/apisix-source/t/plugin/chaitin-waf.t new file mode 100644 index 0000000..ebff234 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/chaitin-waf.t @@ -0,0 +1,407 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $stream_default_server = <<_EOC_; + server { + listen 8088; + listen 8089; + listen unix:/tmp/safeline-snserver.sock; + content_by_lua_block { + require("lib.chaitin_waf_server").pass() + } + } +_EOC_ + + $block->set_value("extra_stream_config", $stream_default_server); + $block->set_value("stream_conf_enable", 1); + + # setup default conf.yaml + my $extra_yaml_config = $block->extra_yaml_config // <<_EOC_; +apisix: + stream_proxy: # TCP/UDP L4 proxy + only: true # Enable L4 proxy only without L7 proxy. + tcp: + - addr: 9100 # Set the TCP proxy listening ports. + tls: true + - addr: "127.0.0.1:9101" + udp: # Set the UDP proxy listening ports. 
+ - 9200 + - "127.0.0.1:9201" +plugins: + - chaitin-waf +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + # use /do instead of /t because stream server will inject a default /t location + $block->set_value("request", "GET /do"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: wrong schema: nodes empty +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [] + } + ]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: expect array to have at least 1 items"} + + + +=== TEST 2: wrong schema: nodes misses host +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + {} + ] + } + ]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: failed to validate item 1: property \"host\" is required"} + + + +=== TEST 3: sanity +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "host": "127.0.0.1", + "port": 8088 + }, + { + "host": "127.0.0.1", + "port": 8089 + }, + { + "host": "unix:/tmp/safeline-snserver.sock", + "port": 8000 + } + ] + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + 
[[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "upstream": { + "servers": ["httpbun.org"] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: pass +--- request +GET /hello +--- error_code: 200 +--- response_body +hello world +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-STATUS: 200 +X-APISIX-CHAITIN-WAF-ACTION: pass +--- response_headers_like +X-APISIX-CHAITIN-WAF-TIME: + + + +=== TEST 5: match condition +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "upstream": { + "servers": ["httpbun.org"] + }, + "match": [ + { + "vars": [ + ["http_waf","==","true"] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: no match +--- request +GET /hello +--- error_code: 200 +--- response_body +hello world +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: no + + + +=== TEST 7: matched +--- request +GET /hello +--- more_headers +waf: true +--- error_code: 200 +--- response_body +hello world +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-STATUS: 200 +X-APISIX-CHAITIN-WAF-ACTION: pass +--- response_headers_like +X-APISIX-CHAITIN-WAF-TIME: + + + +=== TEST 8: plugin mode off prepare +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "mode": "off", + 
"upstream": { + "servers": ["httpbun.org"] + }, + "match": [ + { + "vars": [ + ["http_waf","==","true"] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: plugin mode off +--- request +GET /hello +--- more_headers +trigger: true +--- error_code: 200 +--- response_body +hello world +--- response_headers +X-APISIX-CHAITIN-WAF: off + + + +=== TEST 10: real_client_ip = false prepare +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "host": "127.0.0.1", + "port": 8088 + } + ] + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "match": [ + { + "vars": [ + ["http_trigger", "==", "true"] + ] + } + ], + "config": { + "real_client_ip": false + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 11: real_client_ip = false +--- request +GET /hello +--- more_headers +X-Forwarded-For: 1.2.3.4 +trigger: true +--- error_code: 200 +--- response_body +hello world +--- response_headers +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-ACTION: pass +X-APISIX-CHAITIN-WAF-STATUS: 200 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/clickhouse-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/clickhouse-logger.t new file mode 100644 index 0000000..4efcf11 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/clickhouse-logger.t @@ -0,0 
+1,315 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 10420; + + location /clickhouse-logger/test1 { + content_by_lua_block { + ngx.req.read_body() + local data = ngx.req.get_body_data() + local headers = ngx.req.get_headers() + ngx.log(ngx.WARN, "clickhouse body: ", data) + for k, v in pairs(headers) do + ngx.log(ngx.WARN, "clickhouse headers: " .. k .. ":" .. 
v) + end + ngx.say("ok") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Full configuration verification +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.clickhouse-logger") + local ok, err = plugin.check_schema({timeout = 3, + retry_delay = 1, + batch_max_size = 500, + user = "default", + password = "a", + database = "default", + logtable = "t", + endpoint_addr = "http://127.0.0.1:1980/clickhouse_logger_server", + max_retry_count = 1, + name = "clickhouse logger", + ssl_verify = false + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 2: Basic configuration verification +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.clickhouse-logger") + local ok, err = plugin.check_schema({user = "default", + password = "a", + database = "default", + logtable = "t", + endpoint_addr = "http://127.0.0.1:1980/clickhouse_logger_server" + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 3: auth configure undefined +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.clickhouse-logger") + local ok, err = plugin.check_schema({user = "default", + password = "a", + database = "default", + logtable = "t" + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +value should match only one schema, but matches none + + + +=== TEST 4: add plugin on routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "a", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + 
"batch_max_size":1, + "inactive_timeout":1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: add plugin on routes using multi clickhouse-logger +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "", + "database": "default", + "logtable": "test", + "endpoint_addrs": ["http://127.0.0.1:8123", + "http://127.0.0.1:8124"], + "batch_max_size":1, + "inactive_timeout":1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 6: hit route +--- request +GET /opentracing +--- error_code: 200 +--- wait: 5 + + + +=== TEST 7: get log +--- exec +echo "select * from default.test" | curl 'http://localhost:8123/' --data-binary @- +echo "select * from default.test" | curl 'http://localhost:8124/' --data-binary @- +--- response_body_like +.*127.0.0.1.*1.* + + + +=== TEST 8: to show that different endpoints will be chosen randomly +--- config + location /t { + content_by_lua_block { + local code_count = {} + local t = require("lib.test_admin").test + for i = 1, 12 do + local code, body = t('/opentracing', ngx.HTTP_GET) + if code ~= 200 then + ngx.say("code: ", code, " body: ", body) + end + code_count[code] = (code_count[code] or 0) + 1 + end + + local code_arr = {} + for code, count in pairs(code_count) do + table.insert(code_arr, {code = code, count = count}) + end + + ngx.say(require("toolkit.json").encode(code_arr)) + ngx.exit(200) + } + } +--- response_body +[{"code":200,"count":12}] +--- 
error_log +sending a batch logs to http://127.0.0.1:8123 +sending a batch logs to http://127.0.0.1:8124 + + + +=== TEST 9: use single clickhouse server +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "", + "database": "default", + "logtable": "test", + "endpoint_addr": "http://127.0.0.1:8123" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: hit route +--- request +GET /opentracing +--- error_code: 200 +--- wait: 5 + + + +=== TEST 11: get log +--- exec +echo "select * from default.test" | curl 'http://localhost:8123/' --data-binary @- +--- response_body_like +.*127.0.0.1.*1.* diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/clickhouse-logger2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/clickhouse-logger2.t new file mode 100644 index 0000000..d36ada3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/clickhouse-logger2.t @@ -0,0 +1,244 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level("info"); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: collect response body option +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "a", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "inactive_timeout":1, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: collect response log +--- request +GET /hello +--- error_log eval +qr/clickhouse body: .*\{.*"body":"hello world\\n"/ +--- wait: 3 + + + +=== TEST 3: collect response body with eval option +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "a", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "inactive_timeout":1, + "include_resp_body": true, + "include_resp_body_expr": [ + [ + "arg_foo", + "==", + "bar" + ] + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: skip collect response 
log for condition +--- request +GET /hello?foo=unknown +--- no_error_log eval +qr/clickhouse body: .*\{.*response":\{.*"body":"hello world\\n"/ +--- wait: 3 + + + +=== TEST 5: collect request body log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "a", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "inactive_timeout":1, + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: collect request body with eval option +--- request +POST /hello?foo=bar +{"sample":"hello"} +--- error_log eval +qr/clickhouse body: .*\{.*request":\{.*"body":"\{\\"sample\\":\\"hello\\"/ +--- wait: 3 + + + +=== TEST 7: collect request body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "clickhouse-logger": { + "user": "default", + "password": "a", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server", + "inactive_timeout":1, + "include_req_body": true, + "include_req_body_expr": [ + [ + "arg_foo", + "==", + "bar" + ] + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: skip collect request body log for condition +--- request +POST /hello?foo=unknown +{"sample":"hello"} +--- no_error_log eval +qr/clickhouse body: 
.*\{.*request":\{.*"body":"\{\\"sample\\":\\"hello\\"/ +--- wait: 3 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/client-control.t b/CloudronPackages/APISIX/apisix-source/t/plugin/client-control.t new file mode 100644 index 0000000..3f174eb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/client-control.t @@ -0,0 +1,187 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "client-control": { + "max_body_size": 5 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit, failed +--- request +POST /hello +123456 +--- error_code: 413 + + + +=== TEST 3: hit, failed with chunked +--- more_headers +Transfer-Encoding: chunked +--- request eval +qq{POST /hello +6\r +Hello \r +0\r +\r +} +--- error_code: 413 +--- error_log +client intended to send too large chunked body + + + +=== TEST 4: hit +--- request +POST /hello +12345 + + + +=== TEST 5: bad body size +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "client-control": { + "max_body_size": -1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } +} +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin client-control err: property \"max_body_size\" 
validation failed: expected -1 to be at least 0"} + + + +=== TEST 6: 0 means no limit +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "client-control": { + "max_body_size": 0 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit +--- request +POST /hello +1 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/consumer-bug-fix.t b/CloudronPackages/APISIX/apisix-source/t/plugin/consumer-bug-fix.t new file mode 100644 index 0000000..ec9ef48 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/consumer-bug-fix.t @@ -0,0 +1,137 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: add consumer jack1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack1", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "echo":{"body": "before change"} + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "key-auth": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: verify 20 times +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello","GET /hello", "GET /hello", "GET /hello", "GET /hello","GET /hello", "GET /hello", "GET /hello", "GET /hello","GET /hello", "GET /hello", "GET /hello", "GET /hello","GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-one +--- response_body eval +["before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change","before change"] + + + +=== TEST 4: modify consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack1", + 
"plugins": { + "key-auth": { + "key": "auth-one" + }, + "echo":{"body": "after change"} + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: verify 20 times +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello","GET /hello", "GET /hello", "GET /hello", "GET /hello","GET /hello", "GET /hello", "GET /hello", "GET /hello","GET /hello", "GET /hello", "GET /hello", "GET /hello","GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-one +--- response_body eval +["after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change","after change"] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/consumer-restriction.t b/CloudronPackages/APISIX/apisix-source/t/plugin/consumer-restriction.t new file mode 100644 index 0000000..9ba590d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/consumer-restriction.t @@ -0,0 +1,1408 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.consumer-restriction") + local conf = { + title = "whitelist", + whitelist = { + "jack1", + "jack2" + } + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- request +GET /t +--- response_body +{"rejected_code":403,"title":"whitelist","type":"consumer_name","whitelist":["jack1","jack2"]} + + + +=== TEST 2: blacklist > whitelist > allowed_by_methods +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.consumer-restriction") + local ok, err = plugin.check_schema({whitelist={"jack1"}, blacklist={"jack2"}, allowed_by_methods={}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 3: add consumer jack1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack1", + "plugins": { + "basic-auth": { + "username": "jack2019", + "password": "123456" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: add consumer jack2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack2", + "plugins": { + "basic-auth": { + "username": "jack2020", + "password": "123456" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET 
/t +--- response_body +passed + + + +=== TEST 5: set whitelist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "whitelist": [ + "jack1" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: verify unauthorized +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authorization in request"} + + + +=== TEST 7: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- response_body +hello world + + + +=== TEST 8: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- error_code: 403 +--- response_body +{"message":"The consumer_name is forbidden."} + + + +=== TEST 9: set blacklist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "blacklist": [ + "jack1" + ], + "rejected_msg": "request is forbidden" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: verify unauthorized +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authorization in request"} + + + +=== TEST 11: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- error_code: 403 +--- response_body +{"message":"request is 
forbidden"} + + + +=== TEST 12: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- response_body +hello world + + + +=== TEST 13: set whitelist without authorization +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "consumer-restriction": { + "whitelist": [ + "jack1" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: verify unauthorized +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the consumer_name for this request"} + + + +=== TEST 15: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the consumer_name for this request"} + + + +=== TEST 16: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the consumer_name for this request"} + + + +=== TEST 17: set blacklist without authorization +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "consumer-restriction": { + "blacklist": [ + "jack1" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: verify unauthorized +--- request +GET /hello +--- 
error_code: 401 +--- response_body +{"message":"The request is rejected, please check the consumer_name for this request"} + + + +=== TEST 19: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the consumer_name for this request"} + + + +=== TEST 20: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the consumer_name for this request"} + + + +=== TEST 21: set allowed_by_methods +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "allowed_by_methods":[{ + "user":"jack1", + "methods":["POST"] + }] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- error_code: 403 +--- response_body +{"message":"The consumer_name is forbidden."} + + + +=== TEST 23: set allowed_by_methods +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "allowed_by_methods":[{ + "user": "jack1", + "methods": ["POST","GET"] + }] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body 
+passed + + + +=== TEST 24: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- response_body +hello world + + + +=== TEST 25: test blacklist priority +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "blacklist": [ + "jack1" + ], + "allowed_by_methods":[{ + "user": "jack1", + "methods": ["POST","GET"] + }] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- error_code: 403 +--- response_body +{"message":"The consumer_name is forbidden."} + + + +=== TEST 27: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- response_body +hello world + + + +=== TEST 28: whitelist blacklist priority +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "whitelist": ["jack1"], + "allowed_by_methods":[{ + "user":"jack1", + "methods":["POST"] + }] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 29: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- response_body +hello world + + + +=== TEST 30: verify jack2 +--- request +GET /hello +--- more_headers 
+Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- error_code: 403 +--- response_body +{"message":"The consumer_name is forbidden."} + + + +=== TEST 31: remove consumer-restriction +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 32: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- response_body +hello world + + + +=== TEST 33: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- response_body +hello world + + + +=== TEST 34: verify unauthorized +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 35: create service (id:1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 001" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 36: add consumer with plugin hmac-auth and consumer-restriction, and set whitelist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "hmac-auth": { + "key_id": "my-access-key", + "secret_key": "my-secret-key" + }, + "consumer-restriction": { + "type": "service_id", + "whitelist": [ "1" ], + "rejected_code": 401 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + 
end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 37: Route binding `hmac-auth` plug-in and whitelist `service_id` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "service_id": 1, + "uri": "/hello", + "plugins": { + "hmac-auth": {} + } + + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 38: verify: valid whitelist `service_id` +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 39: create service (id:2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "new service 002" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 40: Route binding `hmac-auth` plug-in and invalid whitelist `service_id` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "service_id": 2, + "uri": "/hello", + "plugins": { + "hmac-auth": {} + } + + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 41: verify: invalid whitelist `service_id` +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. 
custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } +} +--- request +GET /t +--- error_code: 401 +--- response_body eval +qr/\{"message":"The service_id is forbidden."\}/ + + + +=== TEST 42: add consumer with plugin hmac-auth and consumer-restriction, and set blacklist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "hmac-auth": { + "key_id": "my-access-key", + "secret_key": "my-secret-key" + }, + "consumer-restriction": { + "type": "service_id", + "blacklist": [ "1" ], + "rejected_code": 401 + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 43: Route binding `hmac-auth` plug-in and blacklist `service_id` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "service_id": 1, + "uri": "/hello", + "plugins": { + "hmac-auth": {} + } + + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body 
+passed + + + +=== TEST 44: verify: valid blacklist `service_id` +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- error_code: 401 +--- response_body eval +qr/\{"message":"The service_id is forbidden."\}/ + + + +=== TEST 45: Route binding `hmac-auth` plug-in and invalid blacklist `service_id` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "service_id": 2, + "uri": "/hello", + "plugins": { + "hmac-auth": {} + } + + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 46: verify: invalid blacklist `service_id` +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. 
",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 47: update consumer with plugin hmac-auth and consumer-restriction, and set whitelist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "hmac-auth": { + "key_id": "my-access-key", + "secret_key": "my-secret-key" + }, + "consumer-restriction": { + "type": "route_id", + "whitelist": [ "1" ], + "rejected_code": 401 + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 48: verify: valid whitelist `route_id` +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time() + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + local secret_key = "my-secret-key" + local gmt = ngx_http_time(ngx_time) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. 
key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 49: update consumer with plugin hmac-auth and consumer-restriction, and set blacklist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "hmac-auth": { + "key_id": "my-access-key", + "secret_key": "my-secret-key" + }, + "consumer-restriction": { + "type": "route_id", + "blacklist": [ "1" ], + "rejected_code": 401 + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 50: verify: valid blacklist `route_id` +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. 
"\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- error_code: 401 +--- response_body eval +qr/\{"message":"The route_id is forbidden."\}/ + + + +=== TEST 51: delete: route (id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t( '/apisix/admin/routes/1', ngx.HTTP_DELETE ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 52: delete: `service_id` is 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t( '/apisix/admin/services/1', ngx.HTTP_DELETE ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 53: delete: `service_id` is 2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t( '/apisix/admin/services/2', ngx.HTTP_DELETE ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/consumer-restriction2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/consumer-restriction2.t new file mode 100644 index 0000000..febae31 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/consumer-restriction2.t @@ -0,0 +1,414 @@ +# +# Licensed to the 
Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: create consumer group(group1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/group1', + ngx.HTTP_PUT, + [[{ + "plugins": {} + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create consumer group(group2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/group2', + ngx.HTTP_PUT, + [[{ + "plugins": {} + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: consumer jack1 with consumer group(group1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack1", + "plugins": { + "basic-auth": { + "username": "jack2019", + "password": "123456" + } + }, + 
"group_id": "group1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: consumer jack2 with consumer group(group2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack2", + "plugins": { + "basic-auth": { + "username": "jack2020", + "password": "123456" + } + }, + "group_id": "group2" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: consumer jack3 with no consumer group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack3", + "plugins": { + "basic-auth": { + "username": "jack2021", + "password": "123456" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: set whitelist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "type": "consumer_group_id", + "whitelist": [ + "group1" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: verify unauthorized +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authorization in request"} + + + +=== TEST 8: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- response_body +hello world + + + +=== 
TEST 9: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- error_code: 403 +--- response_body +{"message":"The consumer_group_id is forbidden."} + + + +=== TEST 10: set blacklist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "basic-auth": {}, + "consumer-restriction": { + "type": "consumer_group_id", + "blacklist": [ + "group1" + ], + "rejected_msg": "request is forbidden" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: verify unauthorized +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authorization in request"} + + + +=== TEST 12: verify jack1 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- error_code: 403 +--- response_body +{"message":"request is forbidden"} + + + +=== TEST 13: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- response_body +hello world + + + +=== TEST 14: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjE6MTIzNDU2 +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the consumer_group_id for this request"} + + + +=== TEST 15: set blacklist with service_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "consumer-restriction": { + "type": "service_id", + "blacklist": [ + "1" + ], + "rejected_msg": 
"request is forbidden" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the service_id for this request"} + + + +=== TEST 17: set whitelist with service_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "consumer-restriction": { + "type": "service_id", + "whitelist": [ + "1" + ], + "rejected_msg": "request is forbidden" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the service_id for this request"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/cors.t b/CloudronPackages/APISIX/apisix-source/t/plugin/cors.t new file mode 100644 index 0000000..79e3251 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/cors.t @@ -0,0 +1,929 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.cors") + local ok, err = plugin.check_schema({ + allow_origins = 'http://test.com', + allow_methods = '', + allow_headers = '', + expose_headers = '', + max_age = 600, + allow_credential = true + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: wrong value of key +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.cors") + local ok, err = plugin.check_schema({ + allow_origins = 'http://test.com', + allow_methods = '', + allow_headers = '', + expose_headers = '', + max_age = '600', + allow_credential = true + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + + } + } +--- request +GET /t +--- response_body +property "max_age" validation failed: wrong type: expected integer, got string +done + + + +=== TEST 3: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: update plugin +--- 
config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "**", + "allow_methods": "**", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "madx_age": 5, + "allow_credential": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: disable plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: set route(default) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "cors": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: cors default +--- request +GET /hello HTTP/1.1 +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: * +Vary: +Access-Control-Allow-Methods: * +Access-Control-Allow-Headers: * +Access-Control-Expose-Headers: +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: + + + +=== TEST 8: set route (cors specified) +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "http://sub.domain.com,http://sub2.domain.com", + "allow_methods": "GET,POST", + "allow_headers": "headr1,headr2", + "expose_headers": "ex-headr1,ex-headr2", + "max_age": 50, + "allow_credential": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: cors specified +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub2.domain.com +resp-vary: Via +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: http://sub2.domain.com +Vary: Via, Origin +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: headr1,headr2 +Access-Control-Expose-Headers: ex-headr1,ex-headr2 +Access-Control-Max-Age: 50 +Access-Control-Allow-Credentials: true + + + +=== TEST 10: cors specified no match origin +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub3.domain.com +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: + + + +=== TEST 11: set route(force wildcard) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "**", + "allow_methods": "**", + "allow_headers": "**", + "expose_headers": "*" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 12: cors force wildcard +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +ExternalHeader1: val +ExternalHeader2: val +ExternalHeader3: val +Access-Control-Request-Headers: req-header1,req-header2 +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: https://sub.domain.com +Vary: Origin +Access-Control-Allow-Methods: GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE +Access-Control-Allow-Headers: req-header1,req-header2 +Access-Control-Expose-Headers: * +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: + + + +=== TEST 13: cors force wildcard no origin +--- request +GET /hello HTTP/1.1 +--- more_headers +ExternalHeader1: val +ExternalHeader2: val +ExternalHeader3: val +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: * +Access-Control-Allow-Methods: GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: * +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: + + + +=== TEST 14: options return directly +--- request +OPTIONS /hello HTTP/1.1 +--- response_body + + + +=== TEST 15: set route(auth plugins fails) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "cors": { + "allow_origins": "**", + "allow_methods": "**", + "allow_headers": "*", + "expose_headers": "*" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: auth failed still work +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +ExternalHeader1: val +ExternalHeader2: val +ExternalHeader3: val +--- 
response_body +{"message":"Missing API key in request"} +--- error_code: 401 +--- response_headers +Access-Control-Allow-Origin: https://sub.domain.com +Access-Control-Allow-Methods: GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE +Access-Control-Allow-Headers: * +Access-Control-Expose-Headers: * +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: + + + +=== TEST 17: set route(overwrite upstream) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "**", + "allow_methods": "**", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "allow_credential": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/headers" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: overwrite upstream +--- request +GET /headers?Access-Control-Allow-Origin=https://sub.domain.com HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +--- response_body +/headers +--- response_headers +Access-Control-Allow-Origin: https://sub.domain.com + + + +=== TEST 19: overwrite upstream(Access-Control-Allow-Methods) +--- request +GET /headers?Access-Control-Allow-Methods=methods HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +--- response_body +/headers +--- response_headers +Access-Control-Allow-Methods: GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE + + + +=== TEST 20: overwrite upstream(Access-Control-Allow-Headers) +--- request +GET /headers?Access-Control-Allow-Headers=a-headers HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +--- response_body +/headers +--- response_headers +Access-Control-Allow-Headers: request-h + + + +=== TEST 21: overwrite upstream(Access-Control-Expose-Headers) +--- request +GET 
/headers?Access-Control-Expose-Headers=e-headers HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +--- response_body +/headers +--- response_headers +Access-Control-Expose-Headers: expose-h + + + +=== TEST 22: overwrite upstream(Access-Control-Max-Age) +--- request +GET /headers?Access-Control-Max-Age=10 HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +--- response_body +/headers +--- response_headers +Access-Control-Max-Age: 5 + + + +=== TEST 23: not overwrite upstream(Access-Control-Allow-Credentials) +--- request +GET /headers?Access-Control-Allow-Credentials=false HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +--- response_body +/headers +--- response_headers +Access-Control-Allow-Credentials: true + + + +=== TEST 24: should not set * to allow_origins when allow_credential is true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "*", + "allow_credential": true + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin cors err: you can not/ + + + +=== TEST 25: should not set * to allow_methods when allow_credential is true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_methods": "*", + "allow_credential": true + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin cors err: you can not/ + + + +=== TEST 26: should not set * to allow_headers when allow_credential is true +--- config + 
location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_headers": "*", + "allow_credential": true + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin cors err: you can not/ + + + +=== TEST 27: should not set * to expose_headers when allow_credential is true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "expose_headers": "*", + "allow_credential": true + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin cors err: you can not/ +--- no_error_log + + + +=== TEST 28: set route (regex specified) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "http://sub.domain.com,http://sub2.domain.com", + "allow_methods": "GET,POST", + "allow_headers": "headr1,headr2", + "expose_headers": "ex-headr1,ex-headr2", + "max_age": 50, + "allow_credential": true, + "allow_origins_by_regex":[".*\\.test.com$"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 29: regex specified +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://a.test.com +resp-vary: Via +--- response_body +hello world +--- 
response_headers +Access-Control-Allow-Origin: http://a.test.com +Vary: Via, Origin +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: headr1,headr2 +Access-Control-Expose-Headers: ex-headr1,ex-headr2 +Access-Control-Max-Age: 50 +Access-Control-Allow-Credentials: true + + + +=== TEST 30: regex specified not match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://a.test2.com +resp-vary: Via +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: + + + +=== TEST 31: set route (multiple regex specified ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "http://sub.domain.com,http://sub2.domain.com", + "allow_methods": "GET,POST", + "allow_headers": "headr1,headr2", + "expose_headers": "ex-headr1,ex-headr2", + "max_age": 50, + "allow_credential": true, + "allow_origins_by_regex":[".*\\.test.com$",".*\\.example.org$"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 32: multiple regex specified match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://foo.example.org +resp-vary: Via +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: http://foo.example.org +Vary: Via, Origin +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: headr1,headr2 +Access-Control-Expose-Headers: ex-headr1,ex-headr2 +Access-Control-Max-Age: 50 +Access-Control-Allow-Credentials: true + + + +=== TEST 33: multiple regex specified not match +--- 
request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://foo.example.com +resp-vary: Via +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: + + + +=== TEST 34: origin was modified by the proxy_rewrite plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "http://sub.domain.com", + "allow_methods": "GET,POST", + "allow_headers": "headr1,headr2", + "expose_headers": "ex-headr1,ex-headr2", + "max_age": 50, + "allow_credential": true + }, + "proxy-rewrite": { + "headers": { + "Origin": "http://example.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 35: origin is not affected by proxy_rewrite plugins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub.domain.com +resp-vary: Via +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: http://sub.domain.com +Vary: Via, Origin +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: headr1,headr2 +Access-Control-Expose-Headers: ex-headr1,ex-headr2 +Access-Control-Max-Age: 50 +Access-Control-Allow-Credentials: true diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/cors2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/cors2.t new file mode 100644 index 0000000..a2b0630 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/cors2.t @@ -0,0 +1,176 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: validate allow_origins +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.cors") + local function validate(val) + local conf = {} + conf.allow_origins = val + return plugin.check_schema(conf) + end + + local good = { + "*", + "**", + "null", + "http://y.com.uk", + "https://x.com", + "https://x.com,http://y.com.uk", + "https://x.com,http://y.com.uk,http://c.tv", + "https://x.com,http://y.com.uk:12000,http://c.tv", + } + for _, g in ipairs(good) do + local ok, err = validate(g) + if not ok then + ngx.say("failed to validate ", g, ", ", err) + end + end + + local bad = { + "", + "*a", + "*,http://y.com", + "nulll", + "http//y.com.uk", + "x.com", + "https://x.com,y.com.uk", + "https://x.com,*,https://y.com.uk", + "https://x.com,http://y.com.uk,http:c.tv", + } + for _, b in ipairs(bad) do + local ok, err = validate(b) + if ok then + ngx.say("failed to 
reject ", b) + end + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: set route ( regex specified and allow_origins is default value ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "*", + "allow_methods": "GET,POST", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "max_age": 10, + "allow_origins_by_regex":[".*\\.domain.com$"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: regex specified not match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub.test.com +resp-vary: Via +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: + + + +=== TEST 4: regex no origin specified +--- request +GET /hello HTTP/1.1 +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: + + + +=== TEST 5: regex specified match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub.domain.com +resp-vary: Via +--- response_headers +Access-Control-Allow-Origin: http://sub.domain.com +Vary: Via +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/cors3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/cors3.t new file mode 100644 index 0000000..80e4eca --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/cors3.t @@ -0,0 +1,422 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: validate metadata allow_origins +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.cors") + local schema_type = require("apisix.core").schema.TYPE_METADATA + local function validate(val) + local conf = {} + conf.allow_origins = val + return plugin.check_schema(conf, schema_type) + end + + local good = { + key_1 = "*", + key_2 = "**", + key_3 = "null", + key_4 = "http://y.com.uk", + key_5 = "https://x.com", + key_6 = "https://x.com,http://y.com.uk", + key_7 = "https://x.com,http://y.com.uk,http://c.tv", + key_8 = "https://x.com,http://y.com.uk:12000,http://c.tv", + } + local ok, err = validate(good) + if not ok then + ngx.say("failed to validate ", g, ", ", err) + end + 
+ local bad = { + "", + "*a", + "*,http://y.com", + "nulll", + "http//y.com.uk", + "x.com", + "https://x.com,y.com.uk", + "https://x.com,*,https://y.com.uk", + "https://x.com,http://y.com.uk,http:c.tv", + } + for _, b in ipairs(bad) do + local ok, err = validate({key = b}) + if ok then + ngx.say("failed to reject ", b) + end + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: set plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/cors', + ngx.HTTP_PUT, + [[{ + "allow_origins": { + "key_1": "https://domain.com", + "key_2": "https://sub.domain.com,https://sub2.domain.com", + "key_3": "*" + }, + "inactive_timeout": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: set route (allow_origins_by_metadata specified) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "https://test.com", + "allow_origins_by_metadata": ["key_1"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: origin not match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://foo.example.org +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: +Vary: Origin +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: + + + +=== TEST 5: origin matches with allow_origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: https://test.com +resp-vary: Via +--- response_body +hello world +--- 
response_headers +Access-Control-Allow-Origin: https://test.com +Vary: Via, Origin +Access-Control-Allow-Methods: * +Access-Control-Allow-Headers: * +Access-Control-Expose-Headers: +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: + + + +=== TEST 6: origin matches with allow_origins_by_metadata +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: https://domain.com +resp-vary: Via +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: https://domain.com +Vary: Via, Origin +Access-Control-Allow-Methods: * +Access-Control-Allow-Headers: * +Access-Control-Expose-Headers: +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: + + + +=== TEST 7: set route (multiple allow_origins_by_metadata specified) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "https://test.com", + "allow_origins_by_metadata": ["key_1", "key_2"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: origin not match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://foo.example.org +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: +Vary: Origin +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: + + + +=== TEST 9: origin matches with first allow_origins_by_metadata +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: https://domain.com +resp-vary: Via +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: https://domain.com +Vary: Via, Origin +Access-Control-Allow-Methods: * +Access-Control-Allow-Headers: * 
+Access-Control-Expose-Headers: +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: + + + +=== TEST 10: origin matches with second allow_origins_by_metadata +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: https://sub.domain.com +resp-vary: Via +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: https://sub.domain.com +Vary: Via, Origin +Access-Control-Allow-Methods: * +Access-Control-Allow-Headers: * +Access-Control-Expose-Headers: +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: + + + +=== TEST 11: set route (wildcard in allow_origins_by_metadata) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "https://test.com", + "allow_origins_by_metadata": ["key_3"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: origin matches by wildcard +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://foo.example.org +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: http://foo.example.org +Vary: Origin +Access-Control-Allow-Methods: * +Access-Control-Allow-Headers: * +Access-Control-Expose-Headers: +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: + + + +=== TEST 13: set route (allow_origins_by_metadata specified and allow_origins * is invalid while set allow_origins_by_metadata) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "*", + "allow_origins_by_metadata": ["key_1"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": 
"roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: origin not match because allow_origins * invalid +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://foo.example.org +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: + + + +=== TEST 15: origin matches with first allow_origins_by_metadata +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: https://domain.com +--- response_body +hello world +--- response_headers +Access-Control-Allow-Origin: https://domain.com +Access-Control-Allow-Methods: * +Access-Control-Allow-Headers: * +Access-Control-Expose-Headers: +Access-Control-Max-Age: 5 +Access-Control-Allow-Credentials: diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/cors4.t b/CloudronPackages/APISIX/apisix-source/t/plugin/cors4.t new file mode 100644 index 0000000..28407c5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/cors4.t @@ -0,0 +1,751 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: validate timing_allow_origins +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.cors") + local function validate(val) + local conf = {} + conf.timing_allow_origins = val + return plugin.check_schema(conf) + end + + local good = { + "*", + "**", + "null", + "http://y.com.uk", + "https://x.com", + "https://x.com,http://y.com.uk", + "https://x.com,http://y.com.uk,http://c.tv", + "https://x.com,http://y.com.uk:12000,http://c.tv", + } + for _, g in ipairs(good) do + local ok, err = validate(g) + if not ok then + ngx.say("failed to validate ", g, ", ", err) + end + end + + local bad = { + "", + "*a", + "*,http://y.com", + "nulll", + "http//y.com.uk", + "x.com", + "https://x.com,y.com.uk", + "https://x.com,*,https://y.com.uk", + "https://x.com,http://y.com.uk,http:c.tv", + } + for _, b in ipairs(bad) do + local ok, err = validate(b) + if ok then + ngx.say("failed to reject ", b) + end + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: set route ( allow_origins default, timing_allow_origins specified ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "*", + "allow_methods": "GET,POST", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "max_age": 10, + "timing_allow_origins": "http://sub.domain.com" + } + }, + "upstream": 
{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: origin matching +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub.domain.com +--- response_headers +Access-Control-Allow-Origin: * +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 +Access-Control-Allow-Credentials: +Timing-Allow-Origin: http://sub.domain.com + + + +=== TEST 4: origin not matching timing_allow_origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://other.domain.com +--- response_headers +Access-Control-Allow-Origin: * +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 +Access-Control-Allow-Credentials: +Timing-Allow-Origin: + + + +=== TEST 5: set route ( allow_origins same as timing_allow_origins ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "http://sub.domain.com", + "allow_methods": "GET,POST", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "max_age": 10, + "timing_allow_origins": "http://sub.domain.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: origin matching +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub.domain.com +--- response_headers +Access-Control-Allow-Origin: http://sub.domain.com +Access-Control-Allow-Methods: GET,POST 
+Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 +Access-Control-Allow-Credentials: +Timing-Allow-Origin: http://sub.domain.com + + + +=== TEST 7: origin not matching +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://other.domain.com +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: +Timing-Allow-Origin: + + + +=== TEST 8: set route ( allow_origins differs from timing_allow_origins ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "http://one.domain.com", + "allow_methods": "GET,POST", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "max_age": 10, + "timing_allow_origins": "http://another.domain.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: origin matching allow_origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://one.domain.com +--- response_headers +Access-Control-Allow-Origin: http://one.domain.com +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 +Access-Control-Allow-Credentials: +Timing-Allow-Origin: + + + +=== TEST 10: origin matching timing_allow_origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://another.domain.com +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: 
+Access-Control-Allow-Credentials: +Timing-Allow-Origin: http://another.domain.com + + + +=== TEST 11: origin not matching +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://notexistent.domain.com +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: +Timing-Allow-Origin: + + + +=== TEST 12: set route ( allow_origins superset of timing_allow_origins ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "http://one.domain.com,http://two.domain.com", + "allow_methods": "GET,POST", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "max_age": 10, + "timing_allow_origins": "http://one.domain.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: origin matching allow_origins and timing_allow_origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://one.domain.com +--- response_headers +Access-Control-Allow-Origin: http://one.domain.com +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 +Access-Control-Allow-Credentials: +Timing-Allow-Origin: http://one.domain.com + + + +=== TEST 14: origin matching only allow_origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://two.domain.com +--- response_headers +Access-Control-Allow-Origin: http://two.domain.com +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h 
+Access-Control-Max-Age: 10 +Access-Control-Allow-Credentials: +Timing-Allow-Origin: + + + +=== TEST 15: origin not matching +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://notexistent.domain.com +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: +Timing-Allow-Origin: + + + +=== TEST 16: set route ( allow_origins and timing_allow_origins are two different sets with intersection ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins": "http://one.domain.com,http://two.domain.com", + "allow_methods": "GET,POST", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "max_age": 10, + "timing_allow_origins": "http://one.domain.com,http://three.domain.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: origin matching allow_origins and timing_allow_origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://one.domain.com +--- response_headers +Access-Control-Allow-Origin: http://one.domain.com +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 +Access-Control-Allow-Credentials: +Timing-Allow-Origin: http://one.domain.com + + + +=== TEST 18: origin matching only allow_origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://two.domain.com +--- response_headers +Access-Control-Allow-Origin: http://two.domain.com +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h 
+Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 +Access-Control-Allow-Credentials: +Timing-Allow-Origin: + + + +=== TEST 19: origin matching only timing_allow_origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://three.domain.com +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: +Timing-Allow-Origin: http://three.domain.com + + + +=== TEST 20: origin not matching +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://notexistent.domain.com +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Access-Control-Allow-Credentials: +Timing-Allow-Origin: + + + +=== TEST 21: set route ( allow_origins and timing_allow_origins specified with regex ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins_by_regex": ["http://.*?\\.domain\\.com"], + "allow_methods": "GET,POST", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "max_age": 10, + "timing_allow_origins_by_regex": ["http://.*?\\.domain\\.com"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: regex specified match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub.domain.com +--- response_headers +Access-Control-Allow-Origin: http://sub.domain.com +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 
+Timing-Allow-Origin: http://sub.domain.com + + + +=== TEST 23: regex no match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://other.newdomain.com +--- response_headers +Access-Control-Allow-Origin: +Access-Control-Allow-Methods: +Access-Control-Allow-Headers: +Access-Control-Expose-Headers: +Access-Control-Max-Age: +Timing-Allow-Origin: + + + +=== TEST 24: set route ( allow_origins and timing_allow_origins specified with different regex ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_origins_by_regex": ["http://.*?\\.domain\\.com"], + "allow_methods": "GET,POST", + "allow_headers": "request-h", + "expose_headers": "expose-h", + "max_age": 10, + "timing_allow_origins_by_regex": ["http://test.*?\\.domain\\.com"], + "timing_allow_origins": "http://nonexistent.newdomain.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 25: regex specified match, test priority of regex over list of origins +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://testurl.domain.com +--- response_headers +Access-Control-Allow-Origin: http://testurl.domain.com +Access-Control-Allow-Methods: GET,POST +Access-Control-Allow-Headers: request-h +Access-Control-Expose-Headers: expose-h +Access-Control-Max-Age: 10 +Timing-Allow-Origin: http://testurl.domain.com + + + +=== TEST 26: set route ( expose_headers not specified ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_credential": true, + "allow_headers": "**", + "allow_methods": "**", + 
"allow_origins": "**", + "expose_headers": "", + "max_age": 3500 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 27: remove Access-Control-Expose-Headers match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub.domain.com +--- response_headers +Access-Control-Allow-Origin: http://sub.domain.com +Access-Control-Allow-Methods: GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE +Access-Control-Expose-Headers: +Access-Control-Allow-Headers: +Access-Control-Max-Age: 3500 +Access-Control-Allow-Credentials: true + + + +=== TEST 28: set route ( expose_headers set value ) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "cors": { + "allow_credential": true, + "allow_headers": "**", + "allow_methods": "**", + "allow_origins": "**", + "expose_headers": "ex-headr1,ex-headr2", + "max_age": 3500 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 29: Access-Control-Expose-Headers match +--- request +GET /hello HTTP/1.1 +--- more_headers +Origin: http://sub.domain.com +--- response_headers +Access-Control-Allow-Origin: http://sub.domain.com +Access-Control-Allow-Methods: GET,POST,PUT,DELETE,PATCH,HEAD,OPTIONS,CONNECT,TRACE +Access-Control-Expose-Headers: ex-headr1,ex-headr2 +Access-Control-Allow-Headers: +Access-Control-Max-Age: 3500 +Access-Control-Allow-Credentials: true diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/csrf.t b/CloudronPackages/APISIX/apisix-source/t/plugin/csrf.t new file mode 100644 
index 0000000..6ed8b95 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/csrf.t @@ -0,0 +1,390 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.csrf") + local ok, err = plugin.check_schema({name = '_csrf', expires = 3600, key = 'testkey'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: set csrf plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "csrf": { + "key": "userkey", + "expires": 1000000000 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + 
+ + +=== TEST 3: have csrf cookie +--- request +GET /hello +--- response_headers_like +Set-Cookie: apisix-csrf-token\s*=\s*[^;]+(.*)?$ + + + +=== TEST 4: block request +--- request +POST /hello +--- error_code: 401 +--- response_body +{"error_msg":"no csrf token in headers"} + + + +=== TEST 5: only header +--- request +POST /hello +--- more_headers +apisix-csrf-token: wrongtoken +--- error_code: 401 +--- response_body +{"error_msg":"no csrf cookie"} + + + +=== TEST 6: only cookie +--- request +POST /hello +--- more_headers +Cookie: apisix-csrf-token=testcookie +--- error_code: 401 +--- response_body +{"error_msg":"no csrf token in headers"} + + + +=== TEST 7: header and cookie mismatch +--- request +POST /hello +--- more_headers +apisix-csrf-token: wrongtoken +Cookie: apisix-csrf-token=testcookie +--- error_code: 401 +--- response_body +{"error_msg":"csrf token mismatch"} + + + +=== TEST 8: invalid csrf token +--- request +POST /hello +--- more_headers +apisix-csrf-token: eyJyYW5kb20iOjAuMTYwOTgzMDYwMTg0NDksInNpZ24iOiI2YTEyYmViYTI4MzAyNDg4MDRmNGU0N2VkZDY5MWFmNjg5N2IyNzQ4YTY1YWMwMDJiMGFjMzFlN2NlMDdlZTViIiwiZXhwaXJlcyI6MTc0MzExOTkxMX0= +Cookie: apisix-csrf-token=eyJyYW5kb20iOjAuMTYwOTgzMDYwMTg0NDksInNpZ24iOiI2YTEyYmViYTI4MzAyNDg4MDRmNGU0N2VkZDY5MWFmNjg5N2IyNzQ4YTY1YWMwMDJiMGFjMzFlN2NlMDdlZTViIiwiZXhwaXJlcyI6MTc0MzExOTkxMX0= +--- error_code: 401 +--- error_log: Invalid signatures +--- response_body +{"error_msg":"Failed to verify the csrf token signature"} + + + +=== TEST 9: valid csrf token +--- request +POST /hello +--- more_headers +apisix-csrf-token: eyJyYW5kb20iOjAuNDI5ODYzMTk3MTYxMzksInNpZ24iOiI0ODRlMDY4NTkxMWQ5NmJhMDc5YzQ1ZGI0OTE2NmZkYjQ0ODhjODVkNWQ0NmE1Y2FhM2UwMmFhZDliNjE5OTQ2IiwiZXhwaXJlcyI6MjY0MzExOTYyNH0= +Cookie: apisix-csrf-token=eyJyYW5kb20iOjAuNDI5ODYzMTk3MTYxMzksInNpZ24iOiI0ODRlMDY4NTkxMWQ5NmJhMDc5YzQ1ZGI0OTE2NmZkYjQ0ODhjODVkNWQ0NmE1Y2FhM2UwMmFhZDliNjE5OTQ2IiwiZXhwaXJlcyI6MjY0MzExOTYyNH0= + + + +=== TEST 10: change expired +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "csrf": { + "key": "userkey", + "expires": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: expired csrf token +--- request +POST /hello +--- more_headers +apisix-csrf-token: eyJyYW5kb20iOjAuMDY3NjAxMDQwMDM5MzI4LCJzaWduIjoiOTE1Yjg2MjBhNTg1N2FjZmIzNjIxOTNhYWVlN2RkYjY5NmM0NWYwZjE5YjY5Zjg3NjM4ZTllNGNjNjYxYjQwNiIsImV4cGlyZXMiOjE2NDMxMjAxOTN9 +Cookie: apisix-csrf-token=eyJyYW5kb20iOjAuMDY3NjAxMDQwMDM5MzI4LCJzaWduIjoiOTE1Yjg2MjBhNTg1N2FjZmIzNjIxOTNhYWVlN2RkYjY5NmM0NWYwZjE5YjY5Zjg3NjM4ZTllNGNjNjYxYjQwNiIsImV4cGlyZXMiOjE2NDMxMjAxOTN9 +--- error_code: 401 +--- error_log: token has expired + + + +=== TEST 12: token has expired after sleep 2s +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + local cookie = res.headers["Set-Cookie"] + local token = cookie:match("=([^;]+)") + + ngx.sleep(2) + + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = { + ["apisix-csrf-token"] = token, + ["Cookie"] = cookie, + } + }) + if not res then + ngx.say(err) + return + end + + if res.status >= 300 then + ngx.status = res.status + end + } + } +--- error_code: 401 +--- error_log: token has expired + + + +=== TEST 13: set expires 0 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "csrf": { + "key": "userkey", + "expires": 0 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: token no expired after sleep 1s +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + + ngx.sleep(1) + + local cookie = res.headers["Set-Cookie"] + local token = cookie:match("=([^;]+)") + + local res, err = httpc:request_uri(uri, { + method = "POST", + headers = { + ["apisix-csrf-token"] = token, + ["Cookie"] = cookie, + } + }) + if not res then + ngx.say(err) + return + end + + if res.status >= 300 then + ngx.status = res.status + end + ngx.status = res.status + ngx.print(res.body) + } + } +--- response_body +hello world + + + +=== TEST 15: data encryption for key +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "csrf": { + "key": "userkey", + "expires": 1000000000 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["csrf"].key) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["csrf"].key) + } + } +--- response_body +userkey +mt39FazQccyMqt4ctoRV7w== diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/custom_sort_plugins.t b/CloudronPackages/APISIX/apisix-source/t/plugin/custom_sort_plugins.t new file mode 100644 index 0000000..41a23b9 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/custom_sort_plugins.t @@ -0,0 +1,633 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +no_long_string(); +no_root_location(); +log_level("info"); +run_tests; + +__DATA__ + +=== TEST 1: custom priority and default priority on different routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + }, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if 
code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + }, + "serverless-pre-function": { + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: verify order +--- request +GET /hello +--- response_body +serverless-post-function +serverless-pre-function + + + +=== TEST 3: routing without custom plugin order is not affected +--- request +GET /hello1 +--- response_body +serverless-pre-function +serverless-post-function + + + +=== TEST 4: custom priority and default priority on same route +# the priority of serverless-post-function is -2000, execute serverless-post-function first +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + }, + "serverless-pre-function": { + "_meta": { + "priority": -2001 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: verify order +--- request +GET /hello +--- response_body +serverless-post-function 
+serverless-pre-function + + + +=== TEST 6: merge plugins from consumer and route, execute the rewrite phase +# in the rewrite phase, the plugins on the route must be executed first, +# and then executed the rewrite phase of the plugins on the consumer, +# and the custom plugin order fails for this case. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: verify order(more requests) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local httpc = http.new() + local headers = {} + headers["apikey"] = "auth-one" + local res, err = httpc:request_uri(uri, {method = "GET", headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + + local res, err = httpc:request_uri(uri, {method = "GET", headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +serverless-pre-function +serverless-post-function +serverless-pre-function +serverless-post-function + + + +=== TEST 8: merge plugins form custom and route, execute the access phase +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "access", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "access", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: verify order +--- request +GET /hello +--- more_headers +apikey: auth-one +--- response_body +serverless-post-function +serverless-pre-function + + + +=== TEST 10: merge plugins form service and route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "service_id": "1", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: verify order +--- request +GET /hello +--- response_body +serverless-post-function +serverless-pre-function + + + +=== TEST 12: custom plugins sort is not affected by plugins reload +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + + local t = require("lib.test_admin").test + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + + ngx.say(org_body) + + ngx.sleep(0.2) + + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +serverless-post-function +serverless-pre-function +done +serverless-post-function +serverless-pre-function + + + +=== TEST 13: merge plugins form plugin_configs and route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function\"); + end"] + } + } + }]] + ) + if code > 300 then + ngx.status = code + ngx.say(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function\"); + end"] + } + }, + "plugin_config_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: verify order +--- request +GET /hello +--- response_body +serverless-post-function +serverless-pre-function + + + +=== TEST 15: custom plugins sort on global_rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + 
"serverless-post-function": { + "_meta": { + "priority": 10000 + }, + "phase": "rewrite", + "functions" : ["return function(conf, ctx) + ngx.say(\"serverless-post-function on global rule\"); + end"] + }, + "serverless-pre-function": { + "_meta": { + "priority": -2000 + }, + "phase": "rewrite", + "functions": ["return function(conf, ctx) + ngx.say(\"serverless-pre-function on global rule\"); + end"] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: verify order +--- request +GET /hello +--- response_body +serverless-post-function on global rule +serverless-pre-function on global rule +serverless-post-function +serverless-pre-function + + + +=== TEST 17: delete global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/datadog.t b/CloudronPackages/APISIX/apisix-source/t/plugin/datadog.t new file mode 100644 index 0000000..506abcc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/datadog.t @@ -0,0 +1,537 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("stream_conf_enable", 1); + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen 8125 udp; + content_by_lua_block { + require("lib.mock_layer4").dogstatsd() + } + } +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity check metadata +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.datadog") + local ok, err = plugin.check_schema({host = "127.0.0.1", port = 8125}, core.schema.TYPE_METADATA) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- setting the metadata + local code, meta_body = t('/apisix/admin/plugin_metadata/datadog', + ngx.HTTP_PUT, + [[{ + "host":"127.0.0.1", + "port": 8125 + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "datadog": { + "batch_max_size" : 1, + "max_retry_count": 0 + } + }, + "upstream": 
{ + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "name": "datadog", + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + + ngx.say(meta_body) + ngx.say(body) + } + } +--- response_body +passed +passed + + + +=== TEST 3: testing behaviour with mock suite +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + } + } +--- wait: 0.5 +--- response_body +opentracing +--- grep_error_log eval +qr/message received: apisix(.+?(?=, ))/ +--- grep_error_log_out eval +qr/message received: apisix\.request\.counter:1\|c\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.request\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.upstream\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +/ + + + +=== TEST 4: testing behaviour with multiple requests +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + + -- request 2 + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + 
end + ngx.print(body) + } + } +--- wait: 0.5 +--- response_body +opentracing +opentracing +--- grep_error_log eval +qr/message received: apisix(.+?(?=, ))/ +--- grep_error_log_out eval +qr/message received: apisix\.request\.counter:1\|c\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.request\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.upstream\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.request\.counter:1\|c\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.request\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.upstream\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +/ + + + +=== TEST 5: testing behaviour with different namespace +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + + -- Change the metadata + local code, meta_body = t('/apisix/admin/plugin_metadata/datadog', + ngx.HTTP_PUT, + [[{ + "host":"127.0.0.1", + "port": 8125, + "namespace": "mycompany" + }]]) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(meta_body) + + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + } + } +--- wait: 0.5 +--- response_body +passed +opentracing +--- grep_error_log eval +qr/message received: mycompany(.+?(?=, ))/ +--- grep_error_log_out eval +qr/message received: mycompany\.request\.counter:1\|c\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: mycompany\.request\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: mycompany\.upstream\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: mycompany\.apisix\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: mycompany\.ingress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: mycompany\.egress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +/ + + + +=== TEST 6: testing behaviour with different constant tags +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- Change the metadata + local code, meta_body = t('/apisix/admin/plugin_metadata/datadog', + ngx.HTTP_PUT, + [[{ + "host":"127.0.0.1", + "port": 8125, + "constant_tags": [ + "source:apisix", + "new_tag:must" + ] + }]]) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(meta_body) + + local code, _, body = 
t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + } + } +--- wait: 0.5 +--- response_body +passed +opentracing +--- grep_error_log eval +qr/message received: apisix(.+?(?=, ))/ +--- grep_error_log_out eval +qr/message received: apisix\.request\.counter:1\|c\|#source:apisix,new_tag:must,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.request\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.upstream\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:datadog,balancer_ip:[\d.]+,response_status:200,scheme:http +/ + + + +=== TEST 7: testing behaviour when route_name is missing - must fallback to route_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "datadog": { + "batch_max_size" : 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + + -- making a request to the route + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.print(body) + } + } +--- response_body +passed +opentracing +--- wait: 0.5 +--- grep_error_log eval 
+qr/message received: apisix(.+?(?=, ))/ +--- grep_error_log_out eval +qr/message received: apisix\.request\.counter:1\|c\|#source:apisix,new_tag:must,route_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.request\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.upstream\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +/ + + + +=== TEST 8: testing behaviour with service id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "name": "service-1", + "plugins": { + "datadog": { + "batch_max_size" : 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + + -- create a route with service level abstraction + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "route-1", + "uri": "/opentracing", + "service_id": "1" + + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + + -- making a request to the route + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.print(body) + } + } +--- response_body +passed +passed +opentracing +--- wait: 0.5 +--- 
grep_error_log eval +qr/message received: apisix(.+?(?=, ))/ +--- grep_error_log_out eval +qr/message received: apisix\.request\.counter:1\|c\|#source:apisix,new_tag:must,route_name:route-1,service_name:service-1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.request\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:route-1,service_name:service-1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.upstream\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:route-1,service_name:service-1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:route-1,service_name:service-1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:route-1,service_name:service-1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:route-1,service_name:service-1,balancer_ip:[\d.]+,response_status:200,scheme:http +/ + + + +=== TEST 9: testing behaviour with prefer_name is false and service name is nil +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "datadog": { + "batch_max_size" : 1, + "prefer_name": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say(body) + + -- making a request to the route + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.print(body) + } + } +--- response_body +passed +opentracing +--- wait: 0.5 +--- grep_error_log eval +qr/message received: apisix(.+?(?=, ))/ +--- 
grep_error_log_out eval +qr/message received: apisix\.request\.counter:1\|c\|#source:apisix,new_tag:must,route_name:1,service_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.request\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:1,service_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.upstream\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:1,service_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,new_tag:must,route_name:1,service_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:1,service_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,new_tag:must,route_name:1,service_name:1,balancer_ip:[\d.]+,response_status:200,scheme:http +/ + + + +=== TEST 10: testing behaviour with consumer +--- apisix_yaml +consumers: + - username: user0 + plugins: + key-auth: + key: user0 +routes: + - uri: /opentracing + name: datadog + upstream: + nodes: + "127.0.0.1:1982": 1 + plugins: + datadog: + batch_max_size: 1 + max_retry_count: 0 + key-auth: {} +#END +--- request +GET /opentracing?apikey=user0 +--- response_body +opentracing +--- wait: 0.5 +--- grep_error_log eval +qr/message received: apisix(.+?(?=, ))/ +--- grep_error_log_out eval +qr/message received: apisix\.request\.counter:1\|c\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.request\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.upstream\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: 
apisix\.apisix\.latency:[\d.]+\|h\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.ingress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http +message received: apisix\.egress\.size:[\d]+\|ms\|#source:apisix,route_name:datadog,consumer:user0,balancer_ip:[\d.]+,response_status:200,scheme:http +/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/degraphql.t b/CloudronPackages/APISIX/apisix-source/t/plugin/degraphql.t new file mode 100644 index 0000000..cd499d7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/degraphql.t @@ -0,0 +1,422 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: query list +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/graphql", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8888": 1 + } + }, + "plugins": { + "degraphql": { + "query": "{\n persons {\n id\n name\n }\n}\n" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/graphql" + local headers = { + ["Content-Type"] = "application/json" + } + local res, err = httpc:request_uri(uri, {headers = headers, method = "POST"}) + if not res then + ngx.say(err) + return + end + + local json = require("toolkit.json") + ngx.say(json.encode(res.body)) + } + } +--- response_body +"{\"data\":{\"persons\":[{\"id\":\"7\",\"name\":\"Niek\"},{\"id\":\"8\",\"name\":\"Josh\"},{\"id\":\"9\",\"name\":\"Simon\"},{\"id\":\"10\",\"name\":\"Audun\"},{\"id\":\"11\",\"name\":\"Truls\"},{\"id\":\"12\",\"name\":\"Maria\"},{\"id\":\"13\",\"name\":\"Zahin\"},{\"id\":\"14\",\"name\":\"Roberto\"},{\"id\":\"15\",\"name\":\"Susanne\"},{\"id\":\"16\",\"name\":\"Live JS\"},{\"id\":\"17\",\"name\":\"Dave\"},{\"id\":\"18\",\"name\":\"Matt\"}]}}" + + + +=== TEST 2: query with variables +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/graphql", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8888": 1 + } + }, + "plugins": { + 
"degraphql": { + "query": "query($name: String!) {\n persons(filter: { name: $name }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}", + "variables": [ + "name" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: hit +--- request +POST /graphql +{ + "name": "Josh", + "githubAccount":"npalm" +} +--- more_headers +Content-Type: application/json +--- response_body chomp +{"data":{"persons":[{"id":"8","name":"Josh","blog":"","githubAccount":"joshlong","talks":[]}]}} + + + +=== TEST 4: query with more variables +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/graphql", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8888": 1 + } + }, + "plugins": { + "degraphql": { + "query": "query($name: String!, $githubAccount: String!) 
{\n persons(filter: { name: $name, githubAccount: $githubAccount }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}", + "variables": [ + "name", + "githubAccount" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit +--- request +POST /graphql +{ + "name":"Niek", + "githubAccount":"npalm" +} +--- more_headers +Content-Type: application/json +--- response_body chomp +{"data":{"persons":[{"id":"7","name":"Niek","blog":"https://040code.github.io","githubAccount":"npalm","talks":[{"id":"19","title":"GraphQL - The Next API Language"},{"id":"20","title":"Immutable Infrastructure"}]}]}} + + + +=== TEST 6: without body +--- request +POST /graphql +--- error_log +missing request body +--- error_code: 400 + + + +=== TEST 7: invalid body +--- request +POST /graphql +"AA" +--- more_headers +Content-Type: application/json +--- error_log +invalid request body can't be decoded +--- error_code: 400 + + + +=== TEST 8: proxy should ensure the Content-Type is correct +--- request +POST /graphql +{ + "name":"Niek", + "githubAccount":"npalm" +} +--- response_body chomp +{"data":{"persons":[{"id":"7","name":"Niek","blog":"https://040code.github.io","githubAccount":"npalm","talks":[{"id":"19","title":"GraphQL - The Next API Language"},{"id":"20","title":"Immutable Infrastructure"}]}]}} + + + +=== TEST 9: schema check +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local query1 = "query persons($name: String!) {\n persons(filter: { name: $name }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}" + local query2 = "query githubAccount($name: String!, $githubAccount: String!) 
{\n persons(filter: { name: $name, githubAccount: $githubAccount }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}" + for _, case in ipairs({ + {input = { + }}, + {input = { + query = "uery {}", + }}, + {input = { + query = "query {}", + variables = {}, + }}, + {input = { + query = query1 .. query2, + }}, + }) do + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + { + id = "1", + plugins = { + ["degraphql"] = case.input + } + } + ) + ngx.print(body) + end + } +} +--- response_body +{"error_msg":"failed to check the configuration of plugin degraphql err: property \"query\" is required"} +{"error_msg":"failed to check the configuration of plugin degraphql err: failed to parse query: Syntax error near line 1"} +{"error_msg":"failed to check the configuration of plugin degraphql err: property \"variables\" validation failed: expect array to have at least 1 items"} +{"error_msg":"failed to check the configuration of plugin degraphql err: operation_name is required if multiple operations are present in the query"} + + + +=== TEST 10: check operation_name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("apisix.core.json") + local query1 = "query persons($name: String!) {\n persons(filter: { name: $name }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}" + local query2 = "query githubAccount($name: String!, $githubAccount: String!) {\n persons(filter: { name: $name, githubAccount: $githubAccount }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}" + local query = json.encode(query1 .. query2) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/graphql", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8888": 1 + } + }, + "plugins": { + "degraphql": { + "query": ]] .. query .. 
[[, + "operation_name": "persons", + "variables": [ + "name" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit +--- request +POST /graphql +{ + "name": "Josh", + "githubAccount":"npalm" +} +--- response_body chomp +{"data":{"persons":[{"id":"8","name":"Josh","blog":"","githubAccount":"joshlong","talks":[]}]}} + + + +=== TEST 12: GET with variables +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/graphql", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8888": 1 + } + }, + "plugins": { + "degraphql": { + "query": "query($name: String!, $githubAccount: String!) {\n persons(filter: { name: $name, githubAccount: $githubAccount }) {\n id\n name\n blog\n githubAccount\n talks {\n id\n title\n }\n }\n}", + "variables": [ + "name", + "githubAccount" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit +--- request +GET /graphql?name=Niek&githubAccount=npalm +--- response_body chomp +{"data":{"persons":[{"id":"7","name":"Niek","blog":"https://040code.github.io","githubAccount":"npalm","talks":[{"id":"19","title":"GraphQL - The Next API Language"},{"id":"20","title":"Immutable Infrastructure"}]}]}} + + + +=== TEST 14: GET without variables +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/graphql", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:8888": 1 + } + }, + "plugins": { + "degraphql": { + "query": "{\n persons {\n id\n name\n }\n}\n" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + 
ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: hit +--- request +GET /graphql +--- response_body chomp +{"data":{"persons":[{"id":"7","name":"Niek"},{"id":"8","name":"Josh"},{"id":"9","name":"Simon"},{"id":"10","name":"Audun"},{"id":"11","name":"Truls"},{"id":"12","name":"Maria"},{"id":"13","name":"Zahin"},{"id":"14","name":"Roberto"},{"id":"15","name":"Susanne"},{"id":"16","name":"Live JS"},{"id":"17","name":"Dave"},{"id":"18","name":"Matt"}]}} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/dubbo-proxy/route.t b/CloudronPackages/APISIX/apisix-source/t/plugin/dubbo-proxy/route.t new file mode 100644 index 0000000..d21b062 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/dubbo-proxy/route.t @@ -0,0 +1,321 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/mod_dubbo/) { + plan(skip_all => "mod_dubbo not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->disable_dubbo) { + my $extra_yaml_config = <<_EOC_; +plugins: + - dubbo-proxy + - response-rewrite + - proxy-rewrite + - key-auth +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + + if ($block->apisix_yaml) { + my $upstream = <<_EOC_; +upstreams: + - nodes: + "127.0.0.1:20880": 1 + type: roundrobin + id: 1 +#END +_EOC_ + + $block->set_value("apisix_yaml", $block->apisix_yaml . 
$upstream); + } + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: ignore route's dubbo configuration if dubbo is disable globally +--- disable_dubbo +--- apisix_yaml +routes: + - + uri: /hello + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + method: hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +--- response_body +hello world + + + +=== TEST 2: check schema +--- apisix_yaml +routes: + - + uri: /hello + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + method: hello + upstream_id: 1 +--- error_log +property "service_version" is required +--- error_code: 404 + + + +=== TEST 3: sanity +--- apisix_yaml +routes: + - + uri: /hello + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + method: hello + upstream_id: 1 +--- more_headers +Extra-Arg-K: V +--- response_headers +Got-extra-arg-k: V +--- response_body +dubbo success + + + +=== TEST 4: enabled in service +--- apisix_yaml +routes: + - uri: /hello + service_id: 1 + +services: + - + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + method: hello + id: 1 + upstream_id: 1 +--- response_body +dubbo success + + + +=== TEST 5: work with consumer +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: true +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, message = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username":"jack", + "plugins": { + "key-auth": { + "key": "jack" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream":{ + "nodes": { + "127.0.0.1:20880": 1 + }, + "type": "roundrobin" + }, + 
"plugins": { + "dubbo-proxy": { + "service_name": "org.apache.dubbo.backend.DemoService", + "service_version": "1.0.0", + "method": "hello" + }, + "key-auth": {} + }, + "uris": ["/hello"] + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(message) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: blocked +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: true +--- error_code: 401 + + + +=== TEST 7: passed +--- yaml_config +apisix: + node_listen: 1984 + enable_admin: true +--- more_headers +apikey: jack +--- response_body +dubbo success + + + +=== TEST 8: rewrite response +--- apisix_yaml +routes: + - + uri: /hello + plugins: + response-rewrite: + headers: + fruit: banana + body: "hello world\n" + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + method: hello + upstream_id: 1 + +--- response_body +hello world +--- response_headers +fruit: banana + + + +=== TEST 9: rewrite request +--- apisix_yaml +routes: + - + uri: /hello + plugins: + proxy-rewrite: + headers: + extra-arg-fruit: banana + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + method: hello + upstream_id: 1 + +--- response_body +dubbo success +--- response_headers +Got-extra-arg-fruit: banana + + + +=== TEST 10: use uri as default method +--- apisix_yaml +routes: + - + uri: /hello + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + upstream_id: 1 + +--- response_body +dubbo success + + + +=== TEST 11: version mismatch +--- apisix_yaml +routes: + - + uri: /hello + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 0.1.0 + method: hello + upstream_id: 1 +--- more_headers +Extra-Arg-K: V +--- error_code: 502 +--- error_log +may be version or group mismatch diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/dubbo-proxy/upstream.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/dubbo-proxy/upstream.t new file mode 100644 index 0000000..c54c091 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/dubbo-proxy/upstream.t @@ -0,0 +1,163 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/mod_dubbo/) { + plan(skip_all => "mod_dubbo not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); +worker_connections(256); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } + + if (!defined $block->disable_dubbo) { + my $extra_yaml_config = <<_EOC_; +plugins: + - dubbo-proxy +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: retry +--- apisix_yaml +upstreams: + - nodes: + - host: 127.0.0.1 + port: 20881 + weight: 1 + - host: 127.0.0.1 + port: 20880 + weight: 1 + type: roundrobin + id: 1 +routes: + - + uri: /hello + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + method: hello + upstream_id: 1 +#END +--- response_body +dubbo success +--- ignore_error_log + + + +=== TEST 2: upstream return error +--- apisix_yaml +routes: + - + uri: /hello + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + method: fail + upstream_id: 1 +upstreams: + - nodes: + "127.0.0.1:20880": 1 + type: roundrobin + id: 1 +#END +--- response_body +dubbo fail +--- error_code: 503 + + + +=== TEST 3: upstream timeout +--- apisix_yaml +routes: + - + uri: /hello + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + method: timeout + upstream_id: 1 +upstreams: + - nodes: + "127.0.0.1:20880": 1 + type: roundrobin + timeout: + connect: 0.1 + 
read: 0.1 + send: 0.1 + id: 1 +#END +--- error_log +upstream timed out +--- error_code: 504 + + + +=== TEST 4: upstream return non-string status code +--- apisix_yaml +routes: + - + uri: /hello + plugins: + dubbo-proxy: + service_name: org.apache.dubbo.backend.DemoService + service_version: 1.0.0 + method: badStatus + upstream_id: 1 +upstreams: + - nodes: + "127.0.0.1:20880": 1 + type: roundrobin + id: 1 +#END +--- response_body +ok diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/echo.t b/CloudronPackages/APISIX/apisix-source/t/plugin/echo.t new file mode 100644 index 0000000..25ab78f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/echo.t @@ -0,0 +1,298 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.echo") + local ok, err = plugin.check_schema({before_body = "body before", body = "body to attach", + after_body = "body to attach"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: wrong type of integer +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.echo") + local ok, err = plugin.check_schema({before_body = "body before", body = "body to attach", + after_body = 10}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "after_body" validation failed: wrong type: expected string, got number +done + + + +=== TEST 3: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "before_body": "before the body modification ", + "body":"hello upstream", + "after_body": " after the body modification.", + "headers": { + "Location":"https://www.iresty.com", + "Authorization": "userpass" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: access +--- request +GET /hello +--- response_body chomp +before the body modification hello upstream after the body modification. 
+--- response_headers +Location: https://www.iresty.com +Authorization: userpass +--- wait: 0.2 + + + +=== TEST 5: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "before_body": "before the body modification ", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: access without upstream body change +--- request +GET /hello +--- response_body +before the body modification hello world +--- response_headers +Location: https://www.iresty.com +--- wait: 0.2 +--- wait: 0.2 + + + +=== TEST 7: print the `conf` in etcd, no dirty data +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local encode_with_keys_sorted = require("toolkit.json").encode + + local code, _, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "before_body": "before the body modification ", + "headers": { + "Location":"https://www.iresty.com" + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local resp_data = core.json.decode(body) + ngx.say(encode_with_keys_sorted(resp_data.value.plugins)) + } + } +--- request +GET /t +--- response_body +{"echo":{"before_body":"before the body modification ","headers":{"Location":"https://www.iresty.com"}}} + + + +=== TEST 8: set body with chunked upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "body":"hello upstream" + } + }, + 
"upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello_chunked" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: access +--- request +GET /hello_chunked +--- response_body chomp +hello upstream + + + +=== TEST 10: add before/after body with chunked upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "echo": { + "before_body": "before the body modification ", + "after_body": " after the body modification." + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello_chunked" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: access +--- request +GET /hello_chunked +--- response_body chomp +before the body modification hello world + after the body modification. diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/elasticsearch-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/elasticsearch-logger.t new file mode 100644 index 0000000..7b08103 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/elasticsearch-logger.t @@ -0,0 +1,994 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local ok, err + local configs = { + -- full configuration + { + endpoint_addr = "http://127.0.0.1:9200", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + ssl_verify = false, + timeout = 60, + max_retry_count = 0, + retry_delay = 1, + buffer_duration = 60, + inactive_timeout = 2, + batch_max_size = 10, + }, + -- minimize configuration + { + endpoint_addr = "http://127.0.0.1:9200", + field = { + index = "services" + } + }, + -- property "endpoint_addr" is required + { + field = { + index = "services" + } + }, + -- property "field" is required + { + endpoint_addr = "http://127.0.0.1:9200", + }, + -- property "index" is required + { + endpoint_addr = "http://127.0.0.1:9200", + field = {} + }, + -- property "endpoint" must not end with "/" + { + endpoint_addr = "http://127.0.0.1:9200/", + field = { + index = "services" + } + } + } + + local plugin = require("apisix.plugins.elasticsearch-logger") + for i = 1, #configs do + ok, err = plugin.check_schema(configs[i]) + if err then + ngx.say(err) + else + ngx.say("passed") + end + end + } + } +--- response_body_like +passed +passed +value should match only one schema, but matches none +value should 
match only one schema, but matches none +property "field" validation failed: property "index" is required +property "endpoint_addr" validation failed: failed to match pattern "\[\^/\]\$" with "http://127.0.0.1:9200/" + + + +=== TEST 2: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger', + ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9200", + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: test route (success write) +--- extra_init_by_lua + local core = require("apisix.core") + local http = require("resty.http") + local ngx_re = require("ngx.re") + local log_util = require("apisix.utils.log-util") + log_util.inject_get_full_log(function(ngx, conf) + return { + test = "test" + } + end) + + http.request_uri = function(self, uri, params) + if params.method == "GET" then + return { + status = 200, + body = [[ + { + "version": { + "number": "8.10.2" + } + } + ]] + } + end + if not params.body or type(params.body) ~= "string" then + return nil, "invalid params body" + end + + local arr = ngx_re.split(params.body, "\n") + if not arr or #arr ~= 2 then + return nil, "invalid params body" + end + + local entry = core.json.decode(arr[2]) + local origin_entry = log_util.get_full_log(ngx, {}) + for k, v in pairs(origin_entry) do + local vv = entry[k] + if not vv or vv ~= v then + return nil, "invalid params body" + end + end + + core.log.error("check elasticsearch full log body success") + return { + status = 200, + body = "success" + }, nil + end +--- request +GET 
/hello +--- wait: 2 +--- response_body +hello world +--- error_log +check elasticsearch full log body success + + + +=== TEST 4: set route (auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: test route (auth success) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] successfully processed the entries + + + +=== TEST 6: set route (no auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: test route (no auth, failed) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +failed to process entries: elasticsearch server returned status: 401 + + + +=== TEST 8: set route (error auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = 
"roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "111111" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: test route (error auth failed) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] failed to process entries +Batch Processor[elasticsearch-logger] exceeded the max_retry_count + + + +=== TEST 10: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/elasticsearch-logger', + ngx.HTTP_PUT, [[{ + "log_format": { + "custom_host": "$host", + "custom_timestamp": "$time_iso8601", + "custom_client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body_like +passed +passed + + + +=== TEST 11: hit route and check custom elasticsearch logger +--- extra_init_by_lua + local core = require("apisix.core") + local http = require("resty.http") + local ngx_re = require("ngx.re") + local log_util = require("apisix.utils.log-util") + log_util.inject_get_custom_format_log(function(ctx, format) + return { + test = "test" + } + end) + + http.request_uri = function(self, uri, params) + if params.method 
== "GET" then + return { + status = 200, + body = [[ + { + "version": { + "number": "8.10.2" + } + } + ]] + } + end + if not params.body or type(params.body) ~= "string" then + return nil, "invalid params body" + end + + local arr = ngx_re.split(params.body, "\n") + if not arr or #arr ~= 2 then + return nil, "invalid params body" + end + + local entry = core.json.decode(arr[2]) + local origin_entry = log_util.get_custom_format_log(nil, nil) + for k, v in pairs(origin_entry) do + local vv = entry[k] + if not vv or vv ~= v then + return nil, "invalid params body" + end + end + + core.log.error("check elasticsearch custom body success") + return { + status = 200, + body = "success" + }, nil + end +--- request +GET /hello +--- response_body +hello world +--- wait: 2 +--- error_log +check elasticsearch custom body success + + + +=== TEST 12: data encryption for auth.password +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["elasticsearch-logger"].auth.password) + + -- get plugin conf from etcd, password is encrypted + 
local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["elasticsearch-logger"].auth.password) + } + } +--- response_body +123456 +PTQvJEaPcNOXcOHeErC0XQ== + + + +=== TEST 13: add plugin on routes using multi elasticsearch-logger +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addrs = {"http://127.0.0.1:9200", "http://127.0.0.1:9201"}, + field = { + index = "services" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: to show that different endpoints will be chosen randomly +--- config + location /t { + content_by_lua_block { + local code_count = {} + local t = require("lib.test_admin").test + for i = 1, 12 do + local code, body = t('/hello', ngx.HTTP_GET) + if code ~= 200 then + ngx.say("code: ", code, " body: ", body) + end + code_count[code] = (code_count[code] or 0) + 1 + end + + local code_arr = {} + for code, count in pairs(code_count) do + table.insert(code_arr, {code = code, count = count}) + end + + ngx.say(require("toolkit.json").encode(code_arr)) + ngx.exit(200) + } + } +--- response_body +[{"code":200,"count":12}] +--- error_log +http://127.0.0.1:9200/_bulk +http://127.0.0.1:9201/_bulk + + + +=== TEST 15: log format in plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = 
"services" + }, + log_format = { + custom_host = "$host" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: hit route and check custom elasticsearch logger +--- extra_init_by_lua + local core = require("apisix.core") + local http = require("resty.http") + local ngx_re = require("ngx.re") + local log_util = require("apisix.utils.log-util") + log_util.inject_get_custom_format_log(function(ctx, format) + return { + test = "test" + } + end) + + http.request_uri = function(self, uri, params) + if params.method == "GET" then + return { + status = 200, + body = [[ + { + "version": { + "number": "8.10.2" + } + } + ]] + } + end + if not params.body or type(params.body) ~= "string" then + return nil, "invalid params body" + end + + local arr = ngx_re.split(params.body, "\n") + if not arr or #arr ~= 2 then + return nil, "invalid params body" + end + + local entry = core.json.decode(arr[2]) + local origin_entry = log_util.get_custom_format_log(nil, nil) + for k, v in pairs(origin_entry) do + local vv = entry[k] + if not vv or vv ~= v then + return nil, "invalid params body" + end + end + + core.log.error("check elasticsearch custom body success") + return { + status = 200, + body = "success" + }, nil + end +--- request +GET /hello +--- response_body +hello world +--- wait: 2 +--- error_log +check elasticsearch custom body success + + + +=== TEST 17: using unsupported field (type) for elasticsearch v8 should work normally +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services", + type = "collector" + }, + auth = { + username 
= "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: test route (auth success) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- no_error_log +Action/metadata line [1] contains an unknown parameter [_type] + + + +=== TEST 19: add plugin with 'include_req_body' setting, collect request log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/elasticsearch-logger', ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1, + include_req_body = true + } + } + }) + + if code >= 300 then + ngx.status = code + end + + local code, _, body = t("/hello", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- error_log +"body":"{\"sample_payload\":\"hello\"}" + + + +=== TEST 20: add plugin with 'include_resp_body' setting, collect response log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/elasticsearch-logger', ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9201", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1, + include_req_body = true, + include_resp_body = 
true + } + } + }) + + if code >= 300 then + ngx.status = code + end + + local code, _, body = t("/hello", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- error_log +"body":"hello world\n" + + + +=== TEST 21: set route (auth) - check compat with version 9 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9301", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: test route (auth success) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] successfully processed the entries + + + +=== TEST 23: set route (auth) - check compat with version 7 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9401", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 24: test route (auth success) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] successfully processed the entries + + + +=== TEST 25: set route (auth) - check 
compat with version 6 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["elasticsearch-logger"] = { + endpoint_addr = "http://127.0.0.1:9501", + field = { + index = "services" + }, + auth = { + username = "elastic", + password = "123456" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 26: test route (auth success) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[elasticsearch-logger] successfully processed the entries diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger-clickhouse.t b/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger-clickhouse.t new file mode 100644 index 0000000..91ef60b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger-clickhouse.t @@ -0,0 +1,295 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - error-log-logger +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: test schema checker +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.error-log-logger") + local ok, err = plugin.check_schema( + { + clickhouse = { + user = "default", + password = "a", + database = "default", + logtable = "t", + endpoint_addr = "http://127.0.0.1:1980/clickhouse_logger_server" + } + }, + core.schema.TYPE_METADATA + ) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: test unreachable server +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "clickhouse": { + "user": "default", + "password": "a", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server" + }, + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.warn("this is a warning message for test2.") + } + } +--- response_body +--- error_log +this is a warning message for test2 +clickhouse body: INSERT INTO t FORMAT JSONEachRow +clickhouse headers: x-clickhouse-key:a +clickhouse headers: x-clickhouse-user:default +clickhouse headers: x-clickhouse-database:default +--- wait: 3 + + + +=== TEST 3: put plugin metadata and log an error level message +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "clickhouse": { + "user": "default", + "password": "a", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server" + }, + "batch_max_size": 15, + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.warn("this is a warning message for test3.") + } + } +--- response_body +--- error_log +this is a warning message for test3 +clickhouse body: INSERT INTO t FORMAT JSONEachRow +clickhouse headers: x-clickhouse-key:a +clickhouse headers: x-clickhouse-user:default +clickhouse headers: x-clickhouse-database:default +--- wait: 5 + + + +=== TEST 4: log a warn level message +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.warn("this is a warning message for test4.") + } + } +--- response_body +--- error_log +this is a warning message for test4 +clickhouse body: INSERT INTO t FORMAT JSONEachRow +clickhouse headers: x-clickhouse-key:a +clickhouse headers: x-clickhouse-user:default +clickhouse headers: x-clickhouse-database:default +--- wait: 5 + + + +=== TEST 5: log some messages +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.warn("this is a warning message for test5.") + } + } +--- response_body +--- error_log +this is a warning message for test5 +clickhouse body: INSERT INTO t FORMAT JSONEachRow +clickhouse headers: x-clickhouse-key:a +clickhouse headers: x-clickhouse-user:default +clickhouse headers: x-clickhouse-database:default +--- wait: 5 + + + +=== TEST 6: log an info level message +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.info("this is an info message for test6.") + } + } +--- response_body +--- error_log +this is an info message for test6 +--- wait: 5 + + + +=== TEST 7: delete metadata for the plugin, recover to the 
default +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: data encryption for clickhouse.password +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "clickhouse": { + "user": "default", + "password": "bar", + "database": "default", + "logtable": "t", + "endpoint_addr": "http://127.0.0.1:1980/clickhouse_logger_server" + }, + "batch_max_size": 15, + "inactive_timeout": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value["clickhouse"].password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/plugin_metadata/error-log-logger')) + + ngx.say(res.body.node.value["clickhouse"].password) + } + } +--- response_body +bar +77+NmbYqNfN+oLm0aX5akg== + + + +=== TEST 9: verify use the decrypted password to connect to clickhouse +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.warn("this is a warning message for test9") + } + } +--- 
response_body +--- error_log +this is a warning message for test9 +clickhouse headers: x-clickhouse-key:bar +--- wait: 5 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger-kafka.t b/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger-kafka.t new file mode 100644 index 0000000..afae2a5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger-kafka.t @@ -0,0 +1,203 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level("info"); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - error-log-logger +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: test schema checker +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.error-log-logger") + local ok, err = plugin.check_schema( + { + kafka = { + brokers = { + { + host = "127.0.0.1", + port = 9092 + } + }, + kafka_topic = "test2" + } + }, + core.schema.TYPE_METADATA + ) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: put plugin metadata and log an error level message - no auth kafka +--- extra_init_by_lua + local core = require("apisix.core") + local producer = require("resty.kafka.producer") + local old_producer_new = producer.new + producer.new = function(self, broker_list, producer_config, cluster_name) + core.log.info("broker_config is: ", core.json.delay_encode(producer_config)) + return old_producer_new(self, broker_list, producer_config, cluster_name) + end +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "kafka": { + "brokers": [{ + "host": "127.0.0.1", + "port": 9092 + }], + "kafka_topic": "test2", + "meta_refresh_interval": 1 + }, + "level": "ERROR", + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.error("this is a error message for test2.") + } + } +--- error_log eval +[qr/this is a error message for test2/, +qr/send data to kafka: .*test2/, 
+qr/broker_config is: \{.*"refresh_interval":1000/, +] +--- wait: 3 + + + +=== TEST 3: log a error level message +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.error("this is a error message for test3.") + } + } +--- error_log eval +[qr/this is a error message for test3/, +qr/send data to kafka: .*test3/] +--- wait: 5 + + + +=== TEST 4: log an warning level message - will not send to kafka brokers +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + core.log.warn("this is an warning message for test4.") + } + } +--- error_log +this is an warning message for test4 +--- no_error_log eval +qr/send data to kafka: .*test4/ +--- wait: 5 + + + +=== TEST 5: put plugin metadata and log an error level message - auth kafka +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "kafka": { + "brokers": [{ + "host": "127.0.0.1", + "port": 19094, + "sasl_config": { + "mechanism": "PLAIN", + "user": "admin", + "password": "admin-secret" + } + }], + "producer_type": "sync", + "kafka_topic": "test4" + }, + "level": "ERROR", + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.error("this is a error message for test5.") + } + } +--- error_log eval +[qr/this is a error message for test5/, +qr/send data to kafka: .*test5/] +--- wait: 3 + + + +=== TEST 6: delete metadata for the plugin, recover to the default +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed diff --git 
a/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger-skywalking.t b/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger-skywalking.t new file mode 100644 index 0000000..da339f6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger-skywalking.t @@ -0,0 +1,229 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +worker_connections(128); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - error-log-logger +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: test schema checker +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.error-log-logger") + local ok, err = plugin.check_schema( + { + skywalking = { + endpoint_addr = "http://127.0.0.1" + } + }, + core.schema.TYPE_METADATA + ) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: test unreachable server +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "skywalking": { + "endpoint_addr": "http://127.0.0.1:1988/log" + }, + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log eval +qr/Batch Processor\[error-log-logger\] failed to process entries: error while sending data to skywalking\[http:\/\/127.0.0.1:1988\/log\] connection refused, context: ngx.timer/ +--- wait: 3 + + + +=== TEST 3: put plugin metadata and log an error level message +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "skywalking": { + "endpoint_addr": "http://127.0.0.1:1982/log", + "service_instance_name": "instance" + }, + "batch_max_size": 15, + 
"inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.error("this is an error message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log +this is an error message for test +--- wait: 5 + + + +=== TEST 4: log a warn level message +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log eval +qr/.*\[\{\"body\":\{\"text\":\{\"text\":\".*this is a warning message for test.*\"\}\},\"endpoint\":\"\",\"service\":\"APISIX\",\"serviceInstance\":\"instance\".*/ +--- wait: 5 + + + +=== TEST 5: log some messages +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + core.log.error("this is an error message for test.") + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log eval +qr/.*\[\{\"body\":\{\"text\":\{\"text\":\".*this is an error message for test.*\"\}\},\"endpoint\":\"\",\"service\":\"APISIX\",\"serviceInstance\":\"instance\".*\},\{\"body\":\{\"text\":\{\"text\":\".*this is a warning message for test.*\"\}\}.*/ +--- wait: 5 + + + +=== TEST 6: log an info level message +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + core.log.info("this is an info message for test.") + } + } +--- request +GET /tg +--- response_body +--- no_error_log eval +qr/.*\[\{\"body\":\{\"text\":\{\"text\":\".*this is an info message for test.*\"\}\},\"endpoint\":\"\",\"service\":\"APISIX\",\"serviceInstance\":\"instance\".*/ +--- wait: 5 + + + +=== TEST 7: delete metadata for the plugin, recover to the default +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = 
code + end + + ngx.say(body) + } + } +--- request +GET /tg +--- response_body +passed + + + +=== TEST 8: put plugin metadata with $hostname and log an error level message +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "skywalking": { + "endpoint_addr": "http://127.0.0.1:1982/log", + "service_instance_name": "$hostname" + }, + "batch_max_size": 15, + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.error("this is an error message for test.") + } + } +--- request +GET /tg +--- response_body +--- no_error_log eval +qr/\\\"serviceInstance\\\":\\\"\$hostname\\\"/ +qr/\\\"serviceInstance\\\":\\\"\\\"/ +--- wait: 0.5 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger.t new file mode 100644 index 0000000..bc2deaa --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/error-log-logger.t @@ -0,0 +1,447 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $stream_single_server = <<_EOC_; + # fake server, only for test + server { + listen 1999; + + content_by_lua_block { + local exiting = ngx.worker.exiting + local sock, err = ngx.req.socket(true) + if not sock then + ngx.log(ngx.WARN, "socket error:", err) + return + end + + sock:settimeout(30 * 1000) + while(not exiting()) + do + local data, err = sock:receive() + if (data) then + ngx.log(ngx.INFO, "[Server] receive data:", data) + else + if err ~= "timeout" then + ngx.log(ngx.WARN, "socket error:", err) + return + end + end + end + + } + } +_EOC_ + + $block->set_value("stream_config", $stream_single_server); + + my $stream_default_server = <<_EOC_; + content_by_lua_block { + ngx.log(ngx.INFO, "a stream server") + } +_EOC_ + + $block->set_value("stream_server_config", $stream_default_server); + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - error-log-logger +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: not enable the plugin +--- extra_yaml_config +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- response_body +--- no_error_log +error-log-logger +--- wait: 2 + + + +=== TEST 2: enable the plugin, but not init the metadata +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log eval +qr/please set the correct plugin_metadata for error-log-logger/ +--- wait: 2 + + + +=== TEST 3: set a wrong metadata +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "tcp": { + "port": 1999 + }, + "inactive_timeout": 1 + }]] + ) + + -- ensure the request is rejected even this plugin doesn't + -- have check_schema method + ngx.status = code + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- error_code: 400 +--- response_body +--- error_log eval +qr/please set the correct plugin_metadata for error-log-logger/ +--- wait: 2 + + + +=== TEST 4: test unreachable server +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "tcp": { + "host": "127.0.0.1", + "port": 2999 + }, + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- response_body +--- no_error_log eval +qr/\[Server\] receive data:.*this is a warning message for test./ +--- wait: 3 + + + +=== TEST 5: log a warn level message +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "tcp": { + "host": "127.0.0.1", + "port": 1999 + }, + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log eval +qr/\[Server\] receive data:.*this is a warning message for test./ +--- wait: 5 + + + +=== TEST 6: log an error level message +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + ngx.sleep(2) + core.log.error("this is an error message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log eval +qr/\[Server\] receive 
data:.*this is an error message for test./ +--- wait: 5 + + + +=== TEST 7: log an info level message +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + ngx.sleep(2) + core.log.info("this is an info message for test.") + } + } +--- request +GET /tg +--- response_body +--- no_error_log eval +qr/\[Server\] receive data:.*this is an info message for test./ +--- wait: 5 + + + +=== TEST 8: delete metadata for the plugin, recover to the default +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /tg +--- response_body +passed + + + +=== TEST 9: want to reload the plugin by route +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "error-log-logger": { + "tcp": { + "host": "127.0.0.1", + "port": 1999 + }, + "inactive_timeout": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + -- reload + code, body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log eval +qr/please set the correct plugin_metadata for error-log-logger/ +--- wait: 2 + + + +=== TEST 10: avoid sending stale error log +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + core.log.warn("this is a warning message for test.") + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "tcp": { + "host": "127.0.0.1", + "port": 1999 
+ }, + "level": "ERROR", + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.error("this is an error message for test.") + } + } +--- request +GET /tg +--- response_body +--- no_error_log eval +qr/\[Server\] receive data:.*this is a warning message for test./ +--- error_log eval +qr/\[Server\] receive data:.*this is an error message for test./ +--- wait: 5 + + + +=== TEST 11: delete the route +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /tg +--- response_body +passed + + + +=== TEST 12: log a warn level message (schema compatibility testing) +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "tcp": { + "host": "127.0.0.1", + "port": 1999 + }, + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.warn("this is a warning message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log eval +qr/\[Server\] receive data:.*this is a warning message for test./ +--- wait: 5 + + + +=== TEST 13: log an error level message (schema compatibility testing) +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + ngx.sleep(2) + core.log.error("this is an error message for test.") + } + } +--- request +GET /tg +--- response_body +--- error_log eval +qr/\[Server\] receive data:.*this is an error message for test./ +--- wait: 5 + + + +=== TEST 14: log an info level message (schema compatibility testing) +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + ngx.sleep(2) + core.log.info("this is an info message for test.") + } + } +--- request +GET /tg 
+--- response_body +--- no_error_log eval +qr/\[Server\] receive data:.*this is an info message for test./ +--- wait: 5 + + + +=== TEST 15: delete metadata for the plugin, recover to the default (schema compatibility testing) +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /tg +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/example.t b/CloudronPackages/APISIX/apisix-source/t/plugin/example.t new file mode 100644 index 0000000..527c547 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/example.t @@ -0,0 +1,341 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.example-plugin") + local ok, err = plugin.check_schema({i = 1, s = "s", t = {1}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: missing args +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.example-plugin") + + local ok, err = plugin.check_schema({s = "s", t = {1}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "i" is required +done + + + +=== TEST 3: small then minimum +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.example-plugin") + local ok, err = plugin.check_schema({i = -1, s = "s", t = {1, 2}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "i" validation failed: expected -1 to be at least 0 +done + + + +=== TEST 4: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.example-plugin") + local ok, err = plugin.check_schema({i = 1, s = 123, t = {1}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "s" validation failed: wrong type: expected string, got number +done + + + +=== TEST 5: the size of array < minItems +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.example-plugin") + local ok, err = plugin.check_schema({i = 1, s = '123', t = {}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "t" validation failed: expect array to have at least 1 items +done + + + +=== TEST 6: 
load plugins +--- config + location /t { + content_by_lua_block { + local plugins, err = require("apisix.plugin").load() + if not plugins then + ngx.say("failed to load plugins: ", err) + end + + local encode_json = require("toolkit.json").encode + local conf = {} + local ctx = {} + for _, plugin in ipairs(plugins) do + ngx.say("plugin name: ", plugin.name, + " priority: ", plugin.priority) + + plugin.rewrite(conf, ctx) + end + } + } +--- request +GET /t +--- response_body +plugin name: example-plugin priority: 0 +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + etcd: + host: + - "http://127.0.0.1:2379" # etcd address + prefix: "/apisix" # apisix configurations prefix + timeout: 1 +plugins: + - example-plugin + - not-exist-plugin +--- grep_error_log eval +qr/\[error\].*/ +--- grep_error_log_out eval +qr/module 'apisix.plugins.not-exist-plugin' not found/ + + + +=== TEST 7: filter plugins +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugin") + + local all_plugins, err = plugin.load() + if not all_plugins then + ngx.say("failed to load plugins: ", err) + end + + local filter_plugins = plugin.filter(nil, { + value = { + plugins = { + ["example-plugin"] = {i = 1, s = "s", t = {1, 2}}, + ["new-plugin"] = {a = "a"}, + } + }, + modifiedIndex = 1, + }) + + local encode_json = require("toolkit.json").encode + for i = 1, #filter_plugins, 2 do + local plugin = filter_plugins[i] + local plugin_conf = filter_plugins[i + 1] + ngx.say("plugin [", plugin.name, "] config: ", + encode_json(plugin_conf)) + end + } + } +--- request +GET /t +--- response_body +plugin [example-plugin] config: {"i":1,"s":"s","t":[1,2]} + + + +=== TEST 8: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "example-plugin": { + "i": 11, + "ip": "127.0.0.1", + "port": 1981 + 
} + }, + "uri": "/server_port" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit route +--- request +GET /server_port +--- response_body_like eval +qr/1981/ + + + +=== TEST 10: set disable = true +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.example-plugin") + local ok, err = plugin.check_schema({ + i = 1, s = "s", t = {1}, + disable = true, + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 11: set disable = false +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.example-plugin") + local ok, err = plugin.check_schema({ + i = 1, s = "s", t = {1}, + disable = true, + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 12: body filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "example-plugin": { + "i": 11, + "ip": "127.0.0.1", + "port": 1981 + } + }, + "uri": "/server_port" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: hit route +--- request +GET /server_port +--- grep_error_log eval +qr/plugin (body_filter|delayed_body_filter) phase, eof: (false|true)/ +--- grep_error_log_out +plugin body_filter phase, eof: false +plugin delayed_body_filter phase, eof: false +plugin body_filter phase, eof: true +plugin delayed_body_filter phase, eof: true diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/conf_token.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/conf_token.t new file mode 100644 index 0000000..0c659c5 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/conf_token.t @@ -0,0 +1,141 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +workers(3); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); +worker_connections(1024); + +$ENV{"PATH"} = $ENV{PATH} . ":" . $ENV{TEST_NGINX_HTML_DIR}; + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("stream_conf_enable", 1); + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen unix:\$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } + +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + my $unix_socket_path = $ENV{"TEST_NGINX_HTML_DIR"} . "/nginx.sock"; + my $orig_extra_yaml_config = $block->extra_yaml_config // ""; + my $cmd = $block->ext_plugin_cmd // "['sleep', '5s']"; + my $extra_yaml_config = <<_EOC_; +ext-plugin: + path_for_test: $unix_socket_path + cmd: $cmd +_EOC_ + $extra_yaml_config = $extra_yaml_config . 
$orig_extra_yaml_config; + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-pre-req": {"a":"b"} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 2: share conf token in different workers +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + + local t = {} + for i = 1, 16 do + local th = assert(ngx.thread.spawn(function(i) + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.log(ngx.ERR, err) + return + end + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + ngx.say("done") + } + } +--- response_body +done +--- grep_error_log eval +qr/fetch token from shared dict, token: 233/ +--- grep_error_log_out eval +qr/(fetch token from shared dict, token: 233){1,}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/extra-info.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/extra-info.t new file mode 100644 index 0000000..e55bb67 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/extra-info.t @@ -0,0 +1,355 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("stream_conf_enable", 1); + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen unix:\$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } + +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + my $unix_socket_path = $ENV{"TEST_NGINX_HTML_DIR"} . 
"/nginx.sock"; + my $cmd = $block->ext_plugin_cmd // "['sleep', '5s']"; + my $extra_yaml_config = <<_EOC_; +ext-plugin: + path_for_test: $unix_socket_path + cmd: $cmd +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-pre-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 2: var +--- request +GET /hello?x= +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "var", name = "server_addr", result = "127.0.0.1"}, + {type = "var", name = "remote_addr", result = "127.0.0.1"}, + {type = "var", name = "route_id", result = "1"}, + {type = "var", name = "arg_x", result = ""}, + } + ext.go({extra_info = actions, stop = true}) + } + } +--- error_code: 405 +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully +send extra info req successfully +send extra info req successfully +send extra info req successfully + + + +=== TEST 3: ask nonexistent var +--- request +GET /hello +--- more_headers +X-Change: foo +X-Delete: foo +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + 
{type = "var", name = "erver_addr"}, + } + ext.go({extra_info = actions, rewrite = true}) + } + } +--- response_body +uri: /uri +host: localhost +x-add: bar +x-change: bar +x-real-ip: 127.0.0.1 +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully + + + +=== TEST 4: network is down in the middle +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "var", name = "server_addr", result = "127.0.0.1"}, + {type = "closed"}, + } + ext.go({extra_info = actions, stop = true}) + } + } +--- error_code: 503 +--- error_log +failed to receive RPC_HTTP_REQ_CALL: closed + + + +=== TEST 5: ask response body (not exist) +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "respbody", result = nil} + } + ext.go({extra_info = actions}) + } + } +--- error_log: failed to read response body: not exits + + + +=== TEST 6: add route with ext-plugin-post-resp +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/*", + "plugins": { + "ext-plugin-post-resp": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 7: ask var +--- request +GET /hello?x= +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "var", name = "server_addr", result = 
"127.0.0.1"}, + {type = "var", name = "remote_addr", result = "127.0.0.1"}, + {type = "var", name = "route_id", result = "1"}, + {type = "var", name = "arg_x", result = ""}, + } + ext.go({extra_info = actions}) + } + } +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully +send extra info req successfully +send extra info req successfully +send extra info req successfully +--- response_body +hello world + + + +=== TEST 8: ask response body +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "respbody", result = "hello world\n"}, + } + ext.go({extra_info = actions}) + } + } +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully +--- response_body +hello world + + + +=== TEST 9: ask response body (chunked) +--- request +GET /hello_chunked +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "respbody", result = "hello world\n"}, + } + ext.go({extra_info = actions}) + } + } +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully +--- response_body +hello world + + + +=== TEST 10: ask request body (empty) +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "reqbody", result = nil} + } + ext.go({extra_info = actions}) + } + } + + + +=== TEST 11: ask request body +--- request +POST /hello +123 +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local 
actions = { + {type = "reqbody", result = "123"} + } + ext.go({extra_info = actions}) + } + } diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/http-req-call.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/http-req-call.t new file mode 100644 index 0000000..782dfa0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/http-req-call.t @@ -0,0 +1,809 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("stream_conf_enable", 1); + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen unix:\$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } + +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + my $unix_socket_path = $ENV{"TEST_NGINX_HTML_DIR"} . 
"/nginx.sock"; + my $cmd = $block->ext_plugin_cmd // "['sleep', '5s']"; + my $extra_yaml_config = <<_EOC_; +ext-plugin: + path_for_test: $unix_socket_path + cmd: $cmd +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-pre-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 2: stop +--- request +GET /hello +--- response_body chomp +cat +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({stop = true}) + } + } +--- error_code: 405 +--- response_headers +X-Resp: foo +X-Req: bar + + + +=== TEST 3: check input +--- request +PUT /hello?xx=y&xx=z&&y=&&z +--- more_headers +X-Req: foo +X-Req: bar +X-Resp: cat +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({check_input = true}) + } + } + + + +=== TEST 4: check input (ipv6) +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test_ipv6 + t('/hello') + } +} +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({check_input_ipv6 = true}) + } + } +--- listen_ipv6 + + + +=== 
TEST 5: rewrite +--- request +GET /hello +--- more_headers +X-Change: foo +X-Delete: foo +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite = true}) + } + } +--- response_body +uri: /uri +host: localhost +x-add: bar +x-change: bar +x-real-ip: 127.0.0.1 + + + +=== TEST 6: rewrite host +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_host = true}) + } + } +--- response_body +uri: /uri +host: 127.0.0.1 +x-real-ip: 127.0.0.1 + + + +=== TEST 7: rewrite args +--- request +GET /hello?c=foo&d=bar +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_args = true}) + } + } +--- response_body +uri: /plugin_proxy_rewrite_args +a: foo,bar +c: bar + + + +=== TEST 8: proxy-rewrite + rewrite host +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "proxy-rewrite": { + "host": "test.com" + }, + "ext-plugin-post-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 9: hit +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_host = true, check_input_rewrite_host = true}) + } + } +--- response_body +uri: /uri +host: 127.0.0.1 +x-real-ip: 127.0.0.1 + + + +=== TEST 10: proxy-rewrite 
+ rewrite path +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "proxy-rewrite": { + "uri": "/xxx" + }, + "ext-plugin-post-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 11: hit +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_host = true, check_input_rewrite_path = true}) + } + } +--- response_body +uri: /uri +host: 127.0.0.1 +x-real-ip: 127.0.0.1 + + + +=== TEST 12: proxy-rewrite + rewrite path with args +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "proxy-rewrite": { + "uri": "/xxx?x=z" + }, + "ext-plugin-post-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 13: hit +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_args = true, check_input_rewrite_args = true}) + } + } +--- response_body +uri: /plugin_proxy_rewrite_args +a: foo,bar +c: bar +x: z + + + +=== TEST 14: rewrite args only +--- config + location /t { + content_by_lua_block { + local json = 
require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/plugin_proxy_rewrite_args", + "plugins": { + "ext-plugin-post-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 15: hit +--- request +GET /plugin_proxy_rewrite_args +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_args_only = true}) + } + } +--- response_body +uri: /plugin_proxy_rewrite_args +a: foo,bar +c: bar + + + +=== TEST 16: rewrite, bad path +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-post-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 17: hit +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_bad_path = true}) + } + } +--- error_log +undefined path in test server, uri: /plugin_proxy_rewrite_args%3Fa=2 +--- error_code: 404 + + + +=== TEST 18: stop without setting status code +--- request +GET /hello +--- response_body chomp +cat +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({stop = true, 
check_default_status = true}) + } + } +--- response_headers +X-Resp: foo +X-Req: bar + + + +=== TEST 19: rewrite response header and call the upstream service +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_resp_header = true}) + } + } +--- response_body +plugin_proxy_rewrite_resp_header +--- response_headers +X-Resp: foo +X-Req: bar + + + +=== TEST 20: rewrite non-important response headers and call the upstream service +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_vital_resp_header = true}) + } + } +--- response_body +plugin_proxy_rewrite_resp_header +--- response_headers +X-Resp: foo +X-Req: bar +Content-Type: text/plain +Content-Encoding: + + + +=== TEST 21: trace stopped request +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:1980/mock_zipkin", + "sample_ratio": 1 + }, + "ext-plugin-pre-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 22: hit +--- extra_init_by_lua + local prev_new = require("opentracing.tracer").new + local function new(...) + ngx.log(ngx.WARN, "tracer attached to stopped request") + return prev_new(...) 
+ end + require("opentracing.tracer").new = new +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({stop = true}) + } + } +--- error_code: 405 +--- error_log +tracer attached to stopped request + + + +=== TEST 23: set header with OpenResty API should invalidate the cache +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "serverless-pre-function": { + "functions" : ["return function(conf, ctx) + require('apisix.core').request.headers(); + ngx.req.set_header('X-Req', 'foo'); + require('ngx.req').add_header('X-Req', 'bar'); + ngx.req.set_header('X-Resp', 'cat'); + end"] + }, + "ext-plugin-post-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 24: check input +--- request +PUT /hello?xx=y&xx=z&&y=&&z +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({check_input = true}) + } + } + + + +=== TEST 25: rewrite same response headers and call the upstream service +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_same_resp_header = true}) + } + } +--- response_body +plugin_proxy_rewrite_resp_header +--- response_headers +X-Resp: foo +X-Req: bar +X-Same: one, two + + + +=== TEST 26: stop with modify same response headers +--- request +GET /hello +--- response_body chomp +cat +--- 
extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({stop = true}) + } + } +--- error_code: 405 +--- response_headers +X-Resp: foo +X-Req: bar +X-Same: one, two + + + +=== TEST 27: add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "plugins": { + "ext-plugin-pre-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 28: test rewrite request body +--- request +GET /echo +--- response_body chomp +cat +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({rewrite_request_body = true}) + } + } +--- response_body +abc diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/request-body.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/request-body.t new file mode 100644 index 0000000..fe01362 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/request-body.t @@ -0,0 +1,201 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("stream_conf_enable", 1); + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen unix:\$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } + +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + my $unix_socket_path = $ENV{"TEST_NGINX_HTML_DIR"} . "/nginx.sock"; + my $cmd = $block->ext_plugin_cmd // "['sleep', '5s']"; + my $extra_yaml_config = <<_EOC_; +ext-plugin: + path_for_test: $unix_socket_path + cmd: $cmd +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-pre-req": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 2: request body(text) +--- request +POST 
/hello +123 +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "reqbody", result = "123"}, + } + ext.go({extra_info = actions, stop = true, get_request_body = true}) + } + } +--- error_code: 405 +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully + + + +=== TEST 3: request body(x-www-form-urlencoded) +--- request +POST /hello +foo=bar +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "reqbody", result = "foo=bar"}, + } + ext.go({extra_info = actions, stop = true, get_request_body = true}) + } + } +--- error_code: 405 +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully + + + +=== TEST 4: request body(json) +--- request +POST /hello +{"foo":"bar"} +--- more_headers +Content-Type: application/json +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "reqbody", result = "{\"foo\":\"bar\"}"}, + } + ext.go({extra_info = actions, stop = true, get_request_body = true}) + } + } +--- error_code: 405 +--- grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully + + + +=== TEST 5: request body(nil) +--- request +POST /hello +--- extra_stream_config + server { + + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + local actions = { + {type = "reqbody", result = nil}, + } + ext.go({extra_info = actions, stop = true, get_request_body = true}) + } + } +--- error_code: 405 +--- 
grep_error_log eval +qr/send extra info req successfully/ +--- grep_error_log_out +send extra info req successfully diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/response.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/response.t new file mode 100644 index 0000000..d8a2be2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/response.t @@ -0,0 +1,432 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("stream_conf_enable", 1); + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen unix:\$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } + +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + my $unix_socket_path = $ENV{"TEST_NGINX_HTML_DIR"} . 
"/nginx.sock"; + my $cmd = $block->ext_plugin_cmd // "['sleep', '5s']"; + my $extra_yaml_config = <<_EOC_; +ext-plugin: + path_for_test: $unix_socket_path + cmd: $cmd +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/*", + "plugins": { + "ext-plugin-post-resp": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 2: check input +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({check_input = true}) + } + } +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 3: modify body +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({modify_body = true}) + } + } +--- error_code: 200 +--- response_body chomp +cat + + + +=== TEST 4: modify header +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({modify_header = true}) + } + } +--- more_headers +resp-X-Runner: Go-runner +--- error_code: 200 +--- response_headers +X-Runner: Test-Runner +Content-Type: application/json +--- response_body +hello world + + + +=== 
TEST 5: modify same response headers +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({modify_header = true, same_header = true}) + } + } +--- error_code: 200 +--- response_headers +X-Same: one, two +--- response_body +hello world + + + +=== TEST 6: modify status +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({modify_status = true}) + } + } +--- error_code: 304 + + + +=== TEST 7: default allow_degradation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-post-resp": { + "conf": [ + {"name":"foo", "value":"bar"} + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 8: ext-plugin-resp wrong, req reject +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock1; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } +--- error_code: 503 +--- error_log eval +qr/failed to connect to the unix socket/ + + + +=== TEST 9: open allow_degradation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-post-req": { + "conf": [ + {"name":"foo", "value":"bar"} + ], + "allow_degradation": true + } + }, + "upstream": { + 
"nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 10: ext-plugin-resp wrong, req access +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock1; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } +--- response_body +hello world +--- error_log eval +qr/Plugin Runner.*allow degradation/ + + + +=== TEST 11: add route: wrong upstream +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/*", + "plugins": { + "ext-plugin-post-resp": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:3980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 12: request upstream failed +--- request +GET /hello +--- error_code: 502 +--- error_log eval +qr/failed to request/ + + + +=== TEST 13: add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/*", + "plugins": { + "ext-plugin-post-resp": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 14: body_reader error +--- request +GET /hello1 +--- more_headers +resp-Content-Length: 14 +--- error_code: 502 +--- error_log eval +qr/read response failed/ + + + +=== TEST 15: response chunked +--- 
request +GET /hello_chunked +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 16: check upstream uri with args +--- request +GET /plugin_proxy_rewrite_args?aaa=bbb&ccc=ddd +--- error_code: 200 +--- response_body +uri: /plugin_proxy_rewrite_args +aaa: bbb +ccc: ddd diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/runner.sh b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/runner.sh new file mode 100755 index 0000000..056aa4f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/runner.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +echo "LISTEN $APISIX_LISTEN_ADDRESS" +echo "EXPIRE $APISIX_CONF_EXPIRE_TIME" +echo "MY_ENV_VAR $MY_ENV_VAR" +sleep "$1" +exit 111 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/runner_can_not_terminated.sh b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/runner_can_not_terminated.sh new file mode 100755 index 0000000..f58956c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/runner_can_not_terminated.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +term() { + eval sleep 1800 +} +trap term SIGTERM +sleep 1800 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/sanity.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/sanity.t new file mode 100644 index 0000000..873a540 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/sanity.t @@ -0,0 +1,713 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +$ENV{"PATH"} = $ENV{PATH} . ":" . $ENV{TEST_NGINX_HTML_DIR}; + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("stream_conf_enable", 1); + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen unix:\$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } + +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + my $unix_socket_path = $ENV{"TEST_NGINX_HTML_DIR"} . "/nginx.sock"; + my $orig_extra_yaml_config = $block->extra_yaml_config // ""; + my $cmd = $block->ext_plugin_cmd // "['sleep', '5s']"; + my $extra_yaml_config = <<_EOC_; +ext-plugin: + path_for_test: $unix_socket_path + cmd: $cmd +_EOC_ + $extra_yaml_config = $extra_yaml_config . 
$orig_extra_yaml_config; + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-pre-req": {"a":"b"}, + "ext-plugin-post-req": {"c":"d"}, + "ext-plugin-post-resp": {"e":"f"} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- request +GET /hello +--- response_body +hello world +--- error_log +get conf token: 233 +--- no_error_log +flush conf token lrucache +[error] +--- grep_error_log eval +qr/(sending|receiving) rpc type: \d data length:/ +--- grep_error_log_out +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 2 data length: +receiving rpc type: 2 data length: +sending rpc type: 2 data length: +receiving rpc type: 2 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 2 data length: +receiving rpc type: 2 data length: +sending rpc type: 2 data length: +receiving rpc type: 2 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 4 data length: +receiving rpc type: 4 data length: +sending rpc type: 4 data length: +receiving rpc type: 4 data 
length: + + + +=== TEST 3: header too short +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.header_too_short() + } + } +--- request +GET /hello +--- error_code: 503 +--- error_log +failed to receive RPC_PREPARE_CONF + + + +=== TEST 4: data too short +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.data_too_short() + } + } +--- request +GET /hello +--- error_code: 503 +--- error_log +failed to receive RPC_PREPARE_CONF + + + +=== TEST 5: not listen +--- extra_stream_config +--- request +GET /hello +--- error_code: 503 +--- error_log +failed to connect to the unix socket + + + +=== TEST 6: spawn runner +--- ext_plugin_cmd +["t/plugin/ext-plugin/runner.sh", "3600"] +--- config + location /t { + access_by_lua_block { + -- ensure the runner is spawned before the request finishes + ngx.sleep(0.1) + ngx.exit(200) + } + } +--- grep_error_log eval +qr/LISTEN unix:\S+/ +--- grep_error_log_out eval +qr/LISTEN unix:.+\/nginx.sock/ +--- error_log +EXPIRE 3600 + + + +=== TEST 7: respawn runner when it exited +--- ext_plugin_cmd +["t/plugin/ext-plugin/runner.sh", "0.1"] +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.2) + } + } +--- error_log +runner exited with reason: exit, status: 111 +respawn runner 3 seconds later with cmd: ["t/plugin/ext-plugin/runner.sh","0.1"] + + + +=== TEST 8: flush cache when runner exited +--- ext_plugin_cmd +["t/plugin/ext-plugin/runner.sh", "0.4"] +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local function r() + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.log(ngx.ERR, err) + return + else + ngx.print(res.body) + end + end + + r() + r() + ngx.sleep(0.5) + r() + } + } +--- response_body +hello world +hello world +hello world +--- grep_error_log eval +qr/(sending|receiving) rpc type: 1 data length:/ +--- grep_error_log_out +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +sending rpc type: 1 data length: +receiving rpc type: 1 data length: +--- error_log +flush conf token lrucache +flush conf token in shared dict + + + +=== TEST 9: prepare conf +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-pre-req": { + "conf": [ + {"name":"foo", "value":"bar"}, + {"name":"cat", "value":"dog"} + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 10: hit +--- request +GET /hello +--- 
response_body +hello world +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({with_conf = true, expect_key_pattern = [[^route#1#ext-plugin-pre-req#]]}) + } + } +--- error_log eval +qr/get conf token: 233 conf: \[(\{"value":"bar","name":"foo"\}|\{"name":"foo","value":"bar"\}),(\{"value":"dog","name":"cat"\}|\{"name":"cat","value":"dog"\})\]/ + + + +=== TEST 11: handle error code +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({inject_error = true}) + } + } +--- error_code: 503 +--- error_log +failed to receive RPC_PREPARE_CONF: bad request + + + +=== TEST 12: refresh token +--- request +GET /hello +--- response_body +hello world +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + if not package.loaded.count then + package.loaded.count = 1 + else + package.loaded.count = package.loaded.count + 1 + end + + if package.loaded.count == 1 then + ext.go({no_token = true}) + else + ext.go({with_conf = true}) + end + } + } +--- error_log +refresh cache and try again +flush conf token in shared dict + + + +=== TEST 13: runner can access the environment variable +--- main_config +env MY_ENV_VAR=foo; +--- ext_plugin_cmd +["t/plugin/ext-plugin/runner.sh", "3600"] +--- config + location /t { + access_by_lua_block { + -- ensure the runner is spawned before the request finishes + ngx.sleep(0.1) + ngx.exit(200) + } + } +--- error_log +MY_ENV_VAR foo + + + +=== TEST 14: bad conf +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + 
"ext-plugin-pre-req": { + "conf": [ + {"value":"bar"} + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.say(message) + end + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-post-req": { + "conf": [ + {"name":"bar"} + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.print(message) + end + } + } +--- response_body +{"error_msg":"failed to check the configuration of plugin ext-plugin-pre-req err: property \"conf\" validation failed: failed to validate item 1: property \"name\" is required"} + +{"error_msg":"failed to check the configuration of plugin ext-plugin-post-req err: property \"conf\" validation failed: failed to validate item 1: property \"value\" is required"} + + + +=== TEST 15: spawn runner which can't be terminated, ensure APISIX won't be blocked +--- ext_plugin_cmd +["t/plugin/ext-plugin/runner_can_not_terminated.sh"] +--- config + location /t { + return 200; + } + + + +=== TEST 16: prepare conf with global rule +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.say(message) + return + end + + local code, message, res = t.test('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ext-plugin-post-req": { + "conf": [ + {"name":"foo", "value":"bar"}, + {"name":"cat", "value":"dog"} + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 17: hit +--- request +GET /hello +--- 
response_body +hello world +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({with_conf = true, expect_key_pattern = [[^global_rule#1#ext-plugin-post-req#]]}) + } + } +--- error_log eval +qr/get conf token: 233 conf: \[(\{"value":"bar","name":"foo"\}|\{"name":"foo","value":"bar"\}),(\{"value":"dog","name":"cat"\}|\{"name":"cat","value":"dog"\})\]/ + + + +=== TEST 18: clean global rule +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + } + } + + + +=== TEST 19: default allow_degradation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-post-req": { + "conf": [ + {"name":"foo", "value":"bar"}, + {"name":"cat", "value":"dog"} + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 20: ext-plugin wrong, req reject +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock1; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } +--- error_code: 503 +--- error_log eval +qr/failed to connect to the unix socket/ + + + +=== TEST 21: open allow_degradation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local code, message, res = 
t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ext-plugin-post-req": { + "conf": [ + {"name":"foo", "value":"bar"}, + {"name":"cat", "value":"dog"} + ], + "allow_degradation": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 22: ext-plugin wrong, req access +--- request +GET /hello +--- extra_stream_config + server { + listen unix:$TEST_NGINX_HTML_DIR/nginx.sock1; + + content_by_lua_block { + local ext = require("lib.ext-plugin") + ext.go({}) + } + } +--- response_body +hello world +--- error_log eval +qr/Plugin Runner.*allow degradation/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/sanity2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/sanity2.t new file mode 100644 index 0000000..6726005 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ext-plugin/sanity2.t @@ -0,0 +1,65 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $cmd = $block->ext_plugin_cmd // "['sleep', '5s']"; + my $extra_yaml_config = <<_EOC_; +ext-plugin: + cmd: $cmd +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: terminate spawn runner +--- ext_plugin_cmd +["t/plugin/ext-plugin/runner.sh", "3600"] +--- config + location /t { + return 200; + } +--- shutdown_error_log eval +qr/terminate runner \d+ with SIGTERM/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/fault-injection.t b/CloudronPackages/APISIX/apisix-source/t/plugin/fault-injection.t new file mode 100644 index 0000000..dde12a0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/fault-injection.t @@ -0,0 +1,1104 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +log_level('info'); +run_tests; + +__DATA__ + +=== TEST 1: set route(invalid http_status in the abort property) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 100, + "body": "Fault Injection!\n" + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/validation failed/ + + + +=== TEST 2: set route(without http_status in the abort property) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/validation failed/ + + + +=== TEST 3: set route(without abort & delay properties) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request 
+GET /t +--- error_code: 400 +--- response_body eval +qr/expect object to have at least 1 properties/ + + + +=== TEST 4: set route(without duration in the delay property) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "delay": { + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/validation failed/ + + + +=== TEST 5: set route(invalid duration with string in the delay property) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "delay": { + "duration": "test" + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/wrong type: expected number, got string/ + + + +=== TEST 6: set route(invalid duration with double dot in the delay property) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "delay": { + "duration": 0.1.1 + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/invalid request body/ +--- error_log eval +qr/invalid request body/ + + + +=== TEST 7: set route(invalid duration with whitespace in the 
delay property) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "delay": { + "duration": 0. 1 + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/invalid request body/ +--- error_log eval +qr/invalid request body/ + + + +=== TEST 8: set route(invalid vars in the delay property) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "delay": { + "duration": 0.1, + "vars": { + "a", + "b" + } + }, + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/invalid request body/ +--- error_log eval +qr/invalid request body/ + + + +=== TEST 9: set route(invalid vars in the abort property) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "vars": { + "a", + "b" + } + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/invalid request body/ +--- error_log eval +qr/invalid request body/ + + + +=== TEST 10: set route(delay 1 seconds) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "delay": { + "duration": 1 + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 11: hit route(delay 1 seconds and return hello world) +--- request +GET /hello HTTP/1.1 +--- response_body +hello world + + + +=== TEST 12: set route(abort with http status 200 and return "Fault Injection!\n") +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "Fault Injection!\n" + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 13: hit route(abort with http code 200 and return "Fault Injection!\n") +--- request +GET /hello HTTP/1.1 +--- error_code: 200 +--- response_body +Fault Injection! 
+ + + +=== TEST 14: set route(abort with http status 405 and return "Fault Injection!\n") +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 405, + "body": "Fault Injection!\n" + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 15: hit route(abort with http status 405 and return "Fault Injection!\n") +--- request +GET /hello HTTP/1.1 +--- error_code: 405 +--- response_body +Fault Injection! + + + +=== TEST 16: set route(play with redirect plugin) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "Fault Injection!\n" + } + }, + "redirect": { + "uri": "/hello/world", + "ret_code": 302 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 17: hit route(abort with http status 200 and return "Fault Injection!\n") +--- request +GET /hello HTTP/1.1 +--- error_code: 200 +--- response_body +Fault Injection! 
+ + + +=== TEST 18: set route (abort injection but with zero percentage) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "Fault Injection!\n", + "percentage": 0 + } + }, + "redirect": { + "uri": "/hello/world", + "ret_code": 302 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 19: hit route (redirect) +--- request +GET /hello HTTP/1.1 +--- error_code: 302 + + + +=== TEST 20: set route (delay injection but with zero percentage) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "delay": { + "duration": 1, + "percentage": 0 + } + }, + "proxy-rewrite": { + "uri": "/hello1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 21: hit route (no wait and return hello1 world) +--- request +GET /hello HTTP/1.1 +--- error_code: 200 +--- response_body +hello1 world + + + +=== TEST 22: set route(body with var) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "client addr: $remote_addr\n" + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 23: hit route(body with var) +--- request +GET /hello +--- response_body +client addr: 127.0.0.1 + + + +=== TEST 24: set route(abort without body) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200 + } + }, + "proxy-rewrite": { + "uri": "/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 25: hit route(abort without body) +--- request +GET /hello +--- response_body + + + +=== TEST 26: vars schema validation passed +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.fault-injection") + local ok, err = plugin.check_schema({ + abort = { + http_status = 403, + body = "Fault Injection!\n", + vars = { + { + {"arg_name","==","jack"}, + {"arg_age","!","<",18} + }, + { + {"http_apikey","==","api-key"} + } + } + }, + delay = { + duration = 2, + vars = { + { + {"arg_name","==","jack"}, + {"arg_age","!","<",18} + }, + { + {"http_apikey","==","api-key"} + } + } + } + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 27: vars schema validation failed(abort failed) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.fault-injection") + local ok, err = plugin.check_schema({ + abort = { + http_status = 403, + body = "Fault Injection!\n", + vars = { + {"arg_name","!=","jack"} + } + }, + delay = { + duration = 
2, + vars = { + { + {"arg_name","==","jack"}, + {"arg_age","!","<",18} + }, + { + {"http_apikey","==","api-key"} + } + } + } + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +rule should be wrapped inside brackets +done +--- error_log eval +qr/failed to create vars expression:.*/ + + + +=== TEST 28: set route and configure the vars rule in abort +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 403, + "body": "Fault Injection!\n", + "vars": [ + [ + ["arg_name","==","jack"], + [ "arg_age","!","<",18 ] + ], + [ + [ "http_apikey","==","api-key" ] + ] + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 29: hit the route (all vars rules pass), execute abort +--- request +GET /hello?name=jack&age=18 +--- more_headers +apikey: api-key +--- error_code: 403 +--- response_body +Fault Injection! + + + +=== TEST 30: hit the route (missing apikey), execute abort +--- request +GET /hello?name=jack&age=20 +--- error_code: 403 +--- response_body +Fault Injection! + + + +=== TEST 31: hit the route (missing request parameters), execute abort +--- request +GET /hello +--- more_headers +apikey:api-key +--- error_code: 403 +--- response_body +Fault Injection! 
+ + + +=== TEST 32: hit route(`vars` do not match, `age` is missing) +--- request +GET /hello?name=allen +--- response_body +hello world + + + +=== TEST 33: hit route(all `vars` do not match) +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 34: set route and configure the vars rule in delay +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/hello", + "plugins": { + "fault-injection": { + "delay": { + "duration": 2, + "vars": [ + [ + ["arg_name","==","jack"], + [ "arg_age","!","<",18 ] + ] + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 35: hit route(delay 2 seconds and return hello world) +--- request +GET /hello?name=jack&age=22 +--- response_body +hello world + + + +=== TEST 36: hit route (no wait and return hello world) +--- request +GET /hello HTTP/1.1 +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 37: set route and configure the vars rule in abort and delay +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 403, + "body": "Fault Injection!\n", + "vars": [ + [ + ["arg_name","==","jack"], + ["arg_age","!","<",18] + ] + ] + }, + "delay": { + "duration": 2, + "vars": [ + [ + ["http_apikey","==","api-key"] + ] + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 38: 
hit the route (all vars rules are passed), execute abort and delay +--- request +GET /hello?name=jack&age=18 +--- more_headers +apikey: api-key +--- error_code: 403 +--- response_body +Fault Injection! + + + +=== TEST 39: hit the route (abort rule does not match), only execute delay +--- request +GET /hello?name=jack&age=16 +--- more_headers +apikey: api-key +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/fault-injection2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/fault-injection2.t new file mode 100644 index 0000000..bd28f6b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/fault-injection2.t @@ -0,0 +1,186 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: vars rule with ! 
(set) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 403, + "body": "Fault Injection!\n", + "vars": [ + [ + "!AND", + ["arg_name","==","jack"], + ["arg_age","!","<",18] + ] + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: vars rule with ! (hit) +--- request +GET /hello?name=jack&age=17 +--- error_code: 403 +--- response_body +Fault Injection! + + + +=== TEST 3: vars rule with ! (miss) +--- request +GET /hello?name=jack&age=18 +--- response_body +hello world + + + +=== TEST 4: inject header config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "headers" : { + "h1": "v1", + "h2": 2, + "h3": "$uri" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: inject header +--- request +GET /hello +--- response_headers +h1: v1 +h2: 2 +h3: /hello + + + +=== TEST 6: closing curly brace not should not be a part of variable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "body": "{\"count\": $arg_count}" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + 
) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: test route +--- request +GET /hello?count=2 +--- response_body chomp +{"count": 2} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/file-logger-reopen.t b/CloudronPackages/APISIX/apisix-source/t/plugin/file-logger-reopen.t new file mode 100644 index 0000000..5f4dcb8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/file-logger-reopen.t @@ -0,0 +1,169 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (! 
$block->request) { + $block->set_value("request", "GET /t"); + } +}); + + +run_tests; + +__DATA__ + +=== TEST 1: prepare +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/file-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "file-logger": { + "path": "file.log" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: cache file +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello", ngx.HTTP_GET) + assert(io.open("file.log", 'r')) + os.remove("file.log") + local code = t("/hello", ngx.HTTP_GET) + local _, err = io.open("file.log", 'r') + ngx.say(err) + } + } +--- response_body +file.log: No such file or directory + + + +=== TEST 3: reopen file +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello", ngx.HTTP_GET) + assert(io.open("file.log", 'r')) + os.remove("file.log") + ngx.sleep(0.01) -- make sure last reopen file is expired + + local process = require "ngx.process" + local resty_signal = require "resty.signal" + local pid = process.get_master_pid() + + local ok, err = resty_signal.kill(pid, "USR1") + if not ok then + ngx.log(ngx.ERR, "failed to kill process of pid ", pid, ": ", err) + return + end + + local code = t("/hello", ngx.HTTP_GET) + assert(code == 200) + + -- file is reopened + local fd, err = io.open("file.log", 'r') + local msg + + if not fd 
then + core.log.error("failed to open file: file.log, error info: ", err) + return + end + + msg = fd:read() + + local new_msg = core.json.decode(msg) + if new_msg.client_ip == '127.0.0.1' and new_msg.route_id == '1' + and new_msg.host == '127.0.0.1' + then + msg = "write file log success" + ngx.status = code + ngx.say(msg) + end + + os.remove("file.log") + local code = t("/hello", ngx.HTTP_GET) + local _, err = io.open("file.log", 'r') + ngx.say(err) + } + } +--- response_body +write file log success +file.log: No such file or directory +--- grep_error_log eval +qr/reopen cached log file: file.log/ +--- grep_error_log_out +reopen cached log file: file.log diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/file-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/file-logger.t new file mode 100644 index 0000000..30b9fc1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/file-logger.t @@ -0,0 +1,340 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (! 
$block->request) { + $block->set_value("request", "GET /t"); + } +}); + + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local configs = { + -- full configuration + { + path = "file.log" + }, + -- property "path" is required + { + path = nil + } + } + + local plugin = require("apisix.plugins.file-logger") + + for i = 1, #configs do + ok, err = plugin.check_schema(configs[i]) + if err then + ngx.say(err) + else + ngx.say("done") + end + end + } + } +--- response_body_like +done +property "path" is required + + + +=== TEST 2: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/file-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "file-logger": { + "path": "file.log" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: verify plugin +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello", ngx.HTTP_GET) + local fd, err = io.open("file.log", 'r') + local msg + + if not fd then + core.log.error("failed to open file: file.log, error info: ", err) + return + end + + msg = fd:read() + + local new_msg = core.json.decode(msg) + if new_msg.client_ip == '127.0.0.1' and new_msg.route_id == '1' + and new_msg.host == '127.0.0.1' + then + msg = "write 
file log success"
+                ngx.status = code
+                ngx.say(msg)
+            end
+
+            --- a new request is logged
+            t("/hello", ngx.HTTP_GET)
+            msg = fd:read("*l")
+            local new_msg = core.json.decode(msg)
+            if new_msg.client_ip == '127.0.0.1' and new_msg.route_id == '1'
+                and new_msg.host == '127.0.0.1'
+            then
+                msg = "write file log success"
+                ngx.say(msg)
+            end
+        }
+    }
+--- response_body
+write file log success
+write file log success
+
+
+
+=== TEST 5: failed to open the path
+--- config
+    location /t {
+        content_by_lua_block {
+            local core = require("apisix.core")
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "plugins": {
+                        "file-logger": {
+                            "path": "/log/file.log"
+                        }
+                    },
+                    "upstream": {
+                        "nodes": {
+                            "127.0.0.1:1982": 1
+                        },
+                        "type": "roundrobin"
+                    },
+                    "uri": "/hello"
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+                ngx.say(body)
+            end
+
+            local code, messages = t("/hello", ngx.HTTP_GET)
+            core.log.warn("messages: ", messages)
+            if code >= 300 then
+                ngx.status = code
+            end
+        }
+    }
+--- error_log
+failed to open file: /log/file.log, error info: /log/file.log: No such file or directory
+
+
+
+=== TEST 6: log format in plugin
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            -- ensure the format is not set
+            t('/apisix/admin/plugin_metadata/file-logger',
+                ngx.HTTP_DELETE
+            )
+            local code, body = t('/apisix/admin/routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "plugins": {
+                        "file-logger": {
+                            "path": "file.log",
+                            "log_format": {
+                                "host": "$host",
+                                "client_ip": "$remote_addr"
+                            }
+                        }
+                    },
+                    "upstream": {
+                        "nodes": {
+                            "127.0.0.1:1982": 1
+                        },
+                        "type": "roundrobin"
+                    },
+                    "uri": "/hello"
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 7: verify plugin
+--- config
+    location /t {
+        content_by_lua_block {
+            local core = require("apisix.core")
+            local t = require("lib.test_admin").test
+            local code = 
t("/hello", ngx.HTTP_GET) + local fd, err = io.open("file.log", 'r') + local msg + + if not fd then + core.log.error("failed to open file: file.log, error info: ", err) + return + end + + msg = fd:read() + + local new_msg = core.json.decode(msg) + if new_msg.client_ip == '127.0.0.1' and new_msg.route_id == '1' + and new_msg.host == '127.0.0.1' + then + msg = "write file log success" + ngx.status = code + ngx.say(msg) + end + } + } +--- response_body +write file log success + + + +=== TEST 8: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/file-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: ensure config in plugin is prior to the one in plugin metadata +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello", ngx.HTTP_GET) + local fd, err = io.open("file.log", 'r') + local msg + + if not fd then + core.log.error("failed to open file: file.log, error info: ", err) + return + end + + msg = fd:read() + + local new_msg = core.json.decode(msg) + if new_msg.client_ip == '127.0.0.1' and new_msg.route_id == '1' + and new_msg.host == '127.0.0.1' + then + msg = "write file log success" + ngx.status = code + ngx.say(msg) + end + } + } +--- response_body +write file log success diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/file-logger2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/file-logger2.t new file mode 100644 index 0000000..706f401 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/file-logger2.t @@ -0,0 +1,516 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (! $block->request) { + $block->set_value("request", "GET /t"); + if (!$block->response_body) { + $block->set_value("response_body", "passed\n"); + } + } +}); + + +run_tests; + +__DATA__ + +=== TEST 1: add plugin with 'include_resp_body' setting +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- delete plugin metadata for response body format + t('/apisix/admin/plugin_metadata/file-logger', ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "file-logger": { + "path": "file-with-resp-body.log", + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 2: verify plugin for file-logger with response +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello", ngx.HTTP_GET) + local fd, err = io.open("file-with-resp-body.log", 'r') + local msg + + if not fd then + core.log.error("failed to 
open file: file-with-resp-body.log, error info: ", err)
+                return
+            end
+
+            -- note only for first line
+            msg = fd:read()
+
+            local new_msg = core.json.decode(msg)
+            ngx.status = code
+
+            if new_msg.response ~= nil and new_msg.response.body == "hello world\n" then
+                ngx.status = code
+                ngx.say('contain with target')
+            end
+        }
+    }
+--- response_body
+contain with target
+
+
+
+=== TEST 3: check file-logger 'include_resp_body' with 'expr'
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "plugins": {
+                        "file-logger": {
+                            "path": "file-with-resp-expr-body.log",
+                            "include_resp_body": true,
+                            "include_resp_body_expr": [
+                                [
+                                    "arg_foo",
+                                    "==",
+                                    "bar"
+                                ]
+                            ]
+                        }
+                    },
+                    "upstream": {
+                        "nodes": {
+                            "127.0.0.1:1982": 1
+                        },
+                        "type": "roundrobin"
+                    },
+                    "uri": "/hello"
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+
+
+
+=== TEST 4: verify file-logger resp with expression of concern
+--- config
+    location /t {
+        content_by_lua_block {
+            local core = require("apisix.core")
+            local t = require("lib.test_admin").test
+            local code = t("/hello?foo=bar", ngx.HTTP_GET)
+            local fd, err = io.open("file-with-resp-expr-body.log", 'r')
+            local msg
+
+            if not fd then
+                core.log.error("failed to open file: file-with-resp-expr-body.log, error info: ", err)
+                return
+            end
+
+            -- note only for first line
+            msg = fd:read()
+
+            local new_msg = core.json.decode(msg)
+            ngx.status = code
+
+            if new_msg.response ~= nil and new_msg.response.body == "hello world\n" then
+                ngx.status = code
+                ngx.say('contain target body hits with expr')
+            end
+
+            --- a new request is logged
+            t("/hello?name=pix", ngx.HTTP_GET)
+            msg = fd:read("*l")
+            local new_msg = core.json.decode(msg)
+            if new_msg.response.body == nil then
+                ngx.say('skip unconcern body')
+            end
+        }
+    }
+--- response_body
+contain target body hits with expr
+skip unconcern body
+
+
+
+=== 
TEST 5: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/file-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "client_ip": "$remote_addr", + "resp_body": "$resp_body" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "file-logger": { + "path": "file-with-resp-body2.log", + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: verify plugin +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello", ngx.HTTP_GET) + local fd, err = io.open("file-with-resp-body2.log", 'r') + local msg + + if not fd then + core.log.error("failed to open file: file.log, error info: ", err) + return + end + + msg = fd:read() + + local new_msg = core.json.decode(msg) + if new_msg.resp_body == 'hello world\n' + then + msg = "write file log success" + ngx.status = code + ngx.say(msg) + end + } + } +--- response_body +write file log success + + + +=== TEST 8: Add new configuration with match +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "file-logger": { + "path": "file-with-match.log", + "match": [ + [ + [ "arg_name","==","jack" ] + ] + ], + "log_format": { + "request": "$request" + } + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: Request match +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello?name=jack", ngx.HTTP_GET) + local fd, err = io.open("file-with-match.log", 'r') + if not fd then + core.log.error("failed to open file: file-with-match.log, error info: ", err) + return + end + local msg = fd:read() + + local new_msg = core.json.decode(msg) + if new_msg.request == 'GET /hello?name=jack HTTP/1.1' + and new_msg.route_id == '1' + then + msg = "write file log success" + ngx.status = code + ngx.say(msg) + end + + os.remove("file-with-match.log") + } + } +--- response_body +write file log success + + + +=== TEST 10: Request not match +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello?name=tony", ngx.HTTP_GET) + local fd, err = io.open("file-with-match.log", 'r') + if not fd then + local msg = "not write file log" + ngx.say(msg) + return + end + } + } +--- response_body +not write file log + + + +=== TEST 11: add plugin with 'include_req_body' setting +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/file-logger', ngx.HTTP_DELETE) + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "file-logger": { + "path": "file-with-req-body.log", + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 12: verify plugin for file-logger with request +--- config + location /t { + content_by_lua_block { + 
local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello", ngx.HTTP_POST, "body-data") + local fd, err = io.open("file-with-req-body.log", 'r') + local msg + + if not fd then + core.log.error("failed to open file: file-with-req-body.log, error info: ", err) + return + end + + -- note only for first line + msg = fd:read() + + local new_msg = core.json.decode(msg) + ngx.status = code + if new_msg.request ~= nil and new_msg.request.body == "body-data" then + ngx.status = code + ngx.say('contain with target') + end + } + } +--- response_body +contain with target + + + +=== TEST 13: check file-logger 'include_req_body' with 'expr' +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "file-logger": { + "path": "file-with-req-expr-body.log", + "include_req_body": true, + "include_req_body_expr": [ + [ + "arg_log_body", + "==", + "yes" + ] + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 14: verify file-logger req with expression of concern +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello?log_body=yes", + ngx.HTTP_POST, + [[{"foo": "bar"}]] + ) + local fd, err = io.open("file-with-req-expr-body.log", 'r') + local msg + + if not fd then + core.log.error("failed to open file: file-with-req-expr-body.log, error info: ", err) + return + end + + -- note only for first line + msg = fd:read() + + local new_msg = core.json.decode(msg) + ngx.status = code + if new_msg.request ~= nil and new_msg.request.body ~= nil then + ngx.status = code + ngx.say('contain target body hits with expr') + end + + --- a new request is logged + 
t("/hello?log_body=no", ngx.HTTP_POST, [[{"foo": "b"}]]) + msg = fd:read("*l") + local new_msg = core.json.decode(msg) + if new_msg.request.body == nil then + ngx.say('skip unconcern body') + end + } + } +--- response_body +contain target body hits with expr +skip unconcern body diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/forward-auth.t b/CloudronPackages/APISIX/apisix-source/t/plugin/forward-auth.t new file mode 100644 index 0000000..d6f6575 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/forward-auth.t @@ -0,0 +1,405 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + {uri = "http://127.0.0.1:8199"}, + {request_headers = {"test"}}, + {uri = 3233}, + {uri = "http://127.0.0.1:8199", request_headers = "test"}, + {uri = "http://127.0.0.1:8199", request_method = "POST"}, + {uri = "http://127.0.0.1:8199", request_method = "PUT"} + } + local plugin = require("apisix.plugins.forward-auth") + + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +done +property "uri" is required +property "uri" validation failed: wrong type: expected string, got number +property "request_headers" validation failed: wrong type: expected array, got string +done +property "request_method" validation failed: matches none of the enum values + + + +=== TEST 2: setup route with plugin +--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/upstreams/u1", + data = [[{ + "nodes": { + "127.0.0.1:1984": 1 + }, + "type": "roundrobin" + }]], + }, + { + url = "/apisix/admin/routes/auth", + data = { + plugins = { + ["serverless-pre-function"] = { + phase = "rewrite", + functions = { + [[return function(conf, ctx) + local core = require("apisix.core"); + if core.request.header(ctx, "Authorization") == "111" then + core.response.exit(200); + end + end]], + [[return function(conf, ctx) + local core = require("apisix.core"); + if core.request.header(ctx, "Authorization") == "222" then + core.response.set_header("X-User-ID", "i-am-an-user"); + core.response.exit(200); + end + end]], + [[return function(conf, ctx) + local core = require("apisix.core"); + if core.request.header(ctx, 
"Authorization") == "333" then + core.response.set_header("Location", "http://example.com/auth"); + core.response.exit(403); + end + end]], + [[return function(conf, ctx) + local core = require("apisix.core"); + if core.request.header(ctx, "Authorization") == "444" then + core.response.exit(403, core.request.headers(ctx)); + end + end]], + [[return function(conf, ctx) + local core = require("apisix.core") + if core.request.get_method() == "POST" then + local req_body, err = core.request.get_body() + if err then + core.response.exit(400) + end + if req_body then + local data, err = core.json.decode(req_body) + if err then + core.response.exit(400) + end + if data["authorization"] == "555" then + core.response.set_header("X-User-ID", "i-am-an-user") + core.response.exit(200) + elseif data["authorization"] == "666" then + core.response.set_header("Location", "http://example.com/auth") + core.response.exit(403) + end + end + end + end]] + } + } + }, + uri = "/auth" + }, + }, + { + url = "/apisix/admin/routes/echo", + data = [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [ + "return function (conf, ctx) + local core = require(\"apisix.core\"); + core.response.exit(200, core.request.headers(ctx)); + end" + ] + } + }, + "uri": "/echo" + }]], + }, + { + url = "/apisix/admin/routes/1", + data = [[{ + "plugins": { + "forward-auth": { + "uri": "http://127.0.0.1:1984/auth", + "request_headers": ["Authorization"], + "upstream_headers": ["X-User-ID"], + "client_headers": ["Location"] + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream_id": "u1", + "uri": "/hello" + }]], + }, + { + url = "/apisix/admin/routes/2", + data = [[{ + "plugins": { + "forward-auth": { + "uri": "http://127.0.0.1:1984/auth", + "request_headers": ["Authorization"] + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream_id": "u1", + "uri": "/empty" + }]], + }, + { + url = "/apisix/admin/routes/3", + data = [[{ + "plugins": { + "forward-auth": { + 
"uri": "http://127.0.0.1:1984/auth", + "request_method": "POST", + "upstream_headers": ["X-User-ID"], + "client_headers": ["Location"] + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream_id": "u1", + "uri": "/ping" + }]], + }, + { + url = "/apisix/admin/routes/4", + data = [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function() require(\"apisix.core\").response.exit(444); end"] + } + }, + "upstream_id": "u1", + "uri": "/crashed-auth" + }]], + }, + { + url = "/apisix/admin/routes/5", + data = [[{ + "plugins": { + "forward-auth": { + "uri": "http://127.0.0.1:1984/crashed-auth", + "request_headers": ["Authorization"], + "upstream_headers": ["X-User-ID"], + "client_headers": ["Location"] + } + }, + "upstream_id": "u1", + "uri": "/nodegr" + }]], + }, + { + url = "/apisix/admin/routes/6", + data = [[{ + "uri": "/hello", + "plugins": { + "forward-auth": { + "uri": "http://127.0.0.1:1984/crashed-auth", + "request_headers": ["Authorization"], + "upstream_headers": ["X-User-ID"], + "client_headers": ["Location"], + "allow_degradation": true + } + }, + "upstream": { + "nodes": { + "test.com:1980": 1 + }, + "type": "roundrobin" + } + }]], + }, + { + url = "/apisix/admin/routes/8", + data = [[{ + "plugins": { + "forward-auth": { + "uri": "http://127.39.40.1:9999/auth", + "request_headers": ["Authorization"], + "upstream_headers": ["X-User-ID"], + "client_headers": ["Location"], + "status_on_error": 503, + "allow_degradation": false + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream_id": "u1", + "uri": "/onerror" + }]], + } + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + ngx.say(body) + end + } + } +--- response_body eval +"passed\n" x 10 + + + +=== TEST 3: hit route (test request_headers) +--- request +GET /hello +--- more_headers +Authorization: 111 +--- response_body_like eval 
+qr/\"authorization\":\"111\"/ + + + +=== TEST 4: hit route (test upstream_headers) +--- request +GET /hello +--- more_headers +Authorization: 222 +--- response_body_like eval +qr/\"x-user-id\":\"i-am-an-user\"/ + + + +=== TEST 5: hit route (test client_headers) +--- request +GET /hello +--- more_headers +Authorization: 333 +--- error_code: 403 +--- response_headers +Location: http://example.com/auth + + + +=== TEST 6: hit route (check APISIX generated headers and ignore client headers) +--- request +GET /hello +--- more_headers +Authorization: 444 +X-Forwarded-Host: apisix.apache.org +--- error_code: 403 +--- response_body eval +qr/\"x-forwarded-proto\":\"http\"/ and qr/\"x-forwarded-method\":\"GET\"/ and +qr/\"x-forwarded-host\":\"localhost\"/ and qr/\"x-forwarded-uri\":\"\\\/hello\"/ and +qr/\"x-forwarded-for\":\"127.0.0.1\"/ +--- response_body_unlike eval +qr/\"x-forwarded-host\":\"apisix.apache.org\"/ + + + +=== TEST 7: hit route (not send upstream headers) +--- request +GET /empty +--- more_headers +Authorization: 222 +--- response_body_unlike eval +qr/\"x-user-id\":\"i-am-an-user\"/ + + + +=== TEST 8: hit route (not send client headers) +--- request +GET /empty +--- more_headers +Authorization: 333 +--- error_code: 403 +--- response_headers +!Location + + + +=== TEST 9: hit route (test upstream_headers when use post method) +--- request +POST /ping +{"authorization": "555"} +--- response_body_like eval +qr/\"x-user-id\":\"i-am-an-user\"/ + + + +=== TEST 10: hit route (test client_headers when use post method) +--- request +POST /ping +{"authorization": "666"} +--- error_code: 403 +--- response_headers +Location: http://example.com/auth + + + +=== TEST 11: hit route (unavailable auth server, expect failure) +--- request +GET /nodegr +--- more_headers +Authorization: 111 +--- error_code: 403 +--- error_log +failed to process forward auth, err: closed + + + +=== TEST 12: hit route (unavailable auth server, allow degradation) +--- request +GET /hello +--- 
more_headers +Authorization: 111 +--- error_code: 200 + + + +=== TEST 13: Verify status_on_error +--- request +GET /onerror +--- more_headers +Authorization: 333 +--- error_code: 503 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/forward-auth2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/forward-auth2.t new file mode 100644 index 0000000..f90841e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/forward-auth2.t @@ -0,0 +1,185 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: setup route with plugin +--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/upstreams/u1", + data = [[{ + "nodes": { + "127.0.0.1:1984": 1 + }, + "type": "roundrobin" + }]], + }, + { + url = "/apisix/admin/routes/auth", + data = { + plugins = { + ["serverless-pre-function"] = { + phase = "rewrite", + functions = { + [[return function(conf, ctx) + local core = require("apisix.core"); + local token = "token-headers-test"; + if core.request.header(ctx, "Authorization") == token then + if core.request.get_method() == "POST" then + if core.request.header(ctx, "Content-Length") or + core.request.header(ctx, "Transfer-Encoding") or + core.request.header(ctx, "Content-Encoding") then + core.response.exit(200) + else + core.response.exit(403) + end + else + if core.request.header(ctx, "Content-Length") or + core.request.header(ctx, "Transfer-Encoding") or + core.request.header(ctx, "Content-Encoding") then + core.response.exit(403) + else + core.response.exit(200) + end + end + end + end]] + } + } + }, + uri = "/auth" + }, + }, + { + url = "/apisix/admin/routes/echo", + data = [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [ + "return function (conf, ctx) + local core = require(\"apisix.core\"); + core.response.exit(200, core.request.headers(ctx)); + end" + ] + } + }, + "uri": "/echo" + }]], + }, + { + url = "/apisix/admin/routes/1", + data = [[{ + "plugins": { + "forward-auth": { + "uri": "http://127.0.0.1:1984/auth", + "request_headers": ["Authorization"], + "request_method": "POST" + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream_id": "u1", + "uri": "/verify-auth-post" + }]], + }, + { + url = 
"/apisix/admin/routes/2", + data = [[{ + "plugins": { + "forward-auth": { + "uri": "http://127.0.0.1:1984/auth", + "request_headers": ["Authorization"], + "request_method": "GET" + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream_id": "u1", + "uri": "/verify-auth-get" + }]], + } + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + ngx.say(body) + end + } + } +--- response_body eval +"passed\n" x 5 + + + +=== TEST 2: verify auth server forward headers for request_method=GET +--- request +GET /verify-auth-get +--- more_headers +Authorization: token-headers-test +--- error_code: 200 + + + +=== TEST 3: verify auth server forward headers for request_method=POST for GET upstream +--- request +GET /verify-auth-post +--- more_headers +Authorization: token-headers-test +--- error_code: 200 + + + +=== TEST 4: verify auth server forward headers for request_method=POST +--- request +POST /verify-auth-post +{"authorization": "token-headers-test"} +--- more_headers +Authorization: token-headers-test +--- error_code: 200 + + + +=== TEST 5: verify auth server forward headers for request_method=GET for POST upstream +--- request +POST /verify-auth-get +{"authorization": "token-headers-test"} +--- more_headers +Authorization: token-headers-test +--- error_code: 200 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging.t b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging.t new file mode 100644 index 0000000..81e7190 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging.t @@ -0,0 +1,832 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Full configuration verification (Auth File) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.google-cloud-logging") + local ok, err = plugin.check_schema({ + auth_file = "/path/to/apache/apisix/auth.json", + resource = { + type = "global" + }, + scope = { + "https://www.googleapis.com/auth/logging.admin" + }, + log_id = "syslog", + max_retry_count = 0, + retry_delay = 1, + buffer_duration = 60, + inactive_timeout = 10, + batch_max_size = 100, + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 2: Full configuration verification (Auth Config) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.google-cloud-logging") + local ok, err = plugin.check_schema({ + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = "private_key", + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/token", + }, + resource = { + type = "global" + }, + scope = { + "https://www.googleapis.com/auth/logging.admin" + }, + log_id = "syslog", + max_retry_count = 
0, + retry_delay = 1, + buffer_duration = 60, + inactive_timeout = 10, + batch_max_size = 100, + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 3: Basic configuration verification (Auth File) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.google-cloud-logging") + local ok, err = plugin.check_schema({ + auth_file = "/path/to/apache/apisix/auth.json", + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 4: Basic configuration verification (Auth Config) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.google-cloud-logging") + local ok, err = plugin.check_schema({ + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = "private_key", + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/token", + }, + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 5: auth configure undefined +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.google-cloud-logging") + local ok, err = plugin.check_schema({ + log_id = "syslog", + max_retry_count = 0, + retry_delay = 1, + buffer_duration = 60, + inactive_timeout = 10, + batch_max_size = 100, + }) + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +value should match only one schema, but matches none + + + +=== TEST 6: set route (identity authentication failed) +--- config + location /t { + content_by_lua_block { + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN RSA PRIVATE KEY----- 
+MIIBOwIBAAJBAKeXgPvU/dAfVhOPk5BTBXCaOXy/0S3mY9VHyqvWZBJ97g6tGbLZ +psn6Gw0wC4mxDfEY5ER4YwU1NWCVtIr1XxcCAwEAAQJADkoowVBD4/8IA9r2JhQu +Ho/H3w8r8tH2KTVZ3pUFK15WGJf8vCF9LznVNKCP0X1NMLGvf4yRELx8jjpwJztI +gQIhANdWaJ3AGftJNaF5qXWwniFP1BcyCPSzn3q0rn19NhyHAiEAxz0HN8Yd+7vR +pi0w/L2I/2nLqgPFtqSGpL2KkJYcXPECIQCdM/PD1k4haNzCOXNA++M1JnYLSPfI +zKkMh4MrEZHDWQIhAKasRiKBaUnTCIJ04bs9L6NDtO4Ic9jj8ANW0Nk9yoJxAiAA +tBXLQH7fw5H8RaxBN91yQUZombw6JnRBXKKohWHZ3Q== +-----END RSA PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/logging/token", + scope = { + "https://apisix.apache.org/logs:admin" + }, + entries_uri = "http://127.0.0.1:1980/google/logging/entries", + }, + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: test route (identity authentication failed) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- grep_error_log eval +qr/\{\"error\"\:\"[\w+\s+]*\"\}/ +--- grep_error_log_out +{"error":"identity authentication failed"} +--- error_log +Batch Processor[google-cloud-logging] failed to process entries +Batch Processor[google-cloud-logging] exceeded the max_retry_count + + + +=== TEST 8: set route (no access to this scopes) +--- config + location /t { + content_by_lua_block { + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF 
+2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/logging/token", + entries_uri = "http://127.0.0.1:1980/google/logging/entries", + }, + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: test route (no 
access to this scopes) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- grep_error_log eval +qr/\{\"error\"\:\"[\w+\s+]*\"\}/ +--- grep_error_log_out +{"error":"no access to this scopes"} +--- error_log +Batch Processor[google-cloud-logging] failed to process entries +Batch Processor[google-cloud-logging] exceeded the max_retry_count + + + +=== TEST 10: set route (succeed write) +--- config + location /t { + content_by_lua_block { + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh 
+GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/logging/token", + scope = { + "https://apisix.apache.org/logs:admin" + }, + entries_uri = "http://127.0.0.1:1980/google/logging/entries", + }, + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: test route(succeed write) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world + + + +=== TEST 12: set route (customize auth type) +--- config + location /t { + content_by_lua_block { + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw 
+Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/logging/token?token_type=Basic", + scope = { + "https://apisix.apache.org/logs:admin" + }, + entries_uri = "http://127.0.0.1:1980/google/logging/entries?token_type=Basic", + }, + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: test route(customize auth type) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world + + + +=== TEST 14: 
set route (customize auth type error) +--- config + location /t { + content_by_lua_block { + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK 
+Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/logging/token?token_type=Basic", + scope = { + "https://apisix.apache.org/logs:admin" + }, + entries_uri = "http://127.0.0.1:1980/google/logging/entries", + }, + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: test route(customize auth type error) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- grep_error_log eval +qr/\{\"error\"\:\"[\w+\s+]*\"\}/ +--- grep_error_log_out +{"error":"identity authentication failed"} +--- error_log +Batch Processor[google-cloud-logging] failed to process entries +Batch Processor[google-cloud-logging] exceeded the max_retry_count + + + +=== TEST 16: set route (file configuration is successful) +--- config + location /t { + content_by_lua_block { + + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_file = "t/plugin/google-cloud-logging/config.json", + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: test route(file configuration is successful) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world + + + +=== TEST 18: set route (file configuration is failed) +--- config + location /t { + content_by_lua_block { + + local config = { + uri = "/hello", + upstream = 
{ + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_file = "google-cloud-logging/config.json", + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: test route(file configuration is failed) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +config.json: No such file or directory + + + +=== TEST 20: set route (https file configuration is successful) +--- config + location /t { + content_by_lua_block { + + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_file = "t/plugin/google-cloud-logging/config-https-domain.json", + inactive_timeout = 1, + batch_max_size = 1, + ssl_verify = true, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: test route(https file configuration is successful) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world + + + +=== TEST 22: set route (https file configuration SSL authentication failed: ssl_verify = true) +--- config + location /t { + content_by_lua_block { + + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_file = "t/plugin/google-cloud-logging/config-https-ip.json", + inactive_timeout = 1, + batch_max_size = 1, + ssl_verify = true, + } + } + } + local t = require("lib.test_admin").test + local 
code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: test route(https file configuration SSL authentication failed: ssl_verify = true) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +failed to refresh google oauth access token, certificate host mismatch + + + +=== TEST 24: set route (https file configuration SSL authentication succeed: ssl_verify = false) +--- config + location /t { + content_by_lua_block { + + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_file = "t/plugin/google-cloud-logging/config-https-ip.json", + inactive_timeout = 1, + batch_max_size = 1, + ssl_verify = false, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: test route(https file configuration SSL authentication succeed: ssl_verify = false) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging/config-https-domain.json b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging/config-https-domain.json new file mode 100644 index 0000000..7225446 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging/config-https-domain.json @@ -0,0 +1,9 @@ +{ + "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR\naeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC\nUuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF\n2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4\nv5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep\nAB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw\nIu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P\nPR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic\nDcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49\nsxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC\nafOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC\nl85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz\nlw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC\nrCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g\ntdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16\nUyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1\nUjqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI\n1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh\nGfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46\nxn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4\nupppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF\nFzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo\ny4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W\nvjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK\nYp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S\nkEJQcmfVew5mFXyxuEn3zA==\n-----END PRIVATE KEY-----", + "project_id": "apisix", + "token_uri": "https://test.com:1983/google/logging/token", + "scope": [ + "https://apisix.apache.org/logs:admin" + ], + "entries_uri": "https://test.com:1983/google/logging/entries" +} diff --git 
a/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging/config-https-ip.json b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging/config-https-ip.json new file mode 100644 index 0000000..86b33fc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging/config-https-ip.json @@ -0,0 +1,9 @@ +{ + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR\naeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC\nUuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF\n2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4\nv5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep\nAB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw\nIu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P\nPR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic\nDcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49\nsxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC\nafOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC\nl85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz\nlw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC\nrCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g\ntdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16\nUyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1\nUjqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI\n1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh\nGfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46\nxn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4\nupppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF\nFzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo\ny4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W\nvjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK\nYp8D0aqsLEgwGrJ
QER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S\nkEJQcmfVew5mFXyxuEn3zA==\n-----END PRIVATE KEY-----", + "project_id": "apisix", + "token_uri": "https://127.0.0.1:1983/google/logging/token", + "scope": [ + "https://apisix.apache.org/logs:admin" + ], + "entries_uri": "https://127.0.0.1:1983/google/logging/entries" +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging/config.json b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging/config.json new file mode 100644 index 0000000..3d0bb62 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging/config.json @@ -0,0 +1,9 @@ +{ + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR\naeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC\nUuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF\n2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4\nv5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep\nAB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw\nIu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P\nPR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic\nDcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49\nsxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC\nafOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC\nl85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz\nlw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC\nrCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g\ntdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16\nUyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1\nUjqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI\n1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh\nGfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46\nxn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBW
cvXC+mNDsTvMmLxAoGARc+4\nupppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF\nFzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo\ny4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W\nvjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK\nYp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S\nkEJQcmfVew5mFXyxuEn3zA==\n-----END PRIVATE KEY-----", + "project_id": "apisix", + "token_uri": "http://127.0.0.1:1980/google/logging/token", + "scope": [ + "https://apisix.apache.org/logs:admin" + ], + "entries_uri": "http://127.0.0.1:1980/google/logging/entries" +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging2.t new file mode 100644 index 0000000..35d162b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/google-cloud-logging2.t @@ -0,0 +1,441 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route (verify batch queue default params) +--- config + location /t { + content_by_lua_block { + + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_file = "t/plugin/google-cloud-logging/config.json", + } + } + } + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: data encryption for auth_config.private_key +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic 
+DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/logging/token", + scope = { + "https://apisix.apache.org/logs:admin" + }, + entries_uri = "http://127.0.0.1:1980/google/logging/entries", + }, + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["google-cloud-logging"].auth_config.private_key) + + -- get plugin conf from 
etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["google-cloud-logging"].auth_config.private_key) + } + } +--- response_body +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY----- 
+YnwwDKc5vNzo0OU4StTRQbwgCnTZ3dmYiBFm8aGnvTxlE86D2nT07Q3BWhUdky6OGIox4MRLbiHz13NZjyUao/Nudh4PeTj5wMldPD5YvNWtbTG4ig/TNSdBncmIQPLPaUqSweE61pnASxodpTlBJ5k9yxfTmwBTOkzZevoKy9D2E4wF9vGCdkcPK/tAkvRoJTj6xD3xVuAbkcap/81oHplUZZ+ghlEnBZgOH8UMa73UfeNbOQVHD2mlU0LxkTXtwFhHWl50adrt890VDHev0+FUUDjv5Ysl8r/nnnlyq3SV4oqJfs/IVRKROe93e8sJ2/49o7kEv2XT1/6DjM/VsSLKfAi5rLNobcSaSzztSSLkrBFKQvvy2rRA7GFWKbIk+rPZhYmTMItDJv23XP6uzaLRPoq2f/AnRTKpWmA8Dk9TfFHsZLupKi1bmjCdtK8lpMCf9Au1rezt7+2BybQrtbbDbwPzC5bKHmKhc0GPTUzLAWQBin3tuZxSfk/MqRtG+AemwnFTHivJrfRwmc3db+b9W6WX09mV488f2M4qbqBmkiFU5VARWCGZ5vbop2KGhmB2fQPXTmj8QSYk6fBxFDnfzTfnYMIu2cQsbSBPCnoPinQNpBfFD3RQkkCiNtJ8GA8DWsivWsnW4jWyPmkIN/P1eLW1DSsU6V4cbhTQJs6/LzOCGAZB/ewu3mr1SDLWJPlIWW6atC/g0uiXkZ3VLUsS0BQffITf8sVXyz/BEbflLlT777zERDKyz/qS2JyR6U8s2h3Yg+GncPUCEF6Lx5Veb1lL+zs+Stvv+5/t2GfDlNYiwTU8HeffhEGgAv1s86OPo3CfWe7lEnu/MFHIm0czVenYdEVy449xj66DHqXUQVzVc+3NelW15FrKhcvU0Cxwqfk+xEOE185ssD06L+tOGjxPPvADjlcQQ1crH+tEcTTLnZZ/e16I10kcc5rBJwDy4COoeY6DZ0dFwtAdbjoR/KaSTGLK6n/u9Ow7OGDPZog4LhrzMOn2T7hk/oaMOKhlDvKroiSijhhkrQf5ZDhhh3GQn/ZRXjyiPWBqKEQiBJGyZ/iRONzJLsF8U8vsBzBToxmTe9prlwHusgAEIBUFrZRSvsVgsPCFOyJ6XJXDTdcCInHUGI9LsxWdlojYvvNuSvavkw1I4K+VBmlEG5FCMx56eX2X49hfXwRcM1ZyRRrmq6cRh+33aMeMLAtpKgTsQgmB/I01mGNZlstvU0XEFnCPuWcks50BTnvPEbU7GZJLE3HFmGb3vyC57E8oTR2FjhDrevPlLkxMPrLvXhwbmV+3YiZYq+8k6oBKfrrq41JRKr+SJDb7m6xL8AuZccMrNhDrkByQLi6zn95dIYc3+vNU4XBzvhpb7HMj2wvorxEW2HpQ+OVSZiZSCU6m4Fx6juj1D5pGs1nr68ybihqMrXuZBKP4b9Y6sw99kNmnWBdwNiY95sWy1qUe0MJq3r44hhVHvCUmzOVyO4aBmhMwgkaSQWpEeQwyIWENM1IMU6WUrKCSuLuKJAl5bM++ThBaLvIIMCyXl39136jHp97aVmHRXbSMPcSAb8l/YQ6SLK0HBxmFTXvroxHmPxPqrJ5jz65C72+uArgOZxJN6tyimIcTMyoJoN7N+QKxDLjgmqnJyEcthycEK3gikyloWsLppzEmHLHBDXlKpJLflvUujYrNsKf2xohx31gIlxBWCHP/1KL3QAehn+FEWUWsXn2hWAR0KAtmIOM7gZuCY8yKNDfXrAZJs14rwDlTbnhJvyijt1Tr6gleehmJDKSm2vM/NbznVTKwJDyMRner+vvc4zD06az/Y6Y4oM0e0IWM2fMaiiwjNAaKhhwJzqvM1c8+ZOfuRajmHFECEkYgXCKZiQxQihFG2wWp2i+xEGGwP2e+FbDdY9Ygyvw5SUvahyoX36AYbbTBOFY6E9aYUIM/Et8ZuXoWs1QaxGfJwcVvueqke45y3GKkp54sHXhrqfKX0TTiw6DCUs6
dRTybxOjmjJCKp6Yw4KGWY0t3J0xbK08KTUMeHNxgtfYcz1/Wg/Q61CkUJkRNBninAAkEz8rV2olBHy1GZFFjCQySAyPH4PtWm1S4sBzdsui5wT+m2pC/DsCcQW++TGH9LdaHeT8B9u32lYToVN1/L2j5kjkhN13sNKfb6I9yYTnUqweQFU79toBfDt+6KNNfIA1TcmvZw8RcuMOArEqJQ6OPOhgUQBwsZaGeqFmAE4q64n5raS4OCdWtasFtItW3c5QHxkKoEEER04glVsCoxOvc80U= + + + +=== TEST 3: set route to test custom log format +--- config + location /t { + content_by_lua_block { + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 
+xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/logging/token", + scope = { + "https://apisix.apache.org/logs:admin" + }, + entries_uri = "http://127.0.0.1:1980/google/logging/entries", + }, + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugin_metadata/google-cloud-logging', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit +--- extra_init_by_lua + local decode = require("toolkit.json").decode + local up = require("lib.server") + up.google_logging_entries = function() + ngx.log(ngx.WARN, "the mock backend is hit") + + ngx.req.read_body() + local data = ngx.req.get_body_data() + data = decode(data) + assert(data.entries[1].jsonPayload.client_ip == "127.0.0.1") + assert(data.entries[1].resource.type == "global") + ngx.say('{}') + end +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +the mock backend is hit +--- no_error_log +[error] + + + +=== TEST 5: bad custom log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/plugin_metadata/google-cloud-logging', + ngx.HTTP_PUT, + [[{ + "log_format": "'$host' '$time_iso8601'" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + ngx.say(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"log_format\" validation failed: wrong type: expected object, got string"} + + + +=== TEST 6: set route to test custom log format in route +--- config + location /t { + content_by_lua_block { + local config = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["google-cloud-logging"] = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh 
+GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/logging/token", + scope = { + "https://apisix.apache.org/logs:admin" + }, + entries_uri = "http://127.0.0.1:1980/google/logging/entries", + }, + log_format = { + host = "$host", + ["@timestamp"] = "$time_iso8601", + vip = "$remote_addr" + }, + inactive_timeout = 1, + batch_max_size = 1, + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit +--- extra_init_by_lua + local decode = require("toolkit.json").decode + local up = require("lib.server") + up.google_logging_entries = function() + ngx.log(ngx.WARN, "the mock backend is hit") + + ngx.req.read_body() + local data = ngx.req.get_body_data() + data = decode(data) + assert(data.entries[1].jsonPayload.vip == "127.0.0.1") + assert(data.entries[1].resource.type == "global") + ngx.say('{}') + end +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +the mock backend is hit +--- no_error_log +[error] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode-reload-bugfix.t b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode-reload-bugfix.t new file mode 100644 index 0000000..7eee56f --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode-reload-bugfix.t @@ -0,0 +1,75 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('warn'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + my $extra_init_by_lua = <<_EOC_; + local core = require("apisix.core") + local orig_new = core.config.new + close_cnt = 0 + core.config.new = function(key, opts) + local obj, err = orig_new(key, opts) + if key == "/protos" then + local orig_close = obj.close + obj.close = function(...) + core.log.warn("call config close") + close_cnt = close_cnt + 1 + return orig_close(...) 
+ end + end + return obj, err + end +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); +}); + +run_tests; + +__DATA__ + +=== TEST 1: close protos when grpc-transcode plugin reload +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + if code >= 300 then + ngx.status = code + return + end + + ngx.sleep(2) + if close_cnt ~= 1 then + ngx.status = 500 + end + } + } +--- error_log +call config close diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode.t b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode.t new file mode 100644 index 0000000..e261bf7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode.t @@ -0,0 +1,763 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +log_level('debug'); + +run_tests; + +__DATA__ + +=== TEST 1: set proto(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local res = assert(etcd.get('/protos/1')) + local create_time = res.body.node.value.create_time + assert(create_time ~= nil, "create_time is nil") + local update_time = res.body.node.value.update_time + assert(update_time ~= nil, "update_time is nil") + + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set proto(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/2', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: delete proto(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/2', + ngx.HTTP_DELETE + ) + + if code >= 300 then 
+ ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: set routes(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit route +--- request +GET /grpctest?name=world +--- response_body eval +qr/\{"message":"Hello world"\}/ + + + +=== TEST 6: hit route by post +--- request +POST /grpctest +name=world +--- response_body eval +qr/\{"message":"Hello world"\}/ + + + +=== TEST 7: hit route by post json +--- request +POST /grpctest +{"name": "world"} +--- more_headers +Content-Type: application/json +--- response_body eval +qr/\{"message":"Hello world"\}/ + + + +=== TEST 8: wrong upstream scheme +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "asf", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 9: wrong upstream address +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": 
["GET"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:1970": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: hit route (Connection refused) +--- request +GET /grpctest +--- response_body eval +qr/502 Bad Gateway/ +--- error_log +Connection refused) while connecting to upstream +--- error_code: 502 + + + +=== TEST 11: update proto(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + rpc Plus (PlusRequest) returns (PlusReply) {} + rpc SayHelloAfterDelay (HelloRequest) returns (HelloReply) {} + } + + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + } + message PlusRequest { + int64 a = 1; + int64 b = 2; + } + message PlusReply { + int64 result = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: set routes(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpc_plus", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "Plus", + "pb_option":["int64_as_string", "enum_as_name"] + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } 
+--- request +GET /t +--- response_body +passed + + + +=== TEST 13: hit route +--- request +GET /grpc_plus?a=1&b=2 +--- response_body eval +qr/\{"result":3\}/ + + + +=== TEST 14: hit route +--- request +GET /grpc_plus?a=1&b=2251799813685260 +--- response_body eval +qr/\{"result":"#2251799813685261"\}/ + + + +=== TEST 15: set route3 deadline nodelay +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/3', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpc_deadline", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello", + "deadline": 500 + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit route +--- request +GET /grpc_deadline?name=apisix +--- response_body eval +qr/\{"message":"Hello apisix"\}/ + + + +=== TEST 17: set route4 deadline delay +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/4', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpc_delay", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHelloAfterDelay", + "deadline": 500 + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit route +--- request +GET /grpc_delay?name=apisix +--- error_code: 504 + + + +=== TEST 19: set routes: missing method +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin grpc-transcode err: property \"method\" is required"} + + + +=== TEST 20: set proto(id: 1, with array parameter) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + repeated string items = 2; + } + message HelloReply { + string message = 1; + repeated string items = 2; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: set routes(id: 1, with array parameter) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: hit route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/grpctest', + ngx.HTTP_POST, + [[ + {"name":"apisix", "items": ["a","b","c"]} + ]], + [[ + {"message":"Hello apisix", "items": 
["a","b","c"]} + ]], + {["Content-Type"] = "application/json"} + ) + ngx.status = code + } + } +--- request +GET /t + + + +=== TEST 23: set proto with enum +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + enum Gender { + GENDER_UNKNOWN = 0; + GENDER_MALE = 1; + GENDER_FEMALE = 2; + } + message HelloRequest { + string name = 1; + repeated string items = 2; + Gender gender = 3; + } + message HelloReply { + string message = 1; + repeated string items = 2; + Gender gender = 3; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: hit route, no gender +--- request +POST /grpctest +{"name":"world"} +--- more_headers +Content-Type: application/json +--- response_body eval +qr/"gender":"GENDER_UNKNOWN"/ + + + +=== TEST 25: hit route, gender is a value +--- request +POST /grpctest +{"name":"world","gender":2} +--- more_headers +Content-Type: application/json +--- response_body eval +qr/"gender":"GENDER_FEMALE"/ + + + +=== TEST 26: hit route, gender is a name +--- request +POST /grpctest +{"name":"world","gender":"GENDER_MALE"} +--- more_headers +Content-Type: application/json +--- response_body eval +qr/"gender":"GENDER_MALE"/ + + + +=== TEST 27: hit route, bad gender +--- request +POST /grpctest +{"name":"world","gender":"GENDER_MA"} +--- more_headers +Content-Type: application/json +--- error_code: 400 +--- error_log +failed to encode request data to protobuf + + + +=== TEST 28: set routes(decode enum as value) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "uri": 
"/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello", + "pb_option":["enum_as_value"] + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 29: hit route +--- request +POST /grpctest +{"name":"world","gender":2} +--- more_headers +Content-Type: application/json +--- response_body eval +qr/"gender":2/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode2.t new file mode 100644 index 0000000..66baf9a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode2.t @@ -0,0 +1,796 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + enum Gender { + GENDER_UNKNOWN = 0; + GENDER_MALE = 1; + GENDER_FEMALE = 2; + } + message Person { + string name = 1; + int32 age = 2; + } + message HelloRequest { + string name = 1; + repeated string items = 2; + Gender gender = 3; + Person person = 4; + } + message HelloReply { + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit route +--- request +POST /grpctest +{"name":"world","person":{"name":"Joe","age":1}} +--- more_headers +Content-Type: application/json +--- response_body chomp +{"message":"Hello world, name: Joe, age: 1"} + + + +=== TEST 3: hit route, missing some fields +--- request +POST /grpctest +{"name":"world","person":{"name":"Joe"}} +--- more_headers +Content-Type: application/json +--- response_body chomp +{"message":"Hello world, name: Joe"} + + + +=== TEST 4: set rule to check if each proto is separate +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/2', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + // same message, different fields. use to pollute the type info + message HelloRequest { + string name = 1; + string person = 2; + } + message HelloReply { + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/fail", + "plugins": { + "grpc-transcode": { + "proto_id": "2", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit route +--- config +location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + local body = [[{"name":"world","person":{"name":"John"}}]] + local opt = {method = "POST", body = body, headers = {["Content-Type"] = "application/json"}} + + local function access(path) + local httpc = http.new() + local res, err = httpc:request_uri(uri .. 
path, opt) + if not res then + ngx.say(err) + return + end + if res.status > 300 then + ngx.say(res.status) + else + ngx.say(res.body) + end + end + + access("/fail") + access("/grpctest") + access("/fail") + access("/grpctest") + } +} +--- response_body +400 +{"message":"Hello world, name: John"} +400 +{"message":"Hello world, name: John"} +--- error_log +failed to encode request data to protobuf + + + +=== TEST 6: set binary rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + + local content = t.read_file("t/grpc_server_example/proto.pb") + local data = {content = ngx.encode_base64(content)} + local code, body = t.test('/apisix/admin/protos/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.TestImport", + "method": "Run" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit route +--- request +POST /grpctest +{"body":"world","user":{"name":"Hello"}} +--- more_headers +Content-Type: application/json +--- response_body chomp +{"body":"Hello world"} + + + +=== TEST 8: service/method not found +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/service_not_found", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.TestImportx", + "method": "Run" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + 
ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/method_not_found", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.TestImport", + "method": "Runx" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit route +--- request +POST /service_not_found +{"body":"world","user":{"name":"Hello"}} +--- more_headers +Content-Type: application/json +--- error_log +Undefined service method +--- error_code: 503 + + + +=== TEST 10: hit route +--- request +POST /method_not_found +{"body":"world","user":{"name":"Hello"}} +--- more_headers +Content-Type: application/json +--- error_log +Undefined service method +--- error_code: 503 + + + +=== TEST 11: set proto(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + rpc Plus (PlusRequest) returns (PlusReply) {} + rpc SayHelloAfterDelay (HelloRequest) returns (HelloReply) {} + } + + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + } + message PlusRequest { + int64 a = 1; + int64 b = 2; + } + message PlusReply { + int64 result = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: work with logger plugin which on global rule and read response body (logger plugins store undecoded body) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + 
"methods": ["GET"], + "uri": "/grpc_plus", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "Plus", + "pb_option":["int64_as_string", "enum_as_name"] + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "include_resp_body": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route +--- request +GET /grpc_plus?a=1&b=2 +--- response_body eval +qr/\{"result":3\}/ +--- error_log eval +qr/request log: \{.*body":\"\\u0000\\u0000\\u0000\\u0000\\u0002\\b\\u0003"/ + + + +=== TEST 14: delete global rules +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + ngx.say("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 200 message: passed + + + +=== TEST 15: work with logger plugin which on route and read response body (logger plugins store decoded body) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpc_plus", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "Plus", + "pb_option":["int64_as_string", "enum_as_name"] + }, + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "include_resp_body": true + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code 
>= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: hit route +--- request +GET /grpc_plus?a=1&b=2 +--- response_body eval +qr/\{"result":3\}/ +--- error_log eval +qr/request log: \{.*body":\"\{\\"result\\":3}/ + + + +=== TEST 17: pb_option should be be set on the route level +--- extra_init_by_lua + local pb = require("pb") + local old_f = pb.option + pb.option = function(o) + if o ~= "int64_as_string" and o ~= "int64_as_number" then + -- filter out options set by other components. + -- we can still test some options like enum_as_name + ngx.log(ngx.WARN, "set protobuf option: ", o) + end + return old_f(o) + end +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc Plus (PlusRequest) returns (PlusReply) {} + } + + message PlusRequest { + int64 a = 1; + int64 b = 2; + } + message PlusReply { + int64 result = 1; + }" + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpc_plus2", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "Plus" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + if code >= 300 then + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpc_plus", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "Plus", + "pb_option":["int64_as_string", "enum_as_name"] + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if 
code >= 300 then + ngx.say(body) + return + end + + for i = 1, 3 do + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. + (i == 2 and "/grpc_plus2" or "/grpc_plus") .. + "?a=1&b=2251799813685260" + local httpc = http.new() + local res = assert(httpc:request_uri(uri, {keepalive = false})) + ngx.say(res.body) + end + } + } +--- response_body +{"result":"#2251799813685261"} +{"result":2.2517998136853e+15} +{"result":"#2251799813685261"} +--- grep_error_log eval +qr/set protobuf option: \w+/ +--- grep_error_log_out +set protobuf option: enum_as_name +set protobuf option: auto_default_values +set protobuf option: disable_hooks +set protobuf option: enum_as_name +set protobuf option: enum_as_name + + + +=== TEST 18: pb_option should be be set on the route level, two route have the same options +--- extra_init_by_lua + local pb = require("pb") + local old_f = pb.option + pb.option = function(o) + if o ~= "int64_as_string" and o ~= "int64_as_number" then + -- filter out options set by other components + -- we can still test some options like enum_as_name + ngx.log(ngx.WARN, "set protobuf option: ", o) + end + return old_f(o) + end +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc Plus (PlusRequest) returns (PlusReply) {} + } + + message PlusRequest { + int64 a = 1; + int64 b = 2; + } + message PlusReply { + int64 result = 1; + }" + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpc_plus2", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "Plus" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + 
}]] + ) + if code >= 300 then + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpc_plus", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "Plus" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + + for i = 1, 3 do + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. + (i == 2 and "/grpc_plus2" or "/grpc_plus") .. + "?a=1&b=2251799813685260" + local httpc = http.new() + local res = assert(httpc:request_uri(uri, {keepalive = false})) + ngx.say(res.body) + end + } + } +--- response_body +{"result":2.2517998136853e+15} +{"result":2.2517998136853e+15} +{"result":2.2517998136853e+15} +--- grep_error_log eval +qr/set protobuf option: \w+/ +--- grep_error_log_out +set protobuf option: auto_default_values +set protobuf option: disable_hooks +set protobuf option: enum_as_name diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode3.t new file mode 100644 index 0000000..93a3d62 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-transcode3.t @@ -0,0 +1,621 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set rule +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayMultipleHello(MultipleHelloRequest) returns (MultipleHelloReply) {} + } + + enum Gender { + GENDER_UNKNOWN = 0; + GENDER_MALE = 1; + GENDER_FEMALE = 2; + } + + message Person { + string name = 1; + int32 age = 2; + } + + message MultipleHelloRequest { + string name = 1; + repeated string items = 2; + repeated Gender genders = 3; + repeated Person persons = 4; + } + + message MultipleHelloReply{ + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayMultipleHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit route +--- request +POST /grpctest 
+{"name":"world","persons":[{"name":"Joe","age":1},{"name":"Jake","age":2}]} +--- more_headers +Content-Type: application/json +--- response_body chomp +{"message":"Hello world, name: Joe, age: 1, name: Jake, age: 2"} + + + +=== TEST 3: set proto (id: 1, get error response from rpc) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc GetErrResp (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + repeated string items = 2; + } + message HelloReply { + string message = 1; + repeated string items = 2; + } + message ErrorDetail { + int64 code = 1; + string message = 2; + string type = 3; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "GetErrResp" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit route (error response in header) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, headers = t('/grpctest?name=world', + ngx.HTTP_GET + ) + + ngx.status = code + + ngx.header['grpc-status'] = headers['grpc-status'] + ngx.header['grpc-message'] = headers['grpc-message'] + ngx.header['grpc-status-details-bin'] = headers['grpc-status-details-bin'] + + body = json.encode(body) + ngx.say(body) + } + } +--- response_headers +grpc-status: 14 +grpc-message: Out of service +grpc-status-details-bin: 
CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5FcnJvckRldGFpbBIpCAESHFRoZSBzZXJ2ZXIgaXMgb3V0IG9mIHNlcnZpY2UaB3NlcnZpY2U +--- response_body_unlike eval +qr/error/ +--- error_code: 503 + + + +=== TEST 5: set routes (id: 1, show error response in body) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "GetErrResp", + "show_status_in_body": true + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit route (show error status in body) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, headers = t('/grpctest?name=world', + ngx.HTTP_GET + ) + + ngx.status = code + + ngx.header['grpc-status'] = headers['grpc-status'] + ngx.header['grpc-message'] = headers['grpc-message'] + ngx.header['grpc-status-details-bin'] = headers['grpc-status-details-bin'] + + body = json.decode(body) + body = json.encode(body) + ngx.say(body) + } + } +--- response_headers +grpc-status: 14 +grpc-message: Out of service +grpc-status-details-bin: CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5FcnJvckRldGFpbBIpCAESHFRoZSBzZXJ2ZXIgaXMgb3V0IG9mIHNlcnZpY2UaB3NlcnZpY2U +--- response_body +{"error":{"code":14,"details":[{"type_url":"type.googleapis.com/helloworld.ErrorDetail","value":"\b\u0001\u0012\u001cThe server is out of service\u001a\u0007service"}],"message":"Out of service"}} +--- error_code: 503 + + + +=== TEST 7: set routes (id: 1, show error details in body) +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "GetErrResp", + "show_status_in_body": true, + "status_detail_type": "helloworld.ErrorDetail" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit route (show error details in body) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, headers = t('/grpctest?name=world', + ngx.HTTP_GET + ) + + ngx.status = code + + ngx.header['grpc-status'] = headers['grpc-status'] + ngx.header['grpc-message'] = headers['grpc-message'] + ngx.header['grpc-status-details-bin'] = headers['grpc-status-details-bin'] + + body = json.decode(body) + body = json.encode(body) + ngx.say(body) + } + } +--- response_headers +grpc-status: 14 +grpc-message: Out of service +grpc-status-details-bin: CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5FcnJvckRldGFpbBIpCAESHFRoZSBzZXJ2ZXIgaXMgb3V0IG9mIHNlcnZpY2UaB3NlcnZpY2U +--- response_body +{"error":{"code":14,"details":[{"code":1,"message":"The server is out of service","type":"service"}],"message":"Out of service"}} +--- error_code: 503 + + + +=== TEST 9: set routes (id: 1, show error details in body and wrong status_detail_type) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": 
"GetErrResp", + "show_status_in_body": true, + "status_detail_type": "helloworld.error" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: hit route (show error details in body and wrong status_detail_type) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local code, body, headers = t('/grpctest?name=world', + ngx.HTTP_GET + ) + + ngx.status = code + + ngx.header['grpc-status'] = headers['grpc-status'] + ngx.header['grpc-message'] = headers['grpc-message'] + ngx.header['grpc-status-details-bin'] = headers['grpc-status-details-bin'] + + ngx.say(body) + } + } +--- response_headers +grpc-status: 14 +grpc-message: Out of service +grpc-status-details-bin: CA4SDk91dCBvZiBzZXJ2aWNlGlcKKnR5cGUuZ29vZ2xlYXBpcy5jb20vaGVsbG93b3JsZC5FcnJvckRldGFpbBIpCAESHFRoZSBzZXJ2ZXIgaXMgb3V0IG9mIHNlcnZpY2UaB3NlcnZpY2U +--- response_body +failed to call pb.decode to decode details in grpc-status-details-bin +--- error_log +transform response error: failed to call pb.decode to decode details in grpc-status-details-bin, err: +--- error_code: 503 + + + +=== TEST 11: set binary rule for EchoStruct +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + + local content = t.read_file("t/grpc_server_example/echo.pb") + local data = {content = ngx.encode_base64(content)} + local code, body = t.test('/apisix/admin/protos/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "echo.Echo", + "method": "EchoStruct" + } + }, 
+ "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:10051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: hit route to test EchoStruct +--- config +location /t { + content_by_lua_block { + local core = require "apisix.core" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/grpctest" + local body = [[{"data":{"fields":{"foo":{"string_value":"xxx"},"bar":{"number_value":666}}}}]] + local opt = {method = "POST", body = body, headers = {["Content-Type"] = "application/json"}, keepalive = false} + local httpc = http.new() + local res, err = httpc:request_uri(uri, opt) + if not res then + ngx.log(ngx.ERR, err) + return ngx.exit(500) + end + if res.status > 300 then + return ngx.exit(res.status) + else + local req = core.json.decode(body) + local rsp = core.json.decode(res.body) + for k, v in pairs(req.data.fields) do + if rsp.data.fields[k] == nil then + ngx.log(ngx.ERR, "rsp missing field=", k, ", rsp: ", res.body) + else + for k1, v1 in pairs(v) do + if v1 ~= rsp.data.fields[k][k1] then + ngx.log(ngx.ERR, "rsp mismatch: k=", k1, + ", req=", v1, ", rsp=", rsp.data.fields[k][k1]) + end + end + end + end + end + } +} + + + +=== TEST 13: bugfix - filter out illegal INT(string) formats +--- config +location /t { + content_by_lua_block { + local pcall = pcall + local require = require + local protoc = require("protoc") + local pb = require("pb") + local pb_encode = pb.encode + + assert(protoc:load [[ + syntax = "proto3"; + message IntStringPattern { + int64 value = 1; + }]]) + + local patterns + do + local function G(pattern) + return {pattern, true} + end + + local function B(pattern) + return {pattern, [[bad argument #2 to '?' 
(number/'#number' expected for field 'value', got string)]]} + end + + patterns = { + G(1), G(2), G(-3), G("#123"), G("0xabF"), G("#-0x123abcdef"), G("-#0x123abcdef"), G("#0x123abcdef"), G("123"), + B("#a"), B("+aaa"), B("#aaaa"), B("#-aa"), + } + end + + for _, p in pairs(patterns) do + local pattern = { + value = p[1], + } + local status, err = pcall(pb_encode, "IntStringPattern", pattern) + local res = status + if not res then + res = err + end + assert(res == p[2]) + end + ngx.say("passed") + } +} +--- response_body +passed + + + +=== TEST 14: pb_option - check the matchings between enum and category +--- config + location /t { + content_by_lua_block { + local ngx_re_match = ngx.re.match + local plugin = require("apisix.plugins.grpc-transcode") + + local pb_option_def = plugin.schema.properties.pb_option.items.anyOf + + local patterns = { + [[^enum_as_.+$]], + [[^int64_as_.+$]], + [[^.+_default_.+$]], + [[^.+_hooks$]], + } + + local function check_pb_option_enum_category() + for i, category in ipairs(pb_option_def) do + for _, enum in ipairs(category.enum) do + if not ngx_re_match(enum, patterns[i], "jo") then + return ([[mismatch between enum("%s") and category("%s")]]):format( + enum, category.description) + end + end + end + end + + local err = check_pb_option_enum_category() + if err then + ngx.say(err) + return + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web.t b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web.t new file mode 100644 index 0000000..ac8f689 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web.t @@ -0,0 +1,355 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route (default grpc web proxy route) +--- config + location /t { + content_by_lua_block { + + local config = { + uri = "/grpc/web/*", + upstream = { + scheme = "grpc", + type = "roundrobin", + nodes = { + ["127.0.0.1:50001"] = 1 + } + }, + plugins = { + ["grpc-web"] = {} + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: Proxy unary request using APISIX with trailers gRPC-Web plugin +Status should be printed at most once per request, otherwise this would be out of specification. 
+--- exec +node ./t/plugin/grpc-web/client.js BIN UNARY +node ./t/plugin/grpc-web/client.js TEXT UNARY +--- response_body +Status: { code: 0, details: '', metadata: {} } +{"name":"hello","path":"/hello"} +Status: { code: 0, details: '', metadata: {} } +{"name":"hello","path":"/hello"} + + + +=== TEST 3: Proxy server-side streaming request using APISIX with trailers gRPC-Web plugin +--- exec +node ./t/plugin/grpc-web/client.js BIN STREAM +node ./t/plugin/grpc-web/client.js TEXT STREAM +--- response_body +{"name":"hello","path":"/hello"} +{"name":"world","path":"/world"} +Status: { code: 0, details: '', metadata: {} } +{"name":"hello","path":"/hello"} +{"name":"world","path":"/world"} +Status: { code: 0, details: '', metadata: {} } + + + +=== TEST 4: test options request +--- request +OPTIONS /grpc/web/a6.RouteService/GetRoute +--- error_code: 204 +--- response_headers +Access-Control-Allow-Methods: POST +Access-Control-Allow-Headers: content-type,x-grpc-web,x-user-agent +Access-Control-Allow-Origin: * + + + +=== TEST 5: test non-options request +--- request +GET /grpc/web/a6.RouteService/GetRoute +--- error_code: 405 +--- response_headers +Access-Control-Allow-Origin: * +--- error_log +request method: `GET` invalid + + + +=== TEST 6: test non gRPC Web MIME type request +--- request +POST /grpc/web/a6.RouteService/GetRoute +--- more_headers +Content-Type: application/json +--- error_code: 400 +--- response_headers +Access-Control-Allow-Origin: * +Content-Type: text/html +--- error_log +request Content-Type: `application/json` invalid + + + +=== TEST 7: set route (absolute match) +--- config + location /t { + content_by_lua_block { + + local config = { + uri = "/grpc/web2/a6.RouteService/GetRoute", + upstream = { + scheme = "grpc", + type = "roundrobin", + nodes = { + ["127.0.0.1:50001"] = 1 + } + }, + plugins = { + ["grpc-web"] = {} + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 
300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: test route (absolute match) +--- request +POST /grpc/web2/a6.RouteService/GetRoute +--- more_headers +Content-Type: application/grpc-web +--- error_code: 400 +--- response_headers +Access-Control-Allow-Origin: * +Content-Type: text/html +--- error_log +routing configuration error, grpc-web plugin only supports `prefix matching` pattern routing + + + +=== TEST 9: set route (with cors plugin) +--- config + location /t { + content_by_lua_block { + local config = { + uri = "/grpc/web/*", + upstream = { + scheme = "grpc", + type = "roundrobin", + nodes = { + ["127.0.0.1:50001"] = 1 + } + }, + plugins = { + ["grpc-web"] = {}, + cors = { + allow_origins = "http://test.com", + allow_methods = "POST,OPTIONS", + allow_headers = "application/grpc-web", + expose_headers = "application/grpc-web", + max_age = 5, + allow_credential = true + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: don't override Access-Control-Allow-Origin header in response +--- exec +curl -iv --location 'http://127.0.0.1:1984/grpc/web/a6.RouteService/GetRoute' \ +--header 'Origin: http://test.com' \ +--header 'Content-Type: application/grpc-web-text' \ +--data-raw 'AAAAAAcKBXdvcmxkCgo=' +--- response_body eval +qr/HTTP\/1.1 200 OK/ and qr/Access-Control-Allow-Origin: http:\/\/test.com/ + + + +=== TEST 11: check for Access-Control-Expose-Headers header in response +--- exec +curl -iv --location 'http://127.0.0.1:1984/grpc/web/a6.RouteService/GetRoute' \ +--header 'Origin: http://test.com' \ +--header 'Content-Type: application/grpc-web-text' \ +--data-raw 'AAAAAAcKBXdvcmxkCgo=' +--- response_body eval +qr/Access-Control-Expose-Headers: 
grpc-message,grpc-status/ and qr/Access-Control-Allow-Origin: http:\/\/test.com/ + + + +=== TEST 12: verify trailers in response +According to the gRPC documentation, the grpc-web proxy should not retain trailers received from upstream when +forwarding them, as the reference implementation envoy does, so the current test case is status quo rather +than "correct", which is not expected to have an impact since browsers ignore trailers. +Currently there is no API or hook point available in nginx/lua-nginx-module to remove specified trailers +on demand (grpc_hide_header can do it but it affects the grpc proxy), and some nginx patches may be needed +to allow for code-controlled removal of the trailer at runtime. +When we implement that, this use case will be removed. +--- exec +curl -iv --location 'http://127.0.0.1:1984/grpc/web/a6.RouteService/GetRoute' \ +--header 'Content-Type: application/grpc-web+proto' \ +--header 'X-Grpc-Web: 1' \ +--data-binary '@./t/plugin/grpc-web/req.bin' +--- response_body eval +qr/grpc-status:0\x0d\x0agrpc-message:/ + + + +=== TEST 13: confg default response route +--- config + location /t { + content_by_lua_block { + local config = { + uri = "/grpc/web/*", + upstream = { + scheme = "grpc", + type = "roundrobin", + nodes = { + ["127.0.0.1:50001"] = 1 + } + }, + plugins = { + ["grpc-web"] = {} + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: check header in default response +--- request +OPTIONS /grpc/web/a6.RouteService/GetRoute +--- error_code: 204 +--- response_headers +Access-Control-Allow-Methods: POST +Access-Control-Allow-Headers: content-type,x-grpc-web,x-user-agent +Access-Control-Allow-Origin: * +Access-Control-Expose-Headers: grpc-message,grpc-status + + + +=== TEST 15: Custom configuration routing +--- config + 
location /t { + content_by_lua_block { + local config = { + uri = "/grpc/web/*", + upstream = { + scheme = "grpc", + type = "roundrobin", + nodes = { + ["127.0.0.1:50001"] = 1 + } + }, + plugins = { + ["grpc-web"] = { + cors_allow_headers = "grpc-accept-encoding" + } + } + } + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: check header in default response +--- request +OPTIONS /grpc/web/a6.RouteService/GetRoute +--- error_code: 204 +--- response_headers +Access-Control-Allow-Methods: POST +Access-Control-Allow-Headers: grpc-accept-encoding +Access-Control-Allow-Origin: * +Access-Control-Expose-Headers: grpc-message,grpc-status diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route.pb.go b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route.pb.go new file mode 100644 index 0000000..ee8e6cc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route.pb.go @@ -0,0 +1,290 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package a6 + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Query struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { + return fileDescriptor_0984d49a362b6b9f, []int{0} +} + +func (m *Query) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Query.Unmarshal(m, b) +} +func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Query.Marshal(b, m, deterministic) +} +func (m *Query) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query.Merge(m, src) +} +func (m *Query) XXX_Size() int { + return xxx_messageInfo_Query.Size(m) +} +func (m *Query) XXX_DiscardUnknown() { + xxx_messageInfo_Query.DiscardUnknown(m) +} + +var xxx_messageInfo_Query proto.InternalMessageInfo + +func (m *Query) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type Route struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Route) Reset() { *m = Route{} } +func (m *Route) String() string { return proto.CompactTextString(m) } +func (*Route) ProtoMessage() {} +func (*Route) Descriptor() ([]byte, []int) { + return fileDescriptor_0984d49a362b6b9f, []int{1} +} + +func (m *Route) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Route.Unmarshal(m, b) +} +func (m *Route) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Route.Marshal(b, m, deterministic) +} +func (m *Route) XXX_Merge(src proto.Message) { + xxx_messageInfo_Route.Merge(m, src) +} +func (m *Route) XXX_Size() int { + return xxx_messageInfo_Route.Size(m) +} +func (m *Route) XXX_DiscardUnknown() { + xxx_messageInfo_Route.DiscardUnknown(m) +} + +var xxx_messageInfo_Route proto.InternalMessageInfo + +func (m *Route) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Route) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func init() { + proto.RegisterType((*Query)(nil), "a6.Query") + proto.RegisterType((*Route)(nil), "a6.Route") +} + +func init() { proto.RegisterFile("route.proto", fileDescriptor_0984d49a362b6b9f) } + +var fileDescriptor_0984d49a362b6b9f = []byte{ + // 149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xca, 0x2f, 0x2d, + 0x49, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4a, 0x34, 0x53, 0x92, 0xe6, 0x62, 0x0d, + 0x2c, 0x4d, 0x2d, 0xaa, 0x14, 0x12, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, + 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x95, 0xf4, 0xb9, 0x58, 0x83, 0x40, 0xea, 0xb1, 0x49, 0x82, 0xc4, + 0x0a, 0x12, 0x4b, 0x32, 0x24, 0x98, 0x20, 0x62, 0x20, 0xb6, 0x51, 0x24, 0x17, 0x0f, 0x58, 0x43, + 0x70, 0x6a, 0x51, 0x59, 0x66, 0x72, 0xaa, 0x90, 0x12, 0x17, 0x87, 0x7b, 0x6a, 0x09, 0xc4, 0x0c, + 0x4e, 0xbd, 0x44, 0x33, 0x3d, 0xb0, 0x5d, 
0x52, 0x60, 0x26, 0x58, 0x54, 0x89, 0x41, 0x48, 0x95, + 0x8b, 0x13, 0xa6, 0xa6, 0x18, 0x97, 0x22, 0x03, 0x46, 0x27, 0xf6, 0x28, 0x56, 0x3d, 0x7d, 0xeb, + 0x44, 0xb3, 0x24, 0x36, 0xb0, 0xe3, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x54, 0xf0, 0x73, + 0x63, 0xcb, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// RouteServiceClient is the client API for RouteService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RouteServiceClient interface { + GetRoute(ctx context.Context, in *Query, opts ...grpc.CallOption) (*Route, error) + GetRoutes(ctx context.Context, in *Query, opts ...grpc.CallOption) (RouteService_GetRoutesClient, error) +} + +type routeServiceClient struct { + cc *grpc.ClientConn +} + +func NewRouteServiceClient(cc *grpc.ClientConn) RouteServiceClient { + return &routeServiceClient{cc} +} + +func (c *routeServiceClient) GetRoute(ctx context.Context, in *Query, opts ...grpc.CallOption) (*Route, error) { + out := new(Route) + err := c.cc.Invoke(ctx, "/a6.RouteService/GetRoute", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *routeServiceClient) GetRoutes(ctx context.Context, in *Query, opts ...grpc.CallOption) (RouteService_GetRoutesClient, error) { + stream, err := c.cc.NewStream(ctx, &_RouteService_serviceDesc.Streams[0], "/a6.RouteService/GetRoutes", opts...) 
+ if err != nil { + return nil, err + } + x := &routeServiceGetRoutesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type RouteService_GetRoutesClient interface { + Recv() (*Route, error) + grpc.ClientStream +} + +type routeServiceGetRoutesClient struct { + grpc.ClientStream +} + +func (x *routeServiceGetRoutesClient) Recv() (*Route, error) { + m := new(Route) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// RouteServiceServer is the server API for RouteService service. +type RouteServiceServer interface { + GetRoute(context.Context, *Query) (*Route, error) + GetRoutes(*Query, RouteService_GetRoutesServer) error +} + +// UnimplementedRouteServiceServer can be embedded to have forward compatible implementations. +type UnimplementedRouteServiceServer struct { +} + +func (*UnimplementedRouteServiceServer) GetRoute(ctx context.Context, req *Query) (*Route, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetRoute not implemented") +} +func (*UnimplementedRouteServiceServer) GetRoutes(req *Query, srv RouteService_GetRoutesServer) error { + return status.Errorf(codes.Unimplemented, "method GetRoutes not implemented") +} + +func RegisterRouteServiceServer(s *grpc.Server, srv RouteServiceServer) { + s.RegisterService(&_RouteService_serviceDesc, srv) +} + +func _RouteService_GetRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Query) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouteServiceServer).GetRoute(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/a6.RouteService/GetRoute", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(RouteServiceServer).GetRoute(ctx, req.(*Query)) + } + return interceptor(ctx, in, info, handler) +} + +func _RouteService_GetRoutes_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(Query) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RouteServiceServer).GetRoutes(m, &routeServiceGetRoutesServer{stream}) +} + +type RouteService_GetRoutesServer interface { + Send(*Route) error + grpc.ServerStream +} + +type routeServiceGetRoutesServer struct { + grpc.ServerStream +} + +func (x *routeServiceGetRoutesServer) Send(m *Route) error { + return x.ServerStream.SendMsg(m) +} + +var _RouteService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "a6.RouteService", + HandlerType: (*RouteServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetRoute", + Handler: _RouteService_GetRoute_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetRoutes", + Handler: _RouteService_GetRoutes_Handler, + ServerStreams: true, + }, + }, + Metadata: "route.proto", +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route.proto b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route.proto new file mode 100644 index 0000000..5a9a4ae --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route.proto @@ -0,0 +1,36 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package a6; + +option go_package = "./;a6"; + +service RouteService { + rpc GetRoute(Query) returns (Route) {} + rpc GetRoutes(Query) returns (stream Route) {} +} + +message Query { + string name = 1; +} + +message Route { + string name = 1; + string path = 2; +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route_grpc_web_bin_pb.js b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route_grpc_web_bin_pb.js new file mode 100644 index 0000000..bb137af --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route_grpc_web_bin_pb.js @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +const grpc = {}; +grpc.web = require('grpc-web'); + +const proto = {}; +proto.a6 = require('./route_pb.js'); + +/** + * @param {string} hostname + * @param {?Object} credentials + * @param {?grpc.web.ClientOptions} options + * @constructor + * @struct + * @final + */ +proto.a6.RouteServiceClient = + function(hostname, credentials, options) { + if (!options) options = {}; + options.format = 'binary'; + + /** + * @private @const {!grpc.web.GrpcWebClientBase} The client + */ + this.client_ = new grpc.web.GrpcWebClientBase(options); + + /** + * @private @const {string} The hostname + */ + this.hostname_ = hostname; + +}; + + +/** + * @param {string} hostname + * @param {?Object} credentials + * @param {?grpc.web.ClientOptions} options + * @constructor + * @struct + * @final + */ +proto.a6.RouteServicePromiseClient = + function(hostname, credentials, options) { + if (!options) options = {}; + options.format = 'binary'; + + /** + * @private @const {!grpc.web.GrpcWebClientBase} The client + */ + this.client_ = new grpc.web.GrpcWebClientBase(options); + + /** + * @private @const {string} The hostname + */ + this.hostname_ = hostname; + +}; + + +/** + * @const + * @type {!grpc.web.MethodDescriptor< + * !proto.a6.Query, + * !proto.a6.Route>} + */ +const methodDescriptor_RouteService_GetRoute = new grpc.web.MethodDescriptor( + '/a6.RouteService/GetRoute', + grpc.web.MethodType.UNARY, + proto.a6.Query, + proto.a6.Route, + /** + * @param {!proto.a6.Query} request + * @return {!Uint8Array} + */ + function(request) { + return request.serializeBinary(); + }, + proto.a6.Route.deserializeBinary +); + + +/** + * @param {!proto.a6.Query} request The + * request proto + * @param {?Object} metadata User defined + * call metadata + * @param {function(?grpc.web.RpcError, ?proto.a6.Route)} + * callback The callback function(error, response) + * @return {!grpc.web.ClientReadableStream|undefined} + * The XHR Node Readable Stream + */ +proto.a6.RouteServiceClient.prototype.getRoute = 
+ function(request, metadata, callback) { + return this.client_.rpcCall(this.hostname_ + + '/a6.RouteService/GetRoute', + request, + metadata || {}, + methodDescriptor_RouteService_GetRoute, + callback); +}; + + +/** + * @param {!proto.a6.Query} request The + * request proto + * @param {?Object=} metadata User defined + * call metadata + * @return {!Promise} + * Promise that resolves to the response + */ +proto.a6.RouteServicePromiseClient.prototype.getRoute = + function(request, metadata) { + return this.client_.unaryCall(this.hostname_ + + '/a6.RouteService/GetRoute', + request, + metadata || {}, + methodDescriptor_RouteService_GetRoute); +}; + + +/** + * @const + * @type {!grpc.web.MethodDescriptor< + * !proto.a6.Query, + * !proto.a6.Route>} + */ +const methodDescriptor_RouteService_GetRoutes = new grpc.web.MethodDescriptor( + '/a6.RouteService/GetRoutes', + grpc.web.MethodType.SERVER_STREAMING, + proto.a6.Query, + proto.a6.Route, + /** + * @param {!proto.a6.Query} request + * @return {!Uint8Array} + */ + function(request) { + return request.serializeBinary(); + }, + proto.a6.Route.deserializeBinary +); + + +/** + * @param {!proto.a6.Query} request The request proto + * @param {?Object=} metadata User defined + * call metadata + * @return {!grpc.web.ClientReadableStream} + * The XHR Node Readable Stream + */ +proto.a6.RouteServiceClient.prototype.getRoutes = + function(request, metadata) { + return this.client_.serverStreaming(this.hostname_ + + '/a6.RouteService/GetRoutes', + request, + metadata || {}, + methodDescriptor_RouteService_GetRoutes); +}; + + +/** + * @param {!proto.a6.Query} request The request proto + * @param {?Object=} metadata User defined + * call metadata + * @return {!grpc.web.ClientReadableStream} + * The XHR Node Readable Stream + */ +proto.a6.RouteServicePromiseClient.prototype.getRoutes = + function(request, metadata) { + return this.client_.serverStreaming(this.hostname_ + + '/a6.RouteService/GetRoutes', + request, + metadata || {}, + 
methodDescriptor_RouteService_GetRoutes); +}; + + +module.exports = proto.a6; + diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route_grpc_web_text_pb.js b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route_grpc_web_text_pb.js new file mode 100644 index 0000000..1a108cc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route_grpc_web_text_pb.js @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +const grpc = {}; +grpc.web = require('grpc-web'); + +const proto = {}; +proto.a6 = require('./route_pb.js'); + +/** + * @param {string} hostname + * @param {?Object} credentials + * @param {?grpc.web.ClientOptions} options + * @constructor + * @struct + * @final + */ +proto.a6.RouteServiceClient = + function(hostname, credentials, options) { + if (!options) options = {}; + options.format = 'text'; + + /** + * @private @const {!grpc.web.GrpcWebClientBase} The client + */ + this.client_ = new grpc.web.GrpcWebClientBase(options); + + /** + * @private @const {string} The hostname + */ + this.hostname_ = hostname; + +}; + + +/** + * @param {string} hostname + * @param {?Object} credentials + * @param {?grpc.web.ClientOptions} options + * @constructor + * @struct + * @final + */ +proto.a6.RouteServicePromiseClient = + function(hostname, credentials, options) { + if (!options) options = {}; + options.format = 'text'; + + /** + * @private @const {!grpc.web.GrpcWebClientBase} The client + */ + this.client_ = new grpc.web.GrpcWebClientBase(options); + + /** + * @private @const {string} The hostname + */ + this.hostname_ = hostname; + +}; + + +/** + * @const + * @type {!grpc.web.MethodDescriptor< + * !proto.a6.Query, + * !proto.a6.Route>} + */ +const methodDescriptor_RouteService_GetRoute = new grpc.web.MethodDescriptor( + '/a6.RouteService/GetRoute', + grpc.web.MethodType.UNARY, + proto.a6.Query, + proto.a6.Route, + /** + * @param {!proto.a6.Query} request + * @return {!Uint8Array} + */ + function(request) { + return request.serializeBinary(); + }, + proto.a6.Route.deserializeBinary +); + + +/** + * @param {!proto.a6.Query} request The + * request proto + * @param {?Object} metadata User defined + * call metadata + * @param {function(?grpc.web.RpcError, ?proto.a6.Route)} + * callback The callback function(error, response) + * @return {!grpc.web.ClientReadableStream|undefined} + * The XHR Node Readable Stream + */ +proto.a6.RouteServiceClient.prototype.getRoute = + 
function(request, metadata, callback) { + return this.client_.rpcCall(this.hostname_ + + '/a6.RouteService/GetRoute', + request, + metadata || {}, + methodDescriptor_RouteService_GetRoute, + callback); +}; + + +/** + * @param {!proto.a6.Query} request The + * request proto + * @param {?Object=} metadata User defined + * call metadata + * @return {!Promise} + * Promise that resolves to the response + */ +proto.a6.RouteServicePromiseClient.prototype.getRoute = + function(request, metadata) { + return this.client_.unaryCall(this.hostname_ + + '/a6.RouteService/GetRoute', + request, + metadata || {}, + methodDescriptor_RouteService_GetRoute); +}; + + +/** + * @const + * @type {!grpc.web.MethodDescriptor< + * !proto.a6.Query, + * !proto.a6.Route>} + */ +const methodDescriptor_RouteService_GetRoutes = new grpc.web.MethodDescriptor( + '/a6.RouteService/GetRoutes', + grpc.web.MethodType.SERVER_STREAMING, + proto.a6.Query, + proto.a6.Route, + /** + * @param {!proto.a6.Query} request + * @return {!Uint8Array} + */ + function(request) { + return request.serializeBinary(); + }, + proto.a6.Route.deserializeBinary +); + + +/** + * @param {!proto.a6.Query} request The request proto + * @param {?Object=} metadata User defined + * call metadata + * @return {!grpc.web.ClientReadableStream} + * The XHR Node Readable Stream + */ +proto.a6.RouteServiceClient.prototype.getRoutes = + function(request, metadata) { + return this.client_.serverStreaming(this.hostname_ + + '/a6.RouteService/GetRoutes', + request, + metadata || {}, + methodDescriptor_RouteService_GetRoutes); +}; + + +/** + * @param {!proto.a6.Query} request The request proto + * @param {?Object=} metadata User defined + * call metadata + * @return {!grpc.web.ClientReadableStream} + * The XHR Node Readable Stream + */ +proto.a6.RouteServicePromiseClient.prototype.getRoutes = + function(request, metadata) { + return this.client_.serverStreaming(this.hostname_ + + '/a6.RouteService/GetRoutes', + request, + metadata || {}, + 
methodDescriptor_RouteService_GetRoutes); +}; + + +module.exports = proto.a6; + diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route_pb.js b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route_pb.js new file mode 100644 index 0000000..a444d8f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/a6/route_pb.js @@ -0,0 +1,356 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +var jspb = require('google-protobuf'); +var goog = jspb; +var global = Function('return this')(); + +goog.exportSymbol('proto.a6.Query', null, global); +goog.exportSymbol('proto.a6.Route', null, global); +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. 
+ * @extends {jspb.Message} + * @constructor + */ +proto.a6.Query = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.a6.Query, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.a6.Query.displayName = 'proto.a6.Query'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.a6.Route = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.a6.Route, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.a6.Route.displayName = 'proto.a6.Route'; +} + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.a6.Query.prototype.toObject = function(opt_includeInstance) { + return proto.a6.Query.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. 
Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.a6.Query} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.a6.Query.toObject = function(includeInstance, msg) { + var f, obj = { + name: jspb.Message.getFieldWithDefault(msg, 1, "") + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.a6.Query} + */ +proto.a6.Query.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.a6.Query; + return proto.a6.Query.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.a6.Query} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.a6.Query} + */ +proto.a6.Query.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setName(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.a6.Query.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.a6.Query.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.a6.Query} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.a6.Query.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getName(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } +}; + + +/** + * optional string name = 1; + * @return {string} + */ +proto.a6.Query.prototype.getName = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.a6.Query} returns this + */ +proto.a6.Query.prototype.setName = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.a6.Route.prototype.toObject = function(opt_includeInstance) { + return proto.a6.Route.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.a6.Route} msg The msg instance to transform. 
+ * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.a6.Route.toObject = function(includeInstance, msg) { + var f, obj = { + name: jspb.Message.getFieldWithDefault(msg, 1, ""), + path: jspb.Message.getFieldWithDefault(msg, 2, "") + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.a6.Route} + */ +proto.a6.Route.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.a6.Route; + return proto.a6.Route.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.a6.Route} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.a6.Route} + */ +proto.a6.Route.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setName(value); + break; + case 2: + var value = /** @type {string} */ (reader.readString()); + msg.setPath(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.a6.Route.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.a6.Route.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.a6.Route} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.a6.Route.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getName(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } + f = message.getPath(); + if (f.length > 0) { + writer.writeString( + 2, + f + ); + } +}; + + +/** + * optional string name = 1; + * @return {string} + */ +proto.a6.Route.prototype.getName = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.a6.Route} returns this + */ +proto.a6.Route.prototype.setName = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + +/** + * optional string path = 2; + * @return {string} + */ +proto.a6.Route.prototype.getPath = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); +}; + + +/** + * @param {string} value + * @return {!proto.a6.Route} returns this + */ +proto.a6.Route.prototype.setPath = function(value) { + return jspb.Message.setProto3StringField(this, 2, value); +}; + + +goog.object.extend(exports, proto.a6); diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/client.js b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/client.js new file mode 100644 index 0000000..9ec0441 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/client.js @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +global.XMLHttpRequest = require('xhr2') + +const RouteServiceQuery = require('./a6/route_pb').Query +const RouteServiceBinProtocolClient = require('./a6/route_grpc_web_bin_pb').RouteServiceClient +const RouteServiceTextProtocolClient = require('./a6/route_grpc_web_text_pb').RouteServiceClient + +const MODE_TEXT = "TEXT" +const MODE_BIN = "BIN" + +const modes = [MODE_TEXT, MODE_BIN]; + + +const TYPE_UNARY = "UNARY" +const TYPE_STREAM = "STREAM" + +const types = [TYPE_UNARY, TYPE_STREAM]; + + +class gRPCWebClient { + constructor() { + this.clients = {} + this.clients[MODE_BIN] = new RouteServiceBinProtocolClient("http://127.0.0.1:1984/grpc/web"); + this.clients[MODE_TEXT] = new RouteServiceTextProtocolClient("http://127.0.0.1:1984/grpc/web"); + }; + + unary(mode) { + let query = new RouteServiceQuery() + query.setName("hello") + this.clients[mode].getRoute(query, {}, function (error, response) { + if (error) { + console.log(error); + return + } + console.log(JSON.stringify(response.toObject())); + }).on("status", function (status) { + console.log("Status:", status); + }); + } + + stream(mode) { + let query = new RouteServiceQuery() + var stream = this.clients[mode].getRoutes(query, {}); + stream.on('data', function(response) { + console.log(JSON.stringify(response.toObject())); + }); + + stream.on('end', function(end) { + stream.cancel(); + }); + + stream.on("status", function (status) { + console.log("Status:", status); + }); + } +} + + +const arguments = process.argv.splice(2) + +if (arguments.length !== 2) { + console.log("please input 
dispatch function, e.g: node client.js [mode] [type]") + return +} + +const mode = arguments[0].toUpperCase() +if (!modes.includes(mode)) { + console.log("dispatch mode not found") + return +} + +const t = arguments[1].toUpperCase() +if (!types.includes(t)) { + console.log("dispatch types not found") + return +} + +let grpc = new gRPCWebClient(); + +if (t === TYPE_UNARY) { + grpc.unary(mode) +} else { + grpc.stream(mode) +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/go.mod b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/go.mod new file mode 100644 index 0000000..b1cfda9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/go.mod @@ -0,0 +1,8 @@ +module apisix.apache.org/plugin/grpc-web + +go 1.16 + +require ( + github.com/golang/protobuf v1.5.2 + google.golang.org/grpc v1.53.0 +) diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/go.sum b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/go.sum new file mode 100644 index 0000000..2c5b51f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/go.sum @@ -0,0 +1,1112 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go 
v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= 
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= 
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= 
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= 
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod 
h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= 
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow 
v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod 
h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= 
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= 
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod 
h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod 
h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod 
h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= 
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod 
h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= 
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration 
v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod 
h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod 
h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero 
v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp 
v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint 
v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net 
v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= 
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= 
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod 
h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= 
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod 
h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod 
h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc 
v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod 
h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/package-lock.json b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/package-lock.json new file mode 100644 index 0000000..02c7f98 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/package-lock.json @@ -0,0 +1,52 @@ +{ + "name": "apisix-grpc-web", + "lockfileVersion": 2, + "requires": true, + "packages": { + "": { + "name": "apisix-grpc-web", + "dependencies": { + "google-protobuf": "^3.19.1", + "grpc-web": "^1.3.0", + "xhr2": "^0.2.1" + } + }, + "node_modules/google-protobuf": { + "version": "3.19.1", + "resolved": "https://registry.npmmirror.com/google-protobuf/download/google-protobuf-3.19.1.tgz?cache=0&sync_timestamp=1635869461201&other_urls=https%3A%2F%2Fregistry.npmmirror.com%2Fgoogle-protobuf%2Fdownload%2Fgoogle-protobuf-3.19.1.tgz", + "integrity": "sha1-WvU5DoIGxEbY9J/rr/1Lf0rCj0E=", + "license": "BSD-3-Clause" + }, + "node_modules/grpc-web": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/grpc-web/download/grpc-web-1.3.0.tgz", + "integrity": "sha1-TDbZfnp7YQKn30Y+eCLNhtT2Xtg=", + "license": "Apache-2.0" + }, + "node_modules/xhr2": { + "version": "0.2.1", + "resolved": 
"https://registry.npmmirror.com/xhr2/download/xhr2-0.2.1.tgz", + "integrity": "sha1-TnOtxPnP7Jy9IVf3Pv3OOl8QipM=", + "engines": { + "node": ">= 6" + } + } + }, + "dependencies": { + "google-protobuf": { + "version": "3.19.1", + "resolved": "https://registry.npmmirror.com/google-protobuf/download/google-protobuf-3.19.1.tgz?cache=0&sync_timestamp=1635869461201&other_urls=https%3A%2F%2Fregistry.npmmirror.com%2Fgoogle-protobuf%2Fdownload%2Fgoogle-protobuf-3.19.1.tgz", + "integrity": "sha1-WvU5DoIGxEbY9J/rr/1Lf0rCj0E=" + }, + "grpc-web": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/grpc-web/download/grpc-web-1.3.0.tgz", + "integrity": "sha1-TDbZfnp7YQKn30Y+eCLNhtT2Xtg=" + }, + "xhr2": { + "version": "0.2.1", + "resolved": "https://registry.npmmirror.com/xhr2/download/xhr2-0.2.1.tgz", + "integrity": "sha1-TnOtxPnP7Jy9IVf3Pv3OOl8QipM=" + } + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/package.json b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/package.json new file mode 100644 index 0000000..29b035c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/package.json @@ -0,0 +1,8 @@ +{ + "name": "apisix-grpc-web", + "dependencies": { + "google-protobuf": "^3.19.1", + "grpc-web": "^1.3.0", + "xhr2": "^0.2.1" + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/req.bin b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/req.bin new file mode 100644 index 0000000..908c829 Binary files /dev/null and b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/req.bin differ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/server.go b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/server.go new file mode 100644 index 0000000..3eaaa75 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/server.go @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "encoding/json" + "flag" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "log" + "net" + pb "apisix.apache.org/plugin/grpc-web/a6" +) + +type routeServiceServer struct { + savedRoutes []*pb.Route +} + +func (rss *routeServiceServer) GetRoute(ctx context.Context, req *pb.Query) (*pb.Route, error) { + var r *pb.Route + if len(req.Name) <= 0 { + return nil, status.Errorf(codes.InvalidArgument, "query params invalid") + } + + for _, savedRoute := range rss.savedRoutes { + if savedRoute.Name == req.Name { + r = savedRoute + break + } + } + + if r == nil { + return nil, status.Errorf(codes.NotFound, "route not found") + } + + return r, nil +} + +func (rss *routeServiceServer) GetRoutes(req *pb.Query, srv pb.RouteService_GetRoutesServer) error { + if len(rss.savedRoutes) <= 0 { + return status.Errorf(codes.NotFound, "routes data is empty") + } + for _, savedRoute := range rss.savedRoutes { + if err := srv.Send(savedRoute); err != nil { + return err + } + } + + return nil +} + +func (rss *routeServiceServer) LoadRoutes() { + if err := json.Unmarshal(exampleData, &rss.savedRoutes); err != nil { + log.Fatalf("Failed to load default routes: %v", err) + } +} + +var exampleData = []byte(`[ 
+{ + "name":"hello", + "path":"/hello" +}, +{ + "name":"world", + "path":"/world" +}]`) + +var ServerPort = ":50001" + +func main() { + flag.Parse() + + lis, err := net.Listen("tcp", ServerPort) + if err != nil { + log.Fatalf("failed to listen gRPC-Web Test Server: %v", err) + } else { + log.Printf("successful to listen gRPC-Web Test Server, address %s", ServerPort) + } + + s := routeServiceServer{} + s.LoadRoutes() + var opts []grpc.ServerOption + grpcServer := grpc.NewServer(opts...) + pb.RegisterRouteServiceServer(grpcServer, &s) + + if err = grpcServer.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/setup.sh b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/setup.sh new file mode 100755 index 0000000..4305ee4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/grpc-web/setup.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -ex + +npm install + +CGO_ENABLED=0 go build -o grpc-web-server server.go + +./grpc-web-server > grpc-web-server.log 2>&1 || (cat grpc-web-server.log && exit 1)& diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/gzip.t b/CloudronPackages/APISIX/apisix-source/t/plugin/gzip.t new file mode 100644 index 0000000..8486be3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/gzip.t @@ -0,0 +1,542 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "gzip": { + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: hit +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Content-Type: text/html +--- response_headers +Content-Encoding: gzip +Vary: + + + +=== TEST 3: default buffers and compress level +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.gzip") + local core = require("apisix.core") + local json = require("toolkit.json") + + for _, conf in ipairs({ + {}, + {buffers = {}}, + {buffers = {number = 1}}, + {buffers = {size = 1}}, + }) do + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + return + end + ngx.say(json.encode(conf.buffers)) + end + } + } +--- response_body +{"number":32,"size":4096} +{"number":32,"size":4096} +{"number":1,"size":4096} +{"number":32,"size":1} + + + +=== TEST 4: compress level +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/echo", + "vars": [["http_x", "==", "1"]], + 
"upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "gzip": { + "comp_level": 1 + } + } + }]=] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "uri": "/echo", + "vars": [["http_x", "==", "2"]], + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "gzip": { + "comp_level": 9 + } + } + }]=] + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 5: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/echo" + local httpc = http.new() + local res, err = httpc:request_uri(uri, + {method = "POST", headers = {x = "1"}, body = ("0123"):rep(1024)}) + if not res then + ngx.say(err) + return + end + local less_compressed = res.body + local res, err = httpc:request_uri(uri, + {method = "POST", headers = {x = "2"}, body = ("0123"):rep(1024)}) + if not res then + ngx.say(err) + return + end + if #less_compressed < 4096 and #less_compressed < #res.body then + ngx.say("ok") + end + } + } +--- response_body +ok + + + +=== TEST 6: min length +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "gzip": { + "min_length": 21 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 7: not hit +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Content-Type: text/html +--- response_headers +Content-Encoding: + + + +=== TEST 8: http version +--- config + location /t { + content_by_lua_block 
{ + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "gzip": { + "http_version": 1.1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 9: not hit +--- request +POST /echo HTTP/1.0 +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Content-Type: text/html +--- response_headers +Content-Encoding: + + + +=== TEST 10: hit again +--- request +POST /echo HTTP/1.1 +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Content-Type: text/html +--- response_headers +Content-Encoding: gzip + + + +=== TEST 11: types +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "gzip": { + "types": ["text/plain", "text/xml"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 12: not hit +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Content-Type: text/html +--- response_headers +Content-Encoding: + + + +=== TEST 13: hit again +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Content-Type: text/xml +--- response_headers +Content-Encoding: gzip + + + +=== TEST 14: hit with charset +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Content-Type: text/plain; charset=UTF-8 +--- response_headers +Content-Encoding: gzip + + + +=== TEST 15: match all types +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "gzip": { + "types": "*" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 16: hit +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Content-Type: video/3gpp +--- response_headers +Content-Encoding: gzip + + + +=== TEST 17: vary +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "gzip": { + "vary": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 18: hit +--- request +POST /echo +0123456789 +012345678 +--- more_headers +Accept-Encoding: gzip +Vary: upstream +Content-Type: text/html +--- response_headers +Content-Encoding: gzip +Vary: upstream, Accept-Encoding + + + +=== TEST 19: schema check +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, case in ipairs({ + {input = { + types = {} + }}, + {input = { + min_length = 0 + }}, + {input = { + comp_level = 10 + }}, + {input = { + http_version = 2 + }}, + {input = { + buffers = { + number = 0, + } + }}, + {input = { + buffers = { + size = 0, + } + }}, + {input = { + vary = 0 + }} + }) do + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + { + id = "1", + plugins = { + ["gzip"] = case.input + } + } + ) + ngx.print(body) + end + } +} +--- response_body +{"error_msg":"failed to check the configuration of plugin gzip err: property \"types\" validation failed: object matches none of the required"} +{"error_msg":"failed to check the configuration 
of plugin gzip err: property \"min_length\" validation failed: expected 0 to be at least 1"} +{"error_msg":"failed to check the configuration of plugin gzip err: property \"comp_level\" validation failed: expected 10 to be at most 9"} +{"error_msg":"failed to check the configuration of plugin gzip err: property \"http_version\" validation failed: matches none of the enum values"} +{"error_msg":"failed to check the configuration of plugin gzip err: property \"buffers\" validation failed: property \"number\" validation failed: expected 0 to be at least 1"} +{"error_msg":"failed to check the configuration of plugin gzip err: property \"buffers\" validation failed: property \"size\" validation failed: expected 0 to be at least 1"} +{"error_msg":"failed to check the configuration of plugin gzip err: property \"vary\" validation failed: wrong type: expected boolean, got number"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth-anonymous-consumer.t b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth-anonymous-consumer.t new file mode 100644 index 0000000..ea80ea2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth-anonymous-consumer.t @@ -0,0 +1,189 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + + +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +apisix: + data_encryption: + enable_encrypt_fields: false +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); +}); + + +run_tests; + +__DATA__ + +=== TEST 1: add consumer jack and anonymous +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "hmac-auth": { + "key_id": "user-key", + "secret_key": "my-secret-key" + }, + "limit-count": { + "count": 4, + "time_window": 60 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +passed + + + +=== TEST 2: add hmac auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: request without hmac-auth header will be from anonymous consumer and it will pass +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 4: request without hmac-auth header will be from anonymous consumer and different 
rate limit will apply +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 503, 503, 503] + + + +=== TEST 5: add hmac auth plugin with non-existent anonymous_consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "anonymous_consumer": "not-found-anonymous" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: anonymous-consumer configured in the route should not be found +--- request +GET /hello +--- error_code: 401 +--- error_log +failed to get anonymous consumer not-found-anonymous +--- response_body +{"message":"Invalid user authorization"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth.t b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth.t new file mode 100644 index 0000000..68029b3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth.t @@ -0,0 +1,1174 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); +run_tests; + +__DATA__ + +=== TEST 1: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "hmac-auth": { + "key_id": "my-access-key", + "secret_key": "my-secret-key" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: add consumer with plugin hmac-auth - missing secret key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "hmac-auth": { + "key_id": "user-key" + } + } + }]]) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin hmac-auth err: property \\"secret_key\\" is required"\}/ + + + +=== TEST 3: add consumer with plugin hmac-auth - missing key_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "bar", + "plugins": { + "hmac-auth": { + "secret_key": "skey" + } + } + }]]) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin hmac-auth err: property \\"key_id\\" is required"\}/ + + + +=== TEST 4: add consumer with plugin hmac-auth - key id exceeds the length limit +--- config + 
location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "li", + "plugins": { + "hmac-auth": { + "key_id": "akeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakeyakey", + "secret_key": "skey" + } + } + }]]) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin hmac-auth err: property \\"key_id\\" validation failed: string too long, expected at most 256, got 320"\}/ + + + +=== TEST 5: add consumer with plugin hmac-auth - secret key exceeds the length limit +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "zhang", + "plugins": { + "hmac-auth": { + "key_id": "akey", + "secret_key": "skeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskeyskey" + } + } + }]]) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin hmac-auth err: property \\"secret_key\\" validation failed: string too long, expected at most 256, got 384"\}/ + + + +=== TEST 6: enable hmac auth 
plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: verify,missing Authorization header +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"client request can't be validated: missing Authorization header"} +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: missing Authorization header + + + +=== TEST 8: verify, missing algorithm +--- request +GET /hello +--- more_headers +Authorization: Signature keyId="my-access-key",headers="@request-target date" ,signature="asdf" +Date: Thu, 24 Sep 2020 06:39:52 GMT +--- error_code: 401 +--- response_body +{"message":"client request can't be validated"} +--- grep_error_log eval +qr/client request can't be validated[^,]+/ +--- grep_error_log_out +client request can't be validated: algorithm missing + + + +=== TEST 9: verify: invalid key_id +--- request +GET /hello +--- more_headers +Authorization: Signature keyId="sdf",algorithm="hmac-sha256",headers="@request-target date",signature="asdf" +Date: Thu, 24 Sep 2020 06:39:52 GMT +--- error_code: 401 +--- response_body +{"message":"client request can't be validated"} +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid key_id + + + +=== TEST 10: verify: invalid algorithm +--- request +GET /hello +--- more_headers +Authorization: Signature keyId="my-access-key",algorithm="ljlj",headers="@request-target date",signature="asdf" +Date: Thu, 24 Sep 2020 06:39:52 GMT +--- 
error_code: 401 +--- response_body +{"message":"client request can't be validated"} +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid algorithm + + + +=== TEST 11: verify: Clock skew exceeded +--- request +GET /hello +--- more_headers +Authorization: Signature keyId="my-access-key",algorithm="hmac-sha256",headers="@request-target date",signature="asdf" +Date: Thu, 24 Sep 2020 06:39:52 GMT +--- error_code: 401 +--- response_body +{"message":"client request can't be validated"} +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Clock skew exceeded + + + +=== TEST 12: verify: missing Date +--- request +GET /hello +--- more_headers +Authorization: Signature keyId="my-access-key",algorithm="hmac-sha256",headers="@request-target date",signature="asdf" +--- error_code: 401 +--- response_body +{"message":"client request can't be validated"} +--- grep_error_log eval +qr/client request can't be validated: Date header missing/ +--- grep_error_log_out +client request can't be validated: Date header missing + + + +=== TEST 13: verify: Invalid GMT format time +--- request +GET /hello +--- more_headers +Authorization: Signature keyId="my-access-key",algorithm="hmac-sha256",headers="@request-target date",signature="asdf" +Date: adfsdf +--- error_code: 401 +--- response_body +{"message":"client request can't be validated"} +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid GMT format time + + + +=== TEST 14: verify: ok +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" 
+ local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + core.log.info("signing_string:", signing_string) + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature algorithm=\"hmac-sha256\"" .. ",keyId=\"" .. key_id .. "\",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: add route with 0 clock skew +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "clock_skew": 0 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code == 400 then + ngx.status = code; ngx.say(body) + end + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/.*failed to check the configuration of plugin hmac-auth err.*/ + + + +=== TEST 16: add route with valid clock skew +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "key_id": 
"my-access-key3", + "secret_key": "my-secret-key3", + "clock_skew": 1000000000000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code == 200 then + ngx.say(body) + end + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: verify: invalid signature +--- request +GET /hello +--- more_headers +Authorization: Signature keyId="my-access-key",algorithm="hmac-sha256",headers="@request-target date",signature="asdf" +Date: Thu, 24 Sep 2020 06:39:52 GMT +--- error_code: 401 +--- response_body +{"message":"client request can't be validated"} +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid signature + + + +=== TEST 18: verify: invalid signature +--- request +GET /hello +--- more_headers +Authorization: Signature keyId="my-access-key",algorithm="hmac-sha256",headers="@request-target date",signature="asdf" +Date: Thu, 24 Sep 2020 06:39:52 GMT +--- error_code: 401 +--- response_body +{"message":"client request can't be validated"} +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid signature + + + +=== TEST 19: add route with 1 clock skew +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "clock_skew": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code == 200 then + ngx.say(body) + end + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: verify: Invalid GMT format time +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = 
require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + ngx.sleep(2) + + local signing_string = "GET" .. "/hello" .. "" .. + key_id .. gmt .. custom_header_a .. custom_header_b + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- error_code: 401 +--- response_body eval +qr/{"message":"client request can't be validated"}/ +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Clock skew exceeded + + + +=== TEST 21: update route with default clock skew +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code == 200 then + ngx.say(body) + end + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: verify: put ok +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = 
ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local data = {cert = "ssl_cert", key = "ssl_key", sni = "test.com"} + local req_body = core.json.encode(data) + req_body = req_body or "" + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "PUT /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + core.log.info("signing_string:", signing_string) + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_PUT, + req_body, + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 23: update route with signed_headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "signed_headers": ["date","x-custom-header-a", "x-custom-header-b"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: verify with invalid signed header +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_c = "23879fmsldfk" + + local signing_string = "GET" .. "/hello" .. "" .. + key_id .. gmt .. custom_header_a .. custom_header_c + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-c\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-c"] = custom_header_c + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- error_code: 401 +--- response_body eval +qr/{"message":"client request can't be validated"}/ +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: expected header "x-custom-header-b" missing in signing + + + +=== TEST 25: verify ok with signed headers +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "asld$%dfasf" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: add consumer with plugin hmac-auth - empty configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "hmac-auth": { + } + } + }]]) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin hmac-auth err: property \\"(key_id|secret_key)\\" is required"\}/ + + + +=== TEST 27: add route with no allowed algorithms +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "allowed_algorithms": [] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/validation failed: expect array to have at least 1 items/ + + + +=== TEST 28: update route with signed_headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "hide_credentials": true + } + }, + "upstream": { + "nodes": { + "httpbin.org:80": 1 + }, + "type": "roundrobin" + }, + "uri": "/headers" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + 
+=== TEST 29: verify Authorization header missing +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + + local signing_string = { + key_id, + "GET /headers", + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + local code, _, body = t.test('/headers', + ngx.HTTP_GET, + "", + nil, + headers + ) + + if string.find(body,"Authorization") then + ngx.say("failed") + else + ngx.say("passed") + end + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 30 : update route with signed_headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "signed_headers": ["date","x-custom-header-a", "x-custom-header-b"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 31: verify error with the client only sends one in the request, but there are two in the signature +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = 
ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "asld$%dfasf" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- error_code: 401 +--- response_body eval +qr/client request can't be validated/ +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid signature + + + +=== TEST 32: verify error with the client sends two in the request, but there is only one in the signature +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "asld$%dfasf" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- error_code: 401 +--- response_body eval +qr/client request can't be validated/ +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid signature + + + +=== TEST 33 : update route with allowed_algorithms +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "allowed_algorithms": ["hmac-sha256"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 34: verify with hmac-sha1 algorithm, not part of allowed_algorithms +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "asld$%dfasf" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. 
"\n" + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA1):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha1\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- error_code: 401 +--- response_body eval +qr/client request can't be validated/ +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid algorithm diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth2.t new file mode 100644 index 0000000..7d6e860 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth2.t @@ -0,0 +1,150 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: enable the hmac auth plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/uri" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: get the default schema +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/hmac-auth', + ngx.HTTP_GET, + nil, + [[ +{"type":"object","$comment":"this is a mark for our injected plugin schema","title":"work with route or service object","properties":{"allowed_algorithms":{"type":"array","default":["hmac-sha1","hmac-sha256","hmac-sha512"],"items":{"type":"string","enum":["hmac-sha1","hmac-sha256","hmac-sha512"]},"minItems":1},"_meta":{"type":"object","properties":{"filter":{"description":"filter determines whether the plugin needs to be executed at runtime","type":"array"},"error_response":{"oneOf":[{"type":"string"},{"type":"object"}]},"disable":{"type":"boolean"},"priority":{"description":"priority of plugins by customized order","type":"integer"}}},"clock_skew":{"type":"integer","default":300,"minimum":1},"signed_headers":{"type":"array","items":{"type":"string","minLength":1,"maxLength":50}},"hide_credentials":{"type":"boolean","default":false},"validate_request_body":{"type":"boolean","default":false,"title":"A boolean value telling the plugin to enable body validation"}}} + ]] + ) + ngx.status = code + } + } + + + +=== TEST 3: get the schema 
by schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/hmac-auth?schema_type=consumer', + ngx.HTTP_GET, + nil, + [[ +{"title":"work with consumer object","required":["key_id","secret_key"],"properties":{"secret_key":{"minLength":1,"maxLength":256,"type":"string"},"key_id":{"minLength":1,"maxLength":256,"type":"string"}},"type":"object"} + ]] + ) + ngx.status = code + } + } + + + +=== TEST 4: get the schema by error schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/hmac-auth?schema_type=consumer123123', + ngx.HTTP_GET, + nil, + [[ +{"properties":{},"title":"work with route or service object","type":"object"} + ]] + ) + ngx.status = code + } + } + + + +=== TEST 5: enable hmac auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth3.t new file mode 100644 index 0000000..faeebad --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth3.t @@ -0,0 +1,280 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: add consumer with validate_request_body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "robin", + "plugins": { + "hmac-auth": { + "key_id": "my-access-key", + "secret_key": "my-secret-key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: enable hmac auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": { + "validate_request_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: missing body digest when validate_request_body is enabled 
+--- config + location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + local body = "{\"name\": \"world\"}" + + local signing_string = { + key_id, + "POST /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + core.log.info("signing_string:", signing_string) + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_POST, + body, + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } + } +--- error_code: 401 +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid digest +--- response_body eval +qr/\{"message":"client request can't be validated"\}/ + + + +=== TEST 4: verify body digest: not ok +--- config + location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + local body = "{\"name\": \"world\"}" + + local signing_string = { + key_id, + "POST /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + core.log.info("signing_string:", signing_string) + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] ="Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["Digest"] = "hello" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_POST, + body, + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } + } +--- error_code: 401 +--- grep_error_log eval +qr/client request can't be validated: [^,]+/ +--- grep_error_log_out +client request can't be validated: Invalid digest +--- response_body eval +qr/\{"message":"client request can't be validated"\}/ + + + +=== TEST 5: verify body digest: ok +--- config + location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + local body = "{\"name\": \"world\"}" + + local signing_string = { + key_id, + "POST /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + core.log.info("signing_string:", signing_string) + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + local ngx_encode_base64 = ngx.encode_base64 + + local resty_sha256 = require("resty.sha256") + local hash = resty_sha256:new() + hash:update(body) + local digest = hash:final() + local body_digest = ngx_encode_base64(digest) + + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Digest"] = "SHA-256=" .. body_digest + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. 
",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_POST, + body, + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth4.t b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth4.t new file mode 100644 index 0000000..1bdd470 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/hmac-auth4.t @@ -0,0 +1,280 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set hmac-auth conf: secret_key uses secret ref +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "root" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- change consumer with secrets ref: vault + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "hmac-auth": { + "key_id": "my-access-key", + "secret_key": "$secret://vault/test1/jack/secret_key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- set route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/jack secret_key=my-secret-key +--- response_body +Success! 
Data written to: kv/apisix/jack + + + +=== TEST 3: verify: ok +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /hello", + "date: " .. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + core.log.info("signing_string:", signing_string) + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. 
"\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 4: set hmac-auth conf with the token in an env var: secret_key uses secret ref +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "$ENV://VAULT_TOKEN" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- change consumer with secrets ref: vault + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "hmac-auth": { + "key_id": "my-access-key", + "secret_key": "$secret://vault/test1/jack/secret_key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- set route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "hmac-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: verify: ok +--- config +location /t { + content_by_lua_block { + local ngx_time = ngx.time + local ngx_http_time = ngx.http_time + local core = require("apisix.core") + local t = require("lib.test_admin") + local hmac = require("resty.hmac") + local ngx_encode_base64 = ngx.encode_base64 + + local secret_key = "my-secret-key" + local timestamp = ngx_time() + local gmt = ngx_http_time(timestamp) + local key_id = "my-access-key" + local custom_header_a = "asld$%dfasf" + local custom_header_b = "23879fmsldfk" + + local signing_string = { + key_id, + "GET /hello", + "date: " 
.. gmt, + "x-custom-header-a: " .. custom_header_a, + "x-custom-header-b: " .. custom_header_b + } + signing_string = core.table.concat(signing_string, "\n") .. "\n" + core.log.info("signing_string:", signing_string) + + local signature = hmac:new(secret_key, hmac.ALGOS.SHA256):final(signing_string) + core.log.info("signature:", ngx_encode_base64(signature)) + local headers = {} + headers["Date"] = gmt + headers["Authorization"] = "Signature keyId=\"" .. key_id .. "\",algorithm=\"hmac-sha256\"" .. ",headers=\"@request-target date x-custom-header-a x-custom-header-b\",signature=\"" .. ngx_encode_base64(signature) .. "\"" + headers["x-custom-header-a"] = custom_header_a + headers["x-custom-header-b"] = custom_header_b + + local code, body = t.test('/hello', + ngx.HTTP_GET, + "", + nil, + headers + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/http-dubbo.t b/CloudronPackages/APISIX/apisix-source/t/plugin/http-dubbo.t new file mode 100644 index 0000000..8006f07 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/http-dubbo.t @@ -0,0 +1,179 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +add_block_preprocessor(sub { + my ($block) = @_; + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: test_pojo +--- apisix_yaml +upstreams: + - nodes: + - host: 127.0.0.1 + port: 30880 + weight: 1 + type: roundrobin + id: 1 +routes: + - + uri: /t + plugins: + http-dubbo: + service_name: org.apache.dubbo.backend.DubboSerializationTestService + params_type_desc: Lorg/apache/dubbo/backend/PoJo; + serialized: true + method: testPoJo + service_version: 1.0.0 + upstream_id: 1 +#END +--- request +POST /t +{"aBoolean":true,"aByte":1,"aDouble":1.1,"aFloat":1.2,"aInt":2,"aLong":3,"aShort":4,"aString":"aa","acharacter":"a","stringMap":{"key":"value"},"strings":["aa","bb"]} +--- response_body chomp +{"aBoolean":true,"aByte":1,"aDouble":1.1,"aFloat":1.2,"aInt":2,"aLong":3,"aShort":4,"aString":"aa","acharacter":"a","stringMap":{"key":"value"},"strings":["aa","bb"]} + + + +=== TEST 2: test_pojos +--- apisix_yaml +upstreams: + - nodes: + - host: 127.0.0.1 + port: 30880 + weight: 1 + type: roundrobin + id: 1 +routes: + - + uri: /t + plugins: + http-dubbo: + service_name: org.apache.dubbo.backend.DubboSerializationTestService + params_type_desc: "[Lorg/apache/dubbo/backend/PoJo;" + serialized: true + method: testPoJos + service_version: 1.0.0 + upstream_id: 1 +#END +--- request +POST /t +[{"aBoolean":true,"aByte":1,"aDouble":1.1,"aFloat":1.2,"aInt":2,"aLong":3,"aShort":4,"aString":"aa","acharacter":"a","stringMap":{"key":"value"},"strings":["aa","bb"]}] +--- response_body chomp +[{"aBoolean":true,"aByte":1,"aDouble":1.1,"aFloat":1.2,"aInt":2,"aLong":3,"aShort":4,"aString":"aa","acharacter":"a","stringMap":{"key":"value"},"strings":["aa","bb"]}] + + + 
+=== TEST 3: test_timeout +--- apisix_yaml +upstreams: + - nodes: + - host: 127.0.0.1 + port: 30881 + weight: 1 + type: roundrobin + id: 1 +routes: + - + uri: /t + plugins: + http-dubbo: + service_name: org.apache.dubbo.backend.DubboSerializationTestService + params_type_desc: "[Lorg/apache/dubbo/backend/PoJo;" + serialized: true + method: testPoJos + service_version: 1.0.0 + connect_timeout: 100 + read_timeout: 100 + send_timeout: 100 + upstream_id: 1 +#END +--- request +GET /t +--- error_code: 502 +--- error_log +failed to connect to upstream + + + +=== TEST 4: test_void +--- apisix_yaml +upstreams: + - nodes: + - host: 127.0.0.1 + port: 30880 + weight: 1 + type: roundrobin + id: 1 +routes: + - + uri: /t + plugins: + http-dubbo: + service_name: org.apache.dubbo.backend.DubboSerializationTestService + serialized: true + method: testVoid + service_version: 1.0.0 + upstream_id: 1 +#END +--- request +GET /t + + + +=== TEST 5: test_fail +--- apisix_yaml +upstreams: + - nodes: + - host: 127.0.0.1 + port: 30880 + weight: 1 + type: roundrobin + id: 1 +routes: + - + uri: /t + plugins: + http-dubbo: + service_name: org.apache.dubbo.backend.DubboSerializationTestService + serialized: true + method: testFailure + service_version: 1.0.0 + upstream_id: 1 +#END +--- request +GET /t +--- error_code: 500 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger-json.t b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger-json.t new file mode 100644 index 0000000..b4a46af --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger-json.t @@ -0,0 +1,239 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('info'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: json body with request_body +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log + include_req_body: true +#END +--- request +POST /hello +{"sample_payload":"hello"} +--- error_log +"body":"{\"sample_payload\":\"hello\"}" + + + +=== TEST 2: json body with response_body +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log + include_resp_body: true +#END +--- request +POST /hello +{"sample_payload":"hello"} +--- error_log +"response":{"body":"hello world\n" + + + +=== TEST 3: json body with response_body and response_body expression +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log + include_resp_body: true + include_resp_body_expr: + - - arg_bar + - == + - foo +#END +--- request +POST /hello?bar=foo +{"sample_payload":"hello"} +--- error_log 
+"response":{"body":"hello world\n" + + + +=== TEST 4: json body with response_body, expr not hit +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log + include_resp_body: true + include_resp_body_expr: + - - arg_bar + - == + - foo +#END +--- request +POST /hello?bar=bar +{"sample_payload":"hello"} +--- no_error_log +"response":{"body":"hello world\n" + + + +=== TEST 5: json body with request_body and response_body +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log + include_req_body: true + include_resp_body: true +#END +--- request +POST /hello +{"sample_payload":"hello"} +--- error_log eval +qr/(.*"response":\{.*"body":"hello world\\n".*|.*\{\\\"sample_payload\\\":\\\"hello\\\"\}.*){2}/ + + + +=== TEST 6: json body without request_body or response_body +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log +#END +--- request +POST /hello +{"sample_payload":"hello"} +--- error_log eval +qr/(.*"response":\{.*"body":"hello world\\n".*|.*\{\\\"sample_payload\\\":\\\"hello\\\"\}.*){0}/ + + + +=== TEST 7: json body with request_body and request_body expression +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log + include_req_body: true + include_req_body_expr: + - - arg_bar + - == + - foo +#END +--- request +POST /hello?bar=foo +{"test":"hello"} +--- error_log +"request":{"body":"{\"test\":\"hello\"}" + + + +=== TEST 8: json body with request_body, expr not hit +--- apisix_yaml +routes: + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + 
type: roundrobin + plugins: + http-logger: + batch_max_size: 1 + uri: http://127.0.0.1:1980/log + include_resp_body: true + include_resp_body_expr: + - - arg_bar + - == + - foo +#END +--- request +POST /hello?bar=bar +{"sample_payload":"hello"} +--- no_error_log +"request":{"body":"{\"test\":\"hello\"}" diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger-log-format.t b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger-log-format.t new file mode 100644 index 0000000..0bc8cea --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger-log-format.t @@ -0,0 +1,569 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('info'); +repeat_each(1); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/http-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + if code >=300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: sanity, batch_max_size=1 and concat_method is new_line +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:3001", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "concat_method": "new_line" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: report http logger +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/.*route_id":"1".*/ + + + +=== TEST 4: sanity, batch_max_size=2 and concat_method is new_line +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:3001", + "batch_max_size": 2, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "concat_method": "new_line" + } + }, 
+ "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: report http logger +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/"\@timestamp":"20/ + + + +=== TEST 6: sanity, batch_max_size=1 and concat_method is json +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:3001", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "concat_method": "json" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: report http logger +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/"route_id":"1"/ + + + +=== TEST 8: sanity, batch_max_size=2 and concat_method is json +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:3001", + "batch_max_size": 2, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "concat_method": "json" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if 
code >= 300 then + ngx.status = code + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: report http logger to confirm two json in array +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/\[\{.*?\},\{.*?\}\]/ + + + +=== TEST 10: remove plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/http-logger', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: remove route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: check default log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:3001", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + }, + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, _ = t("/hello", 
"GET",null,null,{apikey = "auth-one"}) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: check logs +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/"consumer":\{"username":"jack"\}/ +--- wait: 0.5 + + + +=== TEST 14: multi level nested expr conditions +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = plugin.check_schema({ + uri = "http://127.0.0.1", + include_resp_body = true, + include_resp_body_expr = { + {"http_content_length", "<", 1024}, + {"http_content_type", "in", {"application/xml", "application/json", "text/plain", "text/xml"}} + } + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 15: use custom variable in the logger +--- extra_init_by_lua + local core = require "apisix.core" + + core.ctx.register_var("a6_route_labels", function(ctx) + local route = ctx.matched_route and ctx.matched_route.value + if route and route.labels then + return route.labels + end + return nil + end) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/plugin_metadata/http-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "labels": "$a6_route_labels", + "client_ip": "$remote_addr" + } + }]] + ) + if code >= 300 then + ngx.status = code + return body + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:3001", + "batch_max_size": 1, + "concat_method": "json" + } + }, + "labels":{ + "key":"testvalue" + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, body2 = t("/hello", 
"GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit route and report http logger +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/.*testvalue.*/ + + + +=== TEST 17: log format in plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:3001", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "concat_method": "new_line", + "log_format": { + "x_ip": "$remote_addr" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit route and report http logger +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/"x_ip":"127.0.0.1".*\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger-new-line.t b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger-new-line.t new file mode 100644 index 0000000..b88e4f7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger-new-line.t @@ -0,0 +1,288 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('info'); +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity, batch_max_size=1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "concat_method": "new_line" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route and report http logger +--- request +GET /hello +--- response_body +hello world +--- wait: 0.5 +--- error_log eval +qr/request log: .*"upstream":"127.0.0.1:1982"/ + + + +=== TEST 3: sanity, batch_max_size=1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 3, + "max_retry_count": 3, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 1, + "concat_method": "new_line" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code 
+ end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit route, and no report log +--- request +GET /hello +--- response_body +hello world +--- no_error_log +[error] +request log: + + + +=== TEST 5: hit route, and report log +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for i = 1, 6 do + t('/hello', ngx.HTTP_GET) + end + + ngx.sleep(3) + ngx.say("done") + } +} +--- request +GET /t +--- timeout: 10 +--- grep_error_log eval +qr/request log:/ +--- grep_error_log_out +request log: +request log: + + + +=== TEST 6: hit route, and report log +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for i = 1, 6 do + t('/hello', ngx.HTTP_GET) + end + + ngx.sleep(3) + ngx.say("done") + } +} +--- request +GET /t +--- timeout: 10 +--- grep_error_log eval +qr/"upstream":"127.0.0.1:1982"/ +--- grep_error_log_out +"upstream":"127.0.0.1:1982" +"upstream":"127.0.0.1:1982" +"upstream":"127.0.0.1:1982" +"upstream":"127.0.0.1:1982" +"upstream":"127.0.0.1:1982" +"upstream":"127.0.0.1:1982" + + + +=== TEST 7: hit route, and report log +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for i = 1, 5 do + t('/hello', ngx.HTTP_GET) + end + + ngx.sleep(3) + ngx.say("done") + } +} +--- request +GET /t +--- timeout: 10 +--- grep_error_log eval +qr/"upstream":"127.0.0.1:1982"/ +--- grep_error_log_out +"upstream":"127.0.0.1:1982" +"upstream":"127.0.0.1:1982" +"upstream":"127.0.0.1:1982" +"upstream":"127.0.0.1:1982" +"upstream":"127.0.0.1:1982" + + + +=== TEST 8: set in global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 3, + "max_retry_count": 3, + "retry_delay": 2, + "buffer_duration": 2, + 
"inactive_timeout": 1, + "concat_method": "new_line" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: not hit route, and report log +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for i = 1, 5 do + t('/not_hit_route', ngx.HTTP_GET) + end + + ngx.sleep(3) + ngx.say("done") + } +} +--- request +GET /t +--- timeout: 10 + + + +=== TEST 10: delete the global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger.t new file mode 100644 index 0000000..51b165a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger.t @@ -0,0 +1,717 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = plugin.check_schema({uri = "http://127.0.0.1"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: full schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = plugin.check_schema({uri = "http://127.0.0.1", + auth_header = "Basic 123", + timeout = 3, + name = "http-logger", + max_retry_count = 2, + retry_delay = 2, + buffer_duration = 2, + inactive_timeout = 2, + batch_max_size = 500, + ssl_verify = false, + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 3: uri is missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = plugin.check_schema({auth_header = "Basic 123", + timeout = 3, + name = "http-logger", + max_retry_count = 2, + retry_delay = 2, + buffer_duration = 2, + inactive_timeout = 2, + batch_max_size = 500, + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "uri" is required +done + + + +=== TEST 4: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1982/hello", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 
1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: access local server +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch Processor[http logger] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 6: set to the http external endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1982/echo", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: access external endpoint +--- request +GET /hello +--- response_body +hello world +--- error_log +Batch Processor[http logger] successfully processed the entries +--- wait: 1.5 + + + +=== TEST 8: set wrong https endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:1982/echo", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "ssl_verify": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: access wrong https endpoint +--- request +GET /hello1 +--- response_body +hello1 world +--- error_log +failed to perform SSL with 
host[127.0.0.1] port[1982] handshake failed +--- wait: 1.5 + + + +=== TEST 10: set correct https endpoint +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:1983/echo", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "ssl_verify": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: access correct https endpoint +--- request +GET /hello1 +--- response_body +hello1 world +--- error_log +Batch Processor[http logger] successfully processed the entries +--- wait: 1.5 + + + +=== TEST 12: set batch max size to two +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:1983/echo", + "batch_max_size": 2, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: access route with batch max size twice +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello1" + local res, err = httpc:request_uri(uri, { method = "GET"}) + res, err = httpc:request_uri(uri, { method = "GET"}) + ngx.status = res.status + if res.status == 200 then + ngx.say("hello1 world") + end + } + } +--- request +GET /t +--- response_body +hello1 world +--- error_log +Batch Processor[http logger] batch max size has exceeded +transferring buffer entries to processing pipe line, buffercount[2] +Batch Processor[http logger] successfully processed the entries +--- wait: 1.5 + + + +=== TEST 14: set wrong port +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:9991/echo", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: access wrong port +--- request +GET /hello1 +--- response_body +hello1 world +--- error_log +Batch Processor[http logger] failed to process entries: failed to connect to host[127.0.0.1] port[9991] connection refused +--- wait: 1.5 + + + +=== TEST 16: check uri +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local bad_uris = { + "127.0.0.1", + "127.0.0.1:1024", + } + for _, bad_uri in ipairs(bad_uris) do + local ok, err = plugin.check_schema({uri = bad_uri}) + if ok then + ngx.say("mismatched ", bad) + end + end + + local good_uris = { + "http://127.0.0.1:1024/x?aa=b", + "http://127.0.0.1:1024?aa=b", + "http://127.0.0.1:1024", + "http://x.con", + "https://x.con", + } + for _, good_uri in ipairs(good_uris) do + local ok, err = plugin.check_schema({uri = good_uri}) + if not ok then + 
ngx.say("mismatched ", good) + end + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 17: check plugin configuration updating +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body1 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1982/hello", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body2 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, body3 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1982/hello1", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body4 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.print(body1) + ngx.print(body2) + ngx.print(body3) + ngx.print(body4) + } + } +--- wait: 0.5 +--- response_body +passedopentracing +passedopentracing +--- grep_error_log eval +qr/sending a batch logs to http:\/\/127.0.0.1:1982\/hello\d?/ +--- grep_error_log_out +sending a batch logs to http://127.0.0.1:1982/hello +sending a batch logs to http://127.0.0.1:1982/hello1 + + + +=== TEST 18: check log schema(include_resp_body_expr) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = 
plugin.check_schema({uri = "http://127.0.0.1", + auth_header = "Basic 123", + timeout = 3, + name = "http-logger", + max_retry_count = 2, + retry_delay = 2, + buffer_duration = 2, + inactive_timeout = 2, + batch_max_size = 500, + include_resp_body = true, + include_resp_body_expr = { + {"bar", "<>", "foo"} + } + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +failed to validate the 'include_resp_body_expr' expression: invalid operator '<>' +done + + + +=== TEST 19: ssl_verify default is false for comppatibaility +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1982/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: set correct https endpoint and ssl verify true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:1983/echo", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "ssl_verify": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: access correct https endpoint but ssl verify failed +--- request +GET /hello1 +--- error_log +certificate host mismatch +--- wait: 3 + + + +=== TEST 22: set correct https endpoint and ssl verify false +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "https://127.0.0.1:1983/echo", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "ssl_verify": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: access correct https endpoint but ssl verify ok +--- request +GET /hello1 +--- wait: 3 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger2.t new file mode 100644 index 0000000..e8cee41 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/http-logger2.t @@ -0,0 +1,515 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 12001; + + location /http-logger/test { + content_by_lua_block { + ngx.say("test-http-logger-response") + } + } + + location /http-logger/Authorization { + content_by_lua_block { + ngx.log(ngx.WARN, "received Authorization header: [", ngx.var.http_authorization, "]") + ngx.say("OK") + } + } + + location /http-logger/center { + content_by_lua_block { + local function str_split(str, reps) + local str_list = {} + string.gsub(str, '[^' .. reps .. ']+', function(w) + table.insert(str_list, w) + end) + return str_list + end + + local args = ngx.req.get_uri_args() + local query = args.query or nil + ngx.req.read_body() + local body = ngx.req.get_body_data() + + if query then + if type(query) == "string" then + query = {query} + end + + local data, err = require("cjson").decode(body) + if err then + ngx.log(ngx.WARN, "logs:", body) + end + + for i = 1, #query do + local fields = str_split(query[i], ".") + local val + for j = 1, #fields do + local key = fields[j] + if j == 1 then + val = data[key] + else + val = val[key] + end + end + ngx.log(ngx.WARN ,query[i], ":", val) + end + else + ngx.log(ngx.WARN, "logs:", body) + end + } + } + + location / { + content_by_lua_block { + ngx.log(ngx.WARN, "test http logger for root path") + } + } + } + + server { + listen 11451; + gzip on; + gzip_types *; + gzip_min_length 1; + location /gzip_hello { + content_by_lua_block { + ngx.req.read_body() + local s = "gzip hello world" + ngx.header['Content-Length'] = #s + 1 + ngx.say(s) + } + } + } + + server { + listen 11452; + location /brotli_hello { + content_by_lua_block { + ngx.req.read_body() + local s = "brotli hello world" + ngx.header['Content-Length'] = #s + 1 + ngx.say(s) + } + 
header_filter_by_lua_block { + local conf = { + comp_level = 6, + http_version = 1.1, + lgblock = 0, + lgwin = 19, + min_length = 1, + mode = 0, + types = "*", + } + local brotli = require("apisix.plugins.brotli") + brotli.header_filter(conf, ngx.ctx) + } + body_filter_by_lua_block { + local conf = { + comp_level = 6, + http_version = 1.1, + lgblock = 0, + lgwin = 19, + min_length = 1, + mode = 0, + types = "*", + } + local brotli = require("apisix.plugins.brotli") + brotli.body_filter(conf, ngx.ctx) + } + } + } + +_EOC_ + + $block->set_value("http_config", $http_config); + + my $extra_init_by_lua = <<_EOC_; + local bpm = require("apisix.utils.batch-processor-manager") + bpm.set_check_stale_interval(1) +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); +}); + +run_tests; + +__DATA__ + +=== TEST 1: check stale batch processor +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1982/hello", + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: don't remove current processor +--- request +GET /opentracing +--- error_log +Batch Processor[http logger] successfully processed the entries +--- no_error_log +removing batch processor stale object +--- wait: 0.5 + + + +=== TEST 3: remove stale processor +--- request +GET /opentracing +--- error_log +Batch Processor[http logger] successfully processed the entries +removing batch processor stale object +--- wait: 1.5 + + + +=== TEST 4: don't remove batch processor which is in used +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + 
ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1982/hello", + "batch_max_size": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: don't remove +--- request +GET /opentracing +--- no_error_log +removing batch processor stale object +--- wait: 1.5 + + + +=== TEST 6: set fetch request body and response body route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["POST"], + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:12001/http-logger/center?query[]=request.body&query[]=response.body", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "include_req_body": true, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:12001": 1 + }, + "type": "roundrobin" + }, + "uri": "/http-logger/test" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: test fetch request body and response body route +--- request +POST /http-logger/test +test-http-logger-request +--- response_body +test-http-logger-response +--- error_log +request.body:test-http-logger-request +response.body:test-http-logger-response +--- wait: 1.5 + + + +=== TEST 8: set fetch request body and response body route - gzip +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:12001/http-logger/center?query[]=response.body", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + 
"buffer_duration": 2, + "inactive_timeout": 2, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:11451": 1 + }, + "type": "roundrobin" + }, + "uri": "/gzip_hello" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: test fetch request body and response body route +--- request +GET /gzip_hello +--- more_headers +Accept-Encoding: gzip +--- error_log +response.body:gzip hello world +--- wait: 1.5 + + + +=== TEST 10: set fetch request body and response body route - brotli +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:12001/http-logger/center?query[]=response.body", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:11452": 1 + }, + "type": "roundrobin" + }, + "uri": "/brotli_hello" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: test fetch request body and response body route +--- request +GET /brotli_hello +--- more_headers +Accept-Encoding: br +--- error_log +response.body:brotli hello world +--- wait: 1.5 + + + +=== TEST 12: test default Authorization header sent to the log server +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["POST"], + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:12001/http-logger/Authorization", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:12001": 1 + }, + 
"type": "roundrobin" + }, + "uri": "/http-logger/test" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit +--- request +POST /http-logger/test +test-http-logger-request +--- error_log +received Authorization header: [nil] +--- wait: 1.5 + + + +=== TEST 14: add default path +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:12001", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:12001": 1 + }, + "type": "roundrobin" + }, + "uri": "/http-logger/test" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: hit +--- request +GET /http-logger/test +--- error_log +test http logger for root path diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/inspect.t b/CloudronPackages/APISIX/apisix-source/t/plugin/inspect.t new file mode 100644 index 0000000..56f56d8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/inspect.t @@ -0,0 +1,557 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('warn'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $user_yaml_config = <<_EOC_; +plugin_attr: + inspect: + delay: 1 + hooks_file: "/tmp/apisix_inspect_hooks.lua" +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); + + my $extra_init_worker_by_lua = $block->extra_init_worker_by_lua // ""; + $extra_init_worker_by_lua .= <<_EOC_; +local function gen_funcs_invoke(...) + local code = "" + for _, func in ipairs({...}) do + code = code .. "test." .. func .. "();" + end + return code +end +function set_test_route(...) + func = func or 'run1' + local t = require("lib.test_admin").test + local code = [[{ + "methods": ["GET"], + "uri": "/inspect", + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function() local test = require(\\"lib.test_inspect\\");]] + .. gen_funcs_invoke(...) + .. [[ngx.say(\\"ok\\"); end"] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + return t('/apisix/admin/routes/inspect', ngx.HTTP_PUT, code) +end + +function do_request() + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/inspect" + + local httpc = http.new() + local res = httpc:request_uri(uri, {method = "GET"}) + assert(res.body == "ok\\n") +end + +function write_hooks(code, file) + local file = io.open(file or "/tmp/apisix_inspect_hooks.lua", "w") + file:write(code) + file:close() +end +_EOC_ + $block->set_value("extra_init_worker_by_lua", $extra_init_worker_by_lua); + + # note that it's different from APISIX.pm, + # here we enable no_error_log ignoreless of error_log. 
+ if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]"); + } + + if (!$block->timeout) { + $block->set_value("timeout", "10"); + } +}); + +add_cleanup_handler(sub { + unlink("/tmp/apisix_inspect_hooks.lua"); +}); + +run_tests; + +__DATA__ + +=== TEST 1: simple hook +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run1") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 27, nil, function(info) + ngx.log(ngx.WARN, "var1=", info.vals.var1) + return true + end) + ]]) + + ngx.sleep(1.5) + + do_request() + + os.remove("/tmp/apisix_inspect_hooks.lua") + } + } +--- error_log +var1=hello + + + +=== TEST 2: filename only +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run1") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("test_inspect.lua", 27, nil, function(info) + ngx.log(ngx.WARN, "var1=", info.vals.var1) + return true + end) + ]]) + + ngx.sleep(1.5) + + do_request() + + os.remove("/tmp/apisix_inspect_hooks.lua") + } + } +--- error_log +var1=hello + + + +=== TEST 3: hook lifetime +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run1") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + local hook1_times = 2 + dbg.set_hook("test_inspect.lua", 27, nil, function(info) + ngx.log(ngx.WARN, "var1=", info.vals.var1) + hook1_times = hook1_times - 1 + return hook1_times == 0 + end) + ]]) + + ngx.sleep(1.5) + + -- request 3 times, but hook triggered 2 times + for _ = 1,3 do + do_request() + end + + os.remove("/tmp/apisix_inspect_hooks.lua") + } + } +--- error_log +var1=hello +var1=hello + + + +=== TEST 4: multiple hooks +--- config + location /t { + content_by_lua_block { + local code = 
set_test_route("run1") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("test_inspect.lua", 26, nil, function(info) + ngx.log(ngx.WARN, "var1=", info.vals.var1) + return true + end) + + dbg.set_hook("test_inspect.lua", 27, nil, function(info) + ngx.log(ngx.WARN, "var2=", info.vals.var2) + return true + end) + ]]) + + ngx.sleep(1.5) + + do_request() + + -- note that we don't remove the hook file, + -- used for next test case + } + } +--- error_log +var1=hello +var2=world + + + +=== TEST 5: hook file not removed, re-enabled by next startup +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run1") + if code >= 300 then + ngx.status = code + return + end + + do_request() + + os.remove("/tmp/apisix_inspect_hooks.lua") + } + } +--- error_log +var1=hello + + + +=== TEST 6: soft link +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run1") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 27, nil, function(info) + ngx.log(ngx.WARN, "var1=", info.vals.var1) + return true + end) + ]], "/tmp/test_real_tmp_file.lua") + + os.execute("ln -sf /tmp/test_real_tmp_file.lua /tmp/apisix_inspect_hooks.lua") + + ngx.sleep(1.5) + + do_request() + + os.remove("/tmp/apisix_inspect_hooks.lua") + os.remove("/tmp/test_real_tmp_file.lua") + } + } +--- error_log +var1=hello + + + +=== TEST 7: remove soft link would disable hooks +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run1") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 27, nil, function(info) + ngx.log(ngx.WARN, "var1=", info.vals.var1) + return true + end) + ]], "/tmp/test_real_tmp_file.lua") + + os.execute("ln -sf 
/tmp/test_real_tmp_file.lua /tmp/apisix_inspect_hooks.lua") + + ngx.sleep(1.5) + os.remove("/tmp/apisix_inspect_hooks.lua") + ngx.sleep(1.5) + + do_request() + + os.remove("/tmp/test_real_tmp_file.lua") + } + } +--- no_error_log +var1=hello + + + +=== TEST 8: ensure we see all local variables till the hook line +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run1") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 27, nil, function(info) + local count = 0 + for k,v in pairs(info.vals) do + count = count + 1 + end + ngx.log(ngx.WARN, "count=", count) + return true + end) + ]]) + + ngx.sleep(1.5) + + do_request() + + os.remove("/tmp/apisix_inspect_hooks.lua") + } + } +--- error_log +count=2 + + + +=== TEST 9: check upvalue of run2(), only upvalue used in function code are visible +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run2") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 33, nil, function(info) + ngx.log(ngx.WARN, "upvar1=", info.uv.upvar1) + ngx.log(ngx.WARN, "upvar2=", info.uv.upvar2) + return true + end) + ]]) + + ngx.sleep(1.5) + + do_request() + + os.remove("/tmp/apisix_inspect_hooks.lua") + } + } +--- error_log +upvar1=2 +upvar2=nil + + + +=== TEST 10: check upvalue of run3(), now both upvar1 and upvar2 are visible +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run3") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 37, nil, function(info) + ngx.log(ngx.WARN, "upvar1=", info.uv.upvar1) + ngx.log(ngx.WARN, "upvar2=", info.uv.upvar2) + return true + end) + ]]) + + ngx.sleep(1.5) + + do_request() + + 
os.remove("/tmp/apisix_inspect_hooks.lua") + } + } +--- error_log +upvar1=2 +upvar2=yes + + + +=== TEST 11: flush specific JIT cache +--- config + location /t { + content_by_lua_block { + local test = require("lib.test_inspect") + + local t1 = test.hot1() + local t8 = test.hot2() + + write_hooks([[ + local test = require("lib.test_inspect") + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 47, test.hot1, function(info) + return false + end) + ]]) + + ngx.sleep(1.5) + + local t2 = test.hot1() + local t9 = test.hot2() + + assert(t2-t1 > t1, "hot1 consumes at least double times than before") + assert(t9-t8 < t8*0.8, "hot2 not affected") + + os.remove("/tmp/apisix_inspect_hooks.lua") + + ngx.sleep(1.5) + + local t3 = test.hot1() + local t4 = test.hot2() + assert(t3-t1 < t1*0.8, "hot1 jit recover") + assert(t4-t8 < t4*0.8, "hot2 jit recover") + } + } + + + +=== TEST 12: flush the whole JIT cache +--- config + location /t { + content_by_lua_block { + local test = require("lib.test_inspect") + + local t1 = test.hot1() + local t8 = test.hot2() + + write_hooks([[ + local test = require("lib.test_inspect") + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 47, nil, function(info) + return false + end) + ]]) + + ngx.sleep(1.5) + + local t2 = test.hot1() + local t9 = test.hot2() + + assert(t2-t1 > t1, "hot1 consumes at least double times than before") + assert(t9-t8 > t8, "hot2 consumes at least double times than before") + + os.remove("/tmp/apisix_inspect_hooks.lua") + + ngx.sleep(1.5) + + local t3 = test.hot1() + local t4 = test.hot2() + assert(t3-t1 < t1*0.8, "hot1 jit recover") + assert(t4-t8 < t4*0.8, "hot2 jit recover") + } + } + + + +=== TEST 13: remove hook log +--- config + location /t { + content_by_lua_block { + local code = set_test_route("run1") + if code >= 300 then + ngx.status = code + return + end + + write_hooks([[ + local dbg = require "apisix.inspect.dbg" + 
dbg.set_hook("t/lib/test_inspect.lua", 27, nil, function(info) + return true + end) + ]]) + + ngx.sleep(1.5) + + do_request() + + os.remove("/tmp/apisix_inspect_hooks.lua") + } + } +--- error_log +inspect: remove hook: t/lib/test_inspect.lua#27 +inspect: all hooks removed + + + +=== TEST 14: jit should be recovered after all hooks are done +--- config + location /t { + content_by_lua_block { + local test = require("lib.test_inspect") + + local t1 = test.hot1() + + write_hooks([[ + local test = require("lib.test_inspect") + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 47, test.hot1, function(info) + return true + end) + ]]) + + ngx.sleep(1.5) + + local t2 = test.hot1() + assert(t2-t1 < t1*0.8, "hot1 consumes at least double times than before") + } + } +--- error_log +inspect: remove hook: t/lib/test_inspect.lua#47 +inspect: all hooks removed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ip-restriction.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ip-restriction.t new file mode 100644 index 0000000..a6e7ad7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ip-restriction.t @@ -0,0 +1,847 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ip-restriction") + local conf = { + whitelist = { + "10.255.254.0/24", + "192.168.0.0/16" + } + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- request +GET /t +--- response_body +{"message":"Your IP address is not allowed","response_code":403,"whitelist":["10.255.254.0/24","192.168.0.0/16"]} + + + +=== TEST 2: wrong CIDR v4 format +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ip-restriction") + local conf = { + whitelist = { + "10.255.256.0/24", + "192.168.0.0/16" + } + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + return + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- request +GET /t +--- response_body_like eval +qr/failed to validate item 1: object matches none of the required/ + + + +=== TEST 3: wrong CIDR v4 format +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ip-restriction") + local conf = { + whitelist = { + "10.255.254.0/38", + "192.168.0.0/16" + } + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + return + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- request +GET /t +--- response_body_like eval +qr/failed to validate item 1: object matches none of the required/ + + + +=== TEST 4: empty conf +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ip-restriction") + + local ok, err = 
plugin.check_schema({}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +value should match only one schema, but matches none +done + + + +=== TEST 5: empty CIDRs +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ip-restriction") + + local ok, err = plugin.check_schema({blacklist={}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body_like eval +qr/expect array to have at least 1 items/ + + + +=== TEST 6: whitelist and blacklist mutual exclusive +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ip-restriction") + local ok, err = plugin.check_schema({whitelist={"172.17.40.0/24"}, blacklist={"10.255.0.0/16"}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +value should match only one schema, but matches both schemas 1 and 2 +done + + + +=== TEST 7: set whitelist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24", + "113.74.26.106" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit route and ip cidr in the whitelist +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 9: hit route and ip in the whitelist +--- http_config +set_real_ip_from 127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: 113.74.26.106 +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 10: hit route and ip not in the whitelist +--- http_config +set_real_ip_from 127.0.0.1; 
+real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: 114.114.114.114 +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Your IP address is not allowed"} +--- error_log +ip-restriction exits with http status code 403 + + + +=== TEST 11: hit route and IPv6 not in the whitelist +--- http_config +set_real_ip_from 127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: 2001:db8::2 +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Your IP address is not allowed"} + + + +=== TEST 12: set blacklist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ip-restriction": { + "blacklist": [ + "127.0.0.0/24", + "113.74.26.106" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: hit route and ip cidr in the blacklist +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Your IP address is not allowed"} + + + +=== TEST 14: hit route and ip in the blacklist +--- http_config +set_real_ip_from 127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: 113.74.26.106 +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Your IP address is not allowed"} + + + +=== TEST 15: hit route and ip not in the blacklist +--- http_config +set_real_ip_from 127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: 114.114.114.114 +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 16: hit route and IPv6 not in the blacklist +--- http_config +set_real_ip_from 127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: 2001:db8::2 +--- 
request +GET /hello +--- response_body +hello world + + + +=== TEST 17: remove ip-restriction +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit route +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 19: sanity(IPv6) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ip-restriction") + local conf = { + whitelist = { + "::1", + "fe80::/32", + "2001:DB8:0:23:8:800:200C:417A", + } + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say("pass") + } + } +--- request +GET /t +--- response_body +pass + + + +=== TEST 20: set blacklist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ip-restriction": { + "blacklist": [ + "::1", + "fe80::/32" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: hit route +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 22: hit route and IPv6 in the blacklist +--- http_config +set_real_ip_from 127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: ::1 +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Your IP address is not allowed"} + + + +=== TEST 23: hit route and IPv6 in the blacklist +--- http_config +set_real_ip_from 
127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: fe80::1:1 +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Your IP address is not allowed"} + + + +=== TEST 24: wrong IPv6 format +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ip-restriction") + for i, ip in ipairs({"::1/129", "::ffgg"}) do + local conf = { + whitelist = { + ip + } + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + end + } + } +--- request +GET /t +--- response_body_like eval +qr/failed to validate item 1: object matches none of the required/ + + + +=== TEST 25: set disable=true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ip-restriction": { + "blacklist": [ + "127.0.0.0/24" + ], + "_meta": { + "disable": true + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: set blacklist and user-defined message +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ip-restriction": { + "blacklist": [ + "127.0.0.0/24", + "113.74.26.106" + ], + "message": "Do you want to do something bad?" 
+ } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 27: hit route and ip cidr in the blacklist +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Do you want to do something bad?"} + + + +=== TEST 28: hit route and ip in the blacklist +--- http_config +set_real_ip_from 127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: 113.74.26.106 +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Do you want to do something bad?"} + + + +=== TEST 29: set whitelist and user-defined message +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24", + "113.74.26.106" + ], + "message": "Do you want to do something bad?" 
+ } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 30: hit route and ip not in the whitelist +--- http_config +set_real_ip_from 127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: 114.114.114.114 +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Do you want to do something bad?"} +--- error_log +ip-restriction exits with http status code 403 + + + +=== TEST 31: message that do not reach the minimum range +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24", + "113.74.26.106" + ], + "message": "" + } + } + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/string too short, expected at least 1, got 0/ + + + +=== TEST 32: exceeds the maximum limit of message +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + + local data = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + } + }, + plugins = { + ["ip-restriction"] = { + ["whitelist"] = { + "127.0.0.0/24", + "113.74.26.106" + }, + message = ("-1Aa#"):rep(205) + } + } + } + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/string too long, expected at most 1024, got 1025/ + + + +=== TEST 33: set whitelist and 404 response code +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + 
"uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24", + "113.74.26.106" + ], + "response_code": 404 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 34: hit route and ip not in the whitelist expect 404 +--- http_config +set_real_ip_from 127.0.0.1; +real_ip_header X-Forwarded-For; +--- more_headers +X-Forwarded-For: 114.114.114.114 +--- request +GET /hello +--- error_code: 404 +--- error_log +ip-restriction exits with http status code 404 + + + +=== TEST 35: set wrong response code +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24", + "113.74.26.106" + ], + "response_code": 409 + } + } + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/property \\"response_code\\" validation failed: expected 409 to be at most 404/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/jwe-decrypt.t b/CloudronPackages/APISIX/apisix-source/t/plugin/jwe-decrypt.t new file mode 100644 index 0000000..af2af32 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/jwe-decrypt.t @@ -0,0 +1,585 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.jwe-decrypt") + local core = require("apisix.core") + local conf = {key = "123", secret = "12345678901234567890123456789012"} + + local ok, err = plugin.check_schema(conf, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- response_body_like eval +qr/{"key":"123","secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ + + + +=== TEST 2: wrong type of key +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.jwe-decrypt") + local ok, err = plugin.check_schema({key = 123, secret = "12345678901234567890123456789012"}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "key" validation failed: wrong type: expected string, got number +done + + + +=== TEST 3: wrong type of secret +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.jwe-decrypt") + local ok, err = plugin.check_schema({key = "123", secret = 12345678901234567890123456789012}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + 
ngx.say("done") + } + } +--- response_body +property "secret" validation failed: wrong type: expected string, got number +done + + + +=== TEST 4: secret length too long +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: false +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.jwe-decrypt") + local ok, err = plugin.check_schema({key = "123", secret = "123456789012345678901234567890123"}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +the secret length should be 32 chars +done + + + +=== TEST 5: secret length too long(base64 encode) +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: false +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.jwe-decrypt") + local ok, err = plugin.check_schema({key = "123", secret = "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXphYmNkZWZn", is_base64_encoded = true}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +the secret length after base64 decode should be 32 chars +done + + + +=== TEST 6: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwe-decrypt": { + "key": "user-key", + "secret": "12345678901234567890123456789012" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: verify encrypted field +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + -- get plugin conf from etcd, secret and key is encrypted + local etcd = require("apisix.core.etcd") + local res 
= assert(etcd.get('/consumers/jack')) + ngx.say(res.body.node.value.plugins["jwe-decrypt"].key) + ngx.say(res.body.node.value.plugins["jwe-decrypt"].secret) + } + } +--- response_body +XU29sA3FEVF68hGcdPo7sg== +f9pGB0Dt4gYNCLKiINPfVSviKjQs2zfkBCT4+XZ3mDABZkJTr0orzYRD5CptDKMc + + + +=== TEST 8: enable jwe-decrypt plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwe-decrypt": { + "header": "Authorization", + "forward_header": "Authorization" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: create public API route (jwe-decrypt sign) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/jwe/encrypt" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: sign / verify in argument +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, err, token = t('/apisix/plugin/jwe/encrypt?key=user-key&payload=hello', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + code, err, body = t('/hello', + ngx.HTTP_GET, + nil, + nil, + { Authorization = token } + ) + + ngx.print(body) + } + } +--- response_body +hello world + + + +=== TEST 11: test for unsupported method +--- request +PATCH /apisix/plugin/jwe/encrypt?key=user-key +--- error_code: 404 + + + +=== TEST 12: verify, missing token +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"missing JWE token in request"} + 
+ + +=== TEST 13: verify: invalid JWE token +--- request +GET /hello +--- more_headers +Authorization: invalid-eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy.6JeRgm02rpOJdg.4nkSYJgwMKYgTeacatgmRw +--- error_code: 400 +--- response_body +{"message":"JWE token invalid"} + + + +=== TEST 14: verify (in header) +--- request +GET /hello +--- more_headers +Authorization: Bearer eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy.6JeRgm02rpOJdg.4nkSYJgwMKYgTeacatgmRw +--- response_body +hello world + + + +=== TEST 15: verify (in header without Bearer) +--- request +GET /hello +--- more_headers +Authorization: eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy.6JeRgm02rpOJdg.4nkSYJgwMKYgTeacatgmRw +--- response_body +hello world + + + +=== TEST 16: verify (header with bearer) +--- request +GET /hello +--- more_headers +Authorization: bearer eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy.6JeRgm02rpOJdg.4nkSYJgwMKYgTeacatgmRw +--- response_body +hello world + + + +=== TEST 17: verify (invalid bearer token) +--- request +GET /hello +--- more_headers +Authorization: bearer invalid-eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy.6JeRgm02rpOJdg.4nkSYJgwMKYgTeacatgmRw +--- error_code: 400 +--- response_body +{"message":"JWE token invalid"} + + + +=== TEST 18: delete a exist consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwe-decrypt": { + "key": "user-key", + "secret": "12345678901234567890123456789012" + } + } + }]] + ) + ngx.say("code: ", code < 300, " body: ", body) + + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "chen", + "plugins": { + "jwe-decrypt": { + "key": "chen-key", + "secret": "12345678901234567890123456789021" 
+ } + } + }]] + ) + ngx.say("code: ", code < 300, " body: ", body) + + code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_DELETE) + ngx.say("code: ", code < 300, " body: ", body) + + code, body = t('/apisix/plugin/jwe/encrypt?key=chen-key&payload=hello', + ngx.HTTP_GET) + ngx.say("code: ", code < 300, " body: ", body) + } + } +--- response_body +code: true body: passed +code: true body: passed +code: true body: passed +code: true body: passed + + + +=== TEST 19: add consumer with username and plugins with base64 secret +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwe-decrypt": { + "key": "user-key", + "secret": "fo4XKdZ1xSrIZyms4q2BwPrW5lMpls9qqy5tiAk2esc=", + "is_base64_encoded": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: enable jwt decrypt plugin with base64 secret +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwe-decrypt": { + "header": "Authorization", + "forward_header": "Authorization" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: create public API route (jwe-decrypt sign) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/jwe/encrypt" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: sign / verify in argument 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, err, token = t('/apisix/plugin/jwe/encrypt?key=user-key&payload=hello', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + ngx.log(ngx.WARN, "dibag: ", token) + + code, err, body = t('/hello', + ngx.HTTP_GET, + nil, + nil, + { Authorization = token } + ) + + ngx.print(body) + } + } +--- response_body +hello world + + + +=== TEST 23: verify (in header) +--- request +GET /hello +--- more_headers +Authorization: Bearer eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy._0DrWD0.vl-ydutnNuMpkYskwNqu-Q +--- response_body +hello world + + + +=== TEST 24: verify (in header without Bearer) +--- request +GET /hello +--- more_headers +Authorization: eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy._0DrWD0.vl-ydutnNuMpkYskwNqu-Q +--- response_body +hello world + + + +=== TEST 25: enable jwt decrypt plugin with test upstream route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/3', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwe-decrypt": { + "header": "Authorization", + "forward_header": "Authorization" + } + }, + "upstream": { + "nodes": { + "httpbin.org": 1 + }, + "type": "roundrobin" + }, + "uri": "/headers" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 26: verify in upstream header +--- request +GET /headers +--- more_headers +Authorization: eyJhbGciOiJkaXIiLCJraWQiOiJ1c2VyLWtleSIsImVuYyI6IkEyNTZHQ00ifQ..MTIzNDU2Nzg5MDEy._0DrWD0.vl-ydutnNuMpkYskwNqu-Q +--- response_body_like +.*"Authorization": "hello".* diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth-anonymous-consumer.t b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth-anonymous-consumer.t new file mode 100644 
index 0000000..17cda61 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth-anonymous-consumer.t @@ -0,0 +1,224 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + + +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +apisix: + data_encryption: + enable_encrypt_fields: false +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); +}); + + +run_tests; + +__DATA__ + +=== TEST 1: add consumer jack and anonymous +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + }, + "limit-count": { + "count": 4, + "time_window": 60 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed +passed + + + +=== TEST 2: add jwt auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: normal consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for i = 1, 5, 1 do + local code, body = t('/hello', + ngx.HTTP_GET, + nil, + nil, + { + Authorization = "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs" + } + ) + + if code >= 300 then + ngx.say("failed" .. code) + return + end + ngx.say(body .. 
i) + end + } + } +--- request +GET /t +--- response_body +passed1 +passed2 +passed3 +passed4 +failed503 + + + +=== TEST 4: request without jwt-auth header will be from anonymous consumer and it will pass +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: request without jwt-auth header will be from anonymous consumer and different rate limit will apply +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 503, 503, 503] + + + +=== TEST 6: add jwt auth plugin with non-existent anonymous_consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "anonymous_consumer": "not-found-anonymous" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: anonymous-consumer configured in the route should not be found +--- request +GET /hello +--- error_code: 401 +--- error_log +failed to get anonymous consumer not-found-anonymous +--- response_body +{"message":"Invalid user authorization"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth.t b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth.t new file mode 100644 index 0000000..1c28123 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth.t @@ -0,0 +1,1224 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.jwt-auth") + local core = require("apisix.core") + local conf = {key = "123"} + + local ok, err = plugin.check_schema(conf, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- response_body_like eval +qr/{"algorithm":"HS256","base64_secret":false,"exp":86400,"key":"123","lifetime_grace_period":0,"secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ + + + +=== TEST 2: wrong type of string +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.jwt-auth") + local ok, err = plugin.check_schema({key = 123}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "key" validation failed: wrong type: expected string, got number +done + + + +=== TEST 3: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }]] + ) + + if code >= 300 
then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: enable jwt auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: verify, missing token +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing JWT token in request"} + + + +=== TEST 6: verify: invalid JWT token +--- request +GET /hello?jwt=invalid-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTU2Mzg3MDUwMX0.pPNVvh-TQsdDzorRwa-uuiLYiEBODscp9wv0cwD6c68 +--- error_code: 401 +--- response_body +{"message":"JWT token invalid"} +--- error_log +JWT token invalid: invalid header: invalid-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9 + + + +=== TEST 7: verify: expired JWT token +--- request +GET /hello?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTU2Mzg3MDUwMX0.pPNVvh-TQsdDzorRwa-uuiLYiEBODscp9wv0cwD6c68 +--- error_code: 401 +--- response_body +{"message":"failed to verify jwt"} +--- error_log +failed to verify jwt: 'exp' claim expired at Tue, 23 Jul 2019 08:28:21 GMT + + + +=== TEST 8: verify (in header) +--- request +GET /hello +--- more_headers +Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world + + + +=== TEST 9: verify (in cookie) +--- request +GET /hello +--- more_headers +Cookie: jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world + + + +=== TEST 10: 
verify (in header without Bearer) +--- request +GET /hello +--- more_headers +Authorization: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world + + + +=== TEST 11: verify (header with bearer) +--- request +GET /hello +--- more_headers +Authorization: bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world + + + +=== TEST 12: verify (invalid bearer token) +--- request +GET /hello +--- more_headers +Authorization: bearer invalid-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- error_code: 401 +--- response_body +{"message":"JWT token invalid"} +--- error_log +JWT token invalid: invalid header: invalid-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9 + + + +=== TEST 13: delete a exist consumer +--- config + location /t { + content_by_lua_block { + ngx.sleep(1) + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }]] + ) + ngx.say("code: ", code < 300, " body: ", body) + + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "chen", + "plugins": { + "jwt-auth": { + "key": "chen-key", + "secret": "chen-key" + } + } + }]] + ) + ngx.say("code: ", code < 300, " body: ", body) + + code, body = t('/apisix/admin/consumers/jack', + ngx.HTTP_DELETE) + ngx.say("code: ", code < 300, " body: ", body) + } + } +--- response_body +code: true body: passed +code: true body: passed +code: true body: passed + + + +=== TEST 14: add consumer with username and plugins with base64 secret +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "fo4XKdZ1xSrIZyms4q2BwPrW5lMpls9qqy5tiAk2esc=", + "base64_secret": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: enable jwt auth plugin with base64 secret +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: sign / verify +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- sign is generated via https://jwt.io/#debugger-io. This is the case for all other test cases and is not specified further + local sign = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsIm5iZiI6MTcyNzI3NDk4M30._Z8b_Asb2ROvGX4R5sNMbgJNQXB6x7aQeuVjmjY21Nw" + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world + + + +=== TEST 17: verify: invalid JWT token +--- request +GET /hello?jwt=invalid-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTU2Mzg3MDUwMX0.pPNVvh-TQsdDzorRwa-uuiLYiEBODscp9wv0cwD6c68 +--- error_code: 401 +--- response_body +{"message":"JWT token invalid"} +--- error_log +JWT token invalid: invalid header: invalid-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9 + + + +=== TEST 18: verify: invalid signature +--- request +GET /hello +--- more_headers +Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- error_code: 401 +--- response_body +{"message":"failed to verify jwt"} +--- error_log +failed to verify jwt: signature mismatch: fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs + + + +=== TEST 19: verify: happy path +--- request +GET /hello +--- more_headers +Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0._kNmXeH1uYVAvApFTONk2Z3Gh-a4XfGrjmqd_ahoOI0 +--- response_body +hello world + + + +=== TEST 20: without key +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.jwt-auth") + local ok, err = plugin.check_schema({}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + return + end + + ngx.say("done") + } + } +--- response_body +property "key" is required + + + +=== TEST 21: get the schema by schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body, raw = t('/apisix/admin/schema/plugins/jwt-auth?schema_type=consumer', + ngx.HTTP_GET, + [[ 
+{"dependencies":{"algorithm":{"oneOf":[{"properties":{"algorithm":{"default":"HS256","enum":["HS256","HS512"]}}},{"required":["public_key"],"properties":{"algorithm":{"enum":["RS256","ES256"]},"public_key":{"type":"string"}}}]}},"required":["key"],"type":"object","properties":{"base64_secret":{"default":false,"type":"boolean"},"secret":{"type":"string"},"algorithm":{"enum":["HS256","HS512","RS256","ES256"],"default":"HS256","type":"string"},"exp":{"minimum":1,"default":86400,"type":"integer"},"key":{"type":"string"}}} + ]] + ) + + ngx.status = code + } + } + + + +=== TEST 22: get the schema by error schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/jwt-auth?schema_type=consumer123123', + ngx.HTTP_GET, + nil, + [[ + {"properties":{},"type":"object"} + ]] + ) + ngx.status = code + } + } + + + +=== TEST 23: get the schema by default schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/jwt-auth', + ngx.HTTP_GET, + nil, + [[ + {"properties":{},"type":"object"} + ]] + ) + ngx.status = code + } + } + + + +=== TEST 24: add consumer with username and plugins with public_key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-rs256", + "algorithm": "RS256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKebDxlvQMGyEesAL1r1nIJBkSdqu3Hr\n7noq/0ukiZqVQLSJPMOv0oxQSutvvK3hoibwGakDOza+xRITB7cs2cECAwEAAQ==\n-----END PUBLIC KEY-----" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed +--- skip_eval +1: $ENV{OPENSSL_FIPS} eq 'yes' + + + +=== TEST 25: JWT sign and verify use RS256 
algorithm +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed +--- skip_eval +1: $ENV{OPENSSL_FIPS} eq 'yes' + + + +=== TEST 26: sign/verify use RS256 algorithm(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- the jwt signature is encoded with this private_key + -- private_key = "-----BEGIN RSA PRIVATE KEY-----\nMIIBOgIBAAJBAKebDxlvQMGyEesAL1r1nIJBkSdqu3Hr7noq/0ukiZqVQLSJPMOv\n0oxQSutvvK3hoibwGakDOza+xRITB7cs2cECAwEAAQJAYPWh6YvjwWobVYC45Hz7\n+pqlt1DWeVQMlN407HSWKjdH548ady46xiQuZ5Cfx3YyCcnsfVWaQNbC+jFbY4YL\nwQIhANfASwz8+2sKg1xtvzyaChX5S5XaQTB+azFImBJumixZAiEAxt93Td6JH1RF\nIeQmD/K+DClZMqSrliUzUqJnCPCzy6kCIAekDsRh/UF4ONjAJkKuLedDUfL3rNFb\n2M4BBSm58wnZAiEAwYLMOg8h6kQ7iMDRcI9I8diCHM8yz0SfbfbsvzxIFxECICXs\nYvIufaZvBa8f+E/9CANlVhm5wKAyM8N8GJsiCyEG\n-----END RSA PRIVATE KEY-----" + + local sign = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleS1yczI1NiIsIm5iZiI6MTcyNzI3NDk4M30.FaV6N-bWaSXkRrF2ec28hH5QENl-8I0LCONdNnQpB1YOb4akP-lKnwtABgfsQ_eKaEIf1PWNoghyByLejXaPbQ" + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world +--- skip_eval +1: $ENV{OPENSSL_FIPS} eq 'yes' + + + +=== TEST 27: add consumer with username and plugins with public_key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-rs256", + "algorithm": "RS256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDGxOfVe/seP5T/V8pkS5YNAPRC\n3Ffxxedi7v0pyZh/4d4p9Qx0P9wOmALwlOq4Ftgks311pxG0zL0LcTJY4ikbc3r0\nh8SM0yhj9UV1VGtuia4YakobvpM9U+kq3lyIMO9ZPRez0cP3AJIYCt5yf8E7bNYJ\njbJNjl8WxvM1tDHqVQIDAQAB\n-----END PUBLIC KEY-----" + } + } + } + ]] + ) + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed +--- skip_eval +1: $ENV{OPENSSL_FIPS} eq 'yes' + + + +=== TEST 28: JWT sign and verify use RS256 algorithm +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed +--- skip_eval +1: $ENV{OPENSSL_FIPS} eq 'yes' + + + +=== TEST 29: sign/verify use RS256 algorithm(private_key numbits = 1024) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- the jwt signature is encoded with this private_key + -- private_key = "-----BEGIN RSA PRIVATE 
KEY-----\nMIICXQIBAAKBgQDGxOfVe/seP5T/V8pkS5YNAPRC3Ffxxedi7v0pyZh/4d4p9Qx0\nP9wOmALwlOq4Ftgks311pxG0zL0LcTJY4ikbc3r0h8SM0yhj9UV1VGtuia4Yakob\nvpM9U+kq3lyIMO9ZPRez0cP3AJIYCt5yf8E7bNYJjbJNjl8WxvM1tDHqVQIDAQAB\nAoGAYFy9eAXvLC7u8QuClzT9vbgksvVXvWKQVqo+GbAeOoEpz3V5YDJFYN3ZLwFC\n+ZQ5nTFXNV6Veu13CMEMA4NBIa8I4r3aYzSjq7X7UEBkLDBtEUge52mYakNfXD8D\nqViHkyJqvtVnBl7jNZVqbBderQnXA0kigaeZPL3+hkYKBgECQQDmiDbUL3FBynLy\nNX6/JdAbO4g1Nl/1RsGg8svhb6vRM8WQyIQWt5EKi7yoP/9nIRXcIgdwpVO6wZRU\nDojL0oy1AkEA3LpjqXxIRzcy2ALsqKN3hoNPGAlkPyG3Mlph91mqSZ2jYpXCX9LW\nhhQdf9GmfO8jZtYhYAJqEMOJrKeZHToLIQJBAJbrJbnTNTn05ztZehh5ELxDRPBR\nIJDaOXi8emyjRsA2PGiEXLTih7l3sZIUE4fYSQ9L18MO+LmScSB2Q2fr9uECQFc7\nIh/dCgN7ARD1Nun+kEIMqrlpHMEGZgv0RDsoqG+naOaRINwVysn6MR5OkGlXaLo/\nbbkvuxMc88/T/GLciYECQQC4oUveCOic4Qs6TQfMUKKv/kJ09slbD70HkcBzA5nY\nyro4RT4z/SN6T3SD+TuWn2//I5QxiQEIbOCTySci7yuh\n-----END RSA PRIVATE KEY-----" + + local sign = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleS1yczI1NiIsIm5iZiI6MTcyNzI3NDk4M30.FG-PAyscR-pFyw1a5ZiRxHLxzSI1jyVyZxm-fj3-u5igjacJY7UByCUKDnieV9-Ft81X15gdHAcrumUsTbu-77F50Bp5A1sxzdL_PXVLJ1cc8UP2ltvQwf1YWdutK7CI_uNLaeCYPZd9tWPhnfpsv4AdTdaCWeFyoaZSNOdw4oA" + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world +--- skip_eval +1: $ENV{OPENSSL_FIPS} eq 'yes' + + + +=== TEST 30: sign/verify use RS256 algorithm(private_key numbits = 1024,with extra payload) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- the jwt signature is encoded with this private_key and payload + -- private_key = "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQDGxOfVe/seP5T/V8pkS5YNAPRC3Ffxxedi7v0pyZh/4d4p9Qx0\nP9wOmALwlOq4Ftgks311pxG0zL0LcTJY4ikbc3r0h8SM0yhj9UV1VGtuia4Yakob\nvpM9U+kq3lyIMO9ZPRez0cP3AJIYCt5yf8E7bNYJjbJNjl8WxvM1tDHqVQIDAQAB\nAoGAYFy9eAXvLC7u8QuClzT9vbgksvVXvWKQVqo+GbAeOoEpz3V5YDJFYN3ZLwFC\n+ZQ5nTFXNV6Veu13CMEMA4NBIa8I4r3aYzSjq7X7UEBkLDBtEUge52mYakNfXD8D\nqViHkyJqvtVnBl7jNZVqbBderQnXA0kigaeZPL3+hkYKBgECQQDmiDbUL3FBynLy\nNX6/JdAbO4g1Nl/1RsGg8svhb6vRM8WQyIQWt5EKi7yoP/9nIRXcIgdwpVO6wZRU\nDojL0oy1AkEA3LpjqXxIRzcy2ALsqKN3hoNPGAlkPyG3Mlph91mqSZ2jYpXCX9LW\nhhQdf9GmfO8jZtYhYAJqEMOJrKeZHToLIQJBAJbrJbnTNTn05ztZehh5ELxDRPBR\nIJDaOXi8emyjRsA2PGiEXLTih7l3sZIUE4fYSQ9L18MO+LmScSB2Q2fr9uECQFc7\nIh/dCgN7ARD1Nun+kEIMqrlpHMEGZgv0RDsoqG+naOaRINwVysn6MR5OkGlXaLo/\nbbkvuxMc88/T/GLciYECQQC4oUveCOic4Qs6TQfMUKKv/kJ09slbD70HkcBzA5nY\nyro4RT4z/SN6T3SD+TuWn2//I5QxiQEIbOCTySci7yuh\n-----END RSA PRIVATE KEY-----" + -- payload = {"aaa":"11","bb":"222"} + + local sign = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleS1yczI1NiIsIm5iZiI6MTcyNzI3NDk4M30.FG-PAyscR-pFyw1a5ZiRxHLxzSI1jyVyZxm-fj3-u5igjacJY7UByCUKDnieV9-Ft81X15gdHAcrumUsTbu-77F50Bp5A1sxzdL_PXVLJ1cc8UP2ltvQwf1YWdutK7CI_uNLaeCYPZd9tWPhnfpsv4AdTdaCWeFyoaZSNOdw4oA" + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world +--- skip_eval +1: $ENV{OPENSSL_FIPS} eq 'yes' + + + +=== TEST 31: add consumer with username and plugins with public_key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-rs256", + "algorithm": "RS256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv5LHjZ4FxQ9jk6eQGDRt\noRwFVkLq+dUBebs97hrzirokVr2B+RoxqdLfKAM+AsN2DadawZ2GqlCV9DL0/gz6\nnWSqTQpWbQ8c7CrF31EkIHUYRzZvWy17K3WC9Odk/gM1FVd0HbZ2Rjuqj9ADeeqx\nnj9npDqKrMODOENy31SqZNerWZsdgGkML5JYbX5hbI2L9LREvRU21fDgSfGL6Mw4\nNaxnnzcvll4yqwrBELSeDZEAt0+e/p1dO7moxF+b1pFkh9vQl6zGvnvf8fOqn5Ex\ntLHXVzgx752PHMwmuj9mO1ko6p8FOM0JHDnooI+5rwK4j3I27Ho5nnatVWUaxK4U\n8wIDAQAB\n-----END PUBLIC KEY-----" + } + } + } + ]] + ) + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed +--- error_code_like: ^(?:200|201)?$ + + + +=== TEST 32: JWT sign and verify use RS256 algorithm(private_key numbits = 2048) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 33: sign/verify use RS256 algorithm(private_key numbits = 2048) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- the jwt signature is encoded with this private_key + -- private_key = "-----BEGIN RSA PRIVATE 
KEY-----\nMIIEowIBAAKCAQEAv5LHjZ4FxQ9jk6eQGDRtoRwFVkLq+dUBebs97hrzirokVr2B\n+RoxqdLfKAM+AsN2DadawZ2GqlCV9DL0/gz6nWSqTQpWbQ8c7CrF31EkIHUYRzZv\nWy17K3WC9Odk/gM1FVd0HbZ2Rjuqj9ADeeqxnj9npDqKrMODOENy31SqZNerWZsd\ngGkML5JYbX5hbI2L9LREvRU21fDgSfGL6Mw4Naxnnzcvll4yqwrBELSeDZEAt0+e\n/p1dO7moxF+b1pFkh9vQl6zGvnvf8fOqn5ExtLHXVzgx752PHMwmuj9mO1ko6p8F\nOM0JHDnooI+5rwK4j3I27Ho5nnatVWUaxK4U8wIDAQABAoIBAFsFQC73H8KrNyKW\ngI4fit77U0XS8ZXWMKdH4XrZ71DAdDeKPtC+M05+1GxMbhAeEl8WXraTQ8J0G2s1\nMtXqEMDrbUbBXKLghVtoTy91e/a369sZ7/qgN19Eq/30WzWdDIGhVZgwcy2Xd8hw\nitZIPi/z7ChJcE35bsUytseJkJPsWeMJNq4mLbHqMSBQWze/vNvIeGYr2xfqXc6H\nywGWGlk46RI28mOf7PecU0DxFoTBNcntZrpOwaIrTDsC7E6uNvhVbtsneseTlQuj\nihS7DAH72Zx3CXc9+SL3b5QNRD1Rnp+gKM6itjW1yduOj2dS0p8YzcUYNtxnw5Gv\nuLoHwuECgYEA58NhvnHn10YLBEMYxb30tDobdGfOjBSfih8K53+/SJhqF5mv4qZX\nUfw3o5R+CkkrhbZ24yst7wqKFYZ+LfazOqljOPOrBsgIIry/sXBlcbGLCw9MYFfB\nejKTt/xZjqLdDCcEbiSB0L2xNuyF/TZOu8V5Nu55LXKBqeW4yISQ5FkCgYEA05t1\n2cq8gE1jMfGXQNFIpUDG2j4wJXAPqnJZSUF/BICa55mH/HYRKoP2uTSvAnqNrdGt\nsnjnnMA7T+fGogB4STif1POWfj+BTKVa/qhUX9ytH6TeI4aqPXSZdTVEPRfR7bG1\nIB/j2lyPkiNi2VijMx33xqxIaQUUsvxIT95GSisCgYAdaJFylQmSK3UiaVEvZlcy\nt1zcfH+dDtDfueisT216TLzJmdrTq7/Qy2xT+Xe03mwDX4/ea5A8kN3MtXA1bOR5\nQR0yENlW1vMRVVoNrfFxZ9H46UwLvZbzZo+P/RlwHAJolFrfjwpZ7ngaPBEUfFup\nP/mNmt0Ng0YoxNmZuBiaoQKBgQCa2d4RRgpRvdAEYW41UbHetJuQZAfprarZKZrr\nP9HKoq45I6Je/qurOCzZ9ZLItpRtic6Zl16u2AHPhKZYMQ3VT2mvdZ5AvwpI44zG\nZLpx+FR8nrKsvsRf+q6+Ff/c0Uyfq/cHDi84wZmS8PBKa1Hqe1ix+6t1pvEx1eq4\n/8jiRwKBgGOZzt5H5P0v3cFG9EUPXtvf2k81GmZjlDWu1gu5yWSYpqCfYr/K/1Md\ndaQ/YCKTc12SYL7hZ2j+2/dGFXNXwknIyKNj76UxjUpJywWI5mUaXJZJDkLCRvxF\nkk9nWvPorpjjjxaIVN+TkGgDd/60at/tI6HxzZitVyla5rB8hoPm\n-----END RSA PRIVATE KEY-----" + + local sign = 
"eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleS1yczI1NiIsIm5iZiI6MTcyNzI3NDk4M30.Zvp8dXefvGXrKgeoaNsA3sbV_3fw1w6Te7a0B_UANzef7gGJwvlnD6c3-f4yAy7GPgNzP_H1-atcF-sgLHAYpUa14XKe22a9S_BJSoQszoZuqGgpnGcjSzDMK9JX3FLUtzOFMQR5C4_3d7_z0NlepNo2xdQ6IQj0SvS1jrNwydpA9L89N07id3EO739uNw339g78N9QHP-j8nWItfbjo31xefCWTHtcloGkfaJOhcr06qmSbrivBU1AuPA8T3ZVumqw6fcRJzrvQJdKEfVyP-IPUtUy8SM1yLqstaKojJtU3A2HKaeb4fycwHXxtl52xhzIshr_I3iUhX_ak-z7m0A" + local code, _, res = t('/hello?jwt=' .. sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world + + + +=== TEST 34: sign/verify use RS256 algorithm(private_key numbits = 2048,with extra payload) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- the jwt signature is encoded with this private_key and payload + -- private_key = "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAv5LHjZ4FxQ9jk6eQGDRtoRwFVkLq+dUBebs97hrzirokVr2B\n+RoxqdLfKAM+AsN2DadawZ2GqlCV9DL0/gz6nWSqTQpWbQ8c7CrF31EkIHUYRzZv\nWy17K3WC9Odk/gM1FVd0HbZ2Rjuqj9ADeeqxnj9npDqKrMODOENy31SqZNerWZsd\ngGkML5JYbX5hbI2L9LREvRU21fDgSfGL6Mw4Naxnnzcvll4yqwrBELSeDZEAt0+e\n/p1dO7moxF+b1pFkh9vQl6zGvnvf8fOqn5ExtLHXVzgx752PHMwmuj9mO1ko6p8F\nOM0JHDnooI+5rwK4j3I27Ho5nnatVWUaxK4U8wIDAQABAoIBAFsFQC73H8KrNyKW\ngI4fit77U0XS8ZXWMKdH4XrZ71DAdDeKPtC+M05+1GxMbhAeEl8WXraTQ8J0G2s1\nMtXqEMDrbUbBXKLghVtoTy91e/a369sZ7/qgN19Eq/30WzWdDIGhVZgwcy2Xd8hw\nitZIPi/z7ChJcE35bsUytseJkJPsWeMJNq4mLbHqMSBQWze/vNvIeGYr2xfqXc6H\nywGWGlk46RI28mOf7PecU0DxFoTBNcntZrpOwaIrTDsC7E6uNvhVbtsneseTlQuj\nihS7DAH72Zx3CXc9+SL3b5QNRD1Rnp+gKM6itjW1yduOj2dS0p8YzcUYNtxnw5Gv\nuLoHwuECgYEA58NhvnHn10YLBEMYxb30tDobdGfOjBSfih8K53+/SJhqF5mv4qZX\nUfw3o5R+CkkrhbZ24yst7wqKFYZ+LfazOqljOPOrBsgIIry/sXBlcbGLCw9MYFfB\nejKTt/xZjqLdDCcEbiSB0L2xNuyF/TZOu8V5Nu55LXKBqeW4yISQ5FkCgYEA05t1\n2cq8gE1jMfGXQNFIpUDG2j4wJXAPqnJZSUF/BICa55mH/HYRKoP2uTSvAnqNrdGt\nsnjnnMA7T+fGogB4STif1POWfj+BTKVa/qhUX9ytH6TeI4aqPXSZdTVEPRfR7bG1\nIB/j2lyPkiNi2VijMx33xqxIaQUUsvxIT95GSisCgY
AdaJFylQmSK3UiaVEvZlcy\nt1zcfH+dDtDfueisT216TLzJmdrTq7/Qy2xT+Xe03mwDX4/ea5A8kN3MtXA1bOR5\nQR0yENlW1vMRVVoNrfFxZ9H46UwLvZbzZo+P/RlwHAJolFrfjwpZ7ngaPBEUfFup\nP/mNmt0Ng0YoxNmZuBiaoQKBgQCa2d4RRgpRvdAEYW41UbHetJuQZAfprarZKZrr\nP9HKoq45I6Je/qurOCzZ9ZLItpRtic6Zl16u2AHPhKZYMQ3VT2mvdZ5AvwpI44zG\nZLpx+FR8nrKsvsRf+q6+Ff/c0Uyfq/cHDi84wZmS8PBKa1Hqe1ix+6t1pvEx1eq4\n/8jiRwKBgGOZzt5H5P0v3cFG9EUPXtvf2k81GmZjlDWu1gu5yWSYpqCfYr/K/1Md\ndaQ/YCKTc12SYL7hZ2j+2/dGFXNXwknIyKNj76UxjUpJywWI5mUaXJZJDkLCRvxF\nkk9nWvPorpjjjxaIVN+TkGgDd/60at/tI6HxzZitVyla5rB8hoPm\n-----END RSA PRIVATE KEY-----" + -- payload = {"aaa":"11","bb":"222"} + + local sign = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleS1yczI1NiIsIm5iZiI6MTcyNzI3NDk4M30.Zvp8dXefvGXrKgeoaNsA3sbV_3fw1w6Te7a0B_UANzef7gGJwvlnD6c3-f4yAy7GPgNzP_H1-atcF-sgLHAYpUa14XKe22a9S_BJSoQszoZuqGgpnGcjSzDMK9JX3FLUtzOFMQR5C4_3d7_z0NlepNo2xdQ6IQj0SvS1jrNwydpA9L89N07id3EO739uNw339g78N9QHP-j8nWItfbjo31xefCWTHtcloGkfaJOhcr06qmSbrivBU1AuPA8T3ZVumqw6fcRJzrvQJdKEfVyP-IPUtUy8SM1yLqstaKojJtU3A2HKaeb4fycwHXxtl52xhzIshr_I3iUhX_ak-z7m0A" + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world + + + +=== TEST 35: JWT sign with the public key when using the RS256 algorithm +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-rs256", + "algorithm": "RS256", + "public_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIBOgIBAAJBAKebDxlvQMGyEesAL1r1nIJBkSdqu3Hr7noq/0ukiZqVQLSJPMOv\n0oxQSutvvK3hoibwGakDOza+xRITB7cs2cECAwEAAQJAYPWh6YvjwWobVYC45Hz7\n+pqlt1DWeVQMlN407HSWKjdH548ady46xiQuZ5Cfx3YyCcnsfVWaQNbC+jFbY4YL\nwQIhANfASwz8+2sKg1xtvzyaChX5S5XaQTB+azFImBJumixZAiEAxt93Td6JH1RF\nIeQmD/K+DClZMqSrliUzUqJnCPCzy6kCIAekDsRh/UF4ONjAJkKuLedDUfL3rNFb\n2M4BBSm58wnZAiEAwYLMOg8h6kQ7iMDRcI9I8diCHM8yz0SfbfbsvzxIFxECICXs\nYvIufaZvBa8f+E/9CANlVhm5wKAyM8N8GJsiCyEG\n-----END RSA PRIVATE KEY-----" + } + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 36: JWT sign and verify RS256 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 37: sanity(algorithm = HS512) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.jwt-auth") + local core = require("apisix.core") + local conf = {key = "123", algorithm = "HS512"} + + local ok, err = plugin.check_schema(conf, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- response_body_like eval 
+qr/{"algorithm":"HS512","base64_secret":false,"exp":86400,"key":"123","lifetime_grace_period":0,"secret":"[a-zA-Z0-9+\\\/]+={0,2}"}/ + + + +=== TEST 38: add consumer with username and plugins use HS512 algorithm +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-HS512", + "algorithm": "HS512", + "secret": "my-secret-key" + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 39: JWT sign and verify use HS512 algorithm +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 40: sign / verify (algorithm = HS512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local sign = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleS1IUzUxMiIsIm5iZiI6MTcyNzI3NDk4M30.emzmjIbFqkRAr55YW5YobdXDxYWiMuUNLPooE5G_bbme1ul19p1dKW7ESrlqvr4BPJRKThm4PnkNC4h9xSJpBQ" + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world + + + +=== TEST 41: sign / verify (algorithm = HS512,with extra payload) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- the jwt signature is encoded with this payload + -- payload = {"aaa":"11","bb":"222"} + + local sign = "eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhYWEiOiIxMSIsImJiIjoiMjIyIiwia2V5IjoidXNlci1rZXktSFM1MTIiLCJuYmYiOjE3MjcyNzQ5ODN9.s6E3-wNJypgJL71MxoyTTHBDeqdrGQddFjkhLlh3ZN6IZwgpFRlFT1_8suQg9dWUDHGQqgejULyLPhmBMIbw2A" + local code, _, res = t('/hello?jwt=' .. sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world + + + +=== TEST 42: test for unsupported algorithm +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.jwt-auth") + local core = require("apisix.core") + local conf = {key = "123", algorithm = "ES512"} + + local ok, err = plugin.check_schema(conf, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- response_body_like eval +qr/property "algorithm" validation failed/ + + + +=== TEST 43: wrong format of secret +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.jwt-auth") + local ok, err = plugin.check_schema({ + key = "123", + secret = "{^c0j4&]2!=J=", + base64_secret = true, + }, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- response_body +base64_secret required but the secret is not in base64 format + + + +=== TEST 44: when the exp value is not set, make sure the default value(86400) works +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body, res = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + 
"plugins": { + "jwt-auth": { + "key": "exp-not-set", + "secret": "my-secret-key" + } + } + }]] + ) + + res = require("toolkit.json").decode(res) + assert(res.value.plugins["jwt-auth"].exp == 86400) + + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 45: RS256 without public key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "algorithm": "RS256", + "key": "user-key" + } + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/failed to validate dependent schema for \\"algorithm\\"/ + + + +=== TEST 46: RS256 without private key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "algorithm": "RS256", + "key": "user-key", + "public_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIBOgIBAAJBAKebDxlvQMGyEesAL1r1nIJBkSdqu3Hr7noq/0ukiZqVQLSJPMOv\n0oxQSutvvK3hoibwGakDOza+xRITB7cs2cECAwEAAQJAYPWh6YvjwWobVYC45Hz7\n+pqlt1DWeVQMlN407HSWKjdH548ady46xiQuZ5Cfx3YyCcnsfVWaQNbC+jFbY4YL\nwQIhANfASwz8+2sKg1xtvzyaChX5S5XaQTB+azFImBJumixZAiEAxt93Td6JH1RF\nIeQmD/K+DClZMqSrliUzUqJnCPCzy6kCIAekDsRh/UF4ONjAJkKuLedDUfL3rNFb\n2M4BBSm58wnZAiEAwYLMOg8h6kQ7iMDRcI9I8diCHM8yz0SfbfbsvzxIFxECICXs\nYvIufaZvBa8f+E/9CANlVhm5wKAyM8N8GJsiCyEG\n-----END RSA PRIVATE KEY-----" + } + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 200 + + + +=== TEST 47: add consumer with username and plugins with public_key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-es256", + 
"algorithm": "ES256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEEVs/o5+uQbTjL3chynL4wXgUg2R9\nq9UU8I5mEovUf86QZ7kOBIjJwqnzD1omageEHWwHdBO6B+dFabmdT9POxg==\n-----END PUBLIC KEY-----" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 48: JWT sign and verify use ES256 algorithm(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed +--- skip_eval +1: $ENV{OPENSSL_FIPS} eq 'yes' + + + +=== TEST 49: sign/verify use ES256 algorithm(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- the jwt signature is encoded with this private_key + -- private_key = "-----BEGIN PRIVATE KEY-----\nMIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgevZzL1gdAFr88hb2\nOF/2NxApJCzGCEDdfSp6VQO30hyhRANCAAQRWz+jn65BtOMvdyHKcvjBeBSDZH2r\n1RTwjmYSi9R/zpBnuQ4EiMnCqfMPWiZqB4QdbAd0E7oH50VpuZ1P087G\n-----END PRIVATE KEY-----" + + local sign = "eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleS1lczI1NiIsIm5iZiI6MTcyNzI3NDk4M30.t-CZzJRSxIuVVjU3m8_zDtb7h9x2R2s3BJWmerh0hw-RMIklBqLJ3V9kYAWl7DIyXlp0jQCPDZ_M7mhr1Q3HPw" + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world +--- skip_eval +1: $ENV{OPENSSL_FIPS} eq 'yes' + + + +=== TEST 50: add consumer missing public_key (algorithm=RS256) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-res256", + "algorithm": "RS256" + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin jwt-auth err: failed to validate dependent schema for \"algorithm\": value should match only one schema, but matches none"} + + + +=== TEST 51: add consumer missing public_key (algorithm=ES256) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-es256", + "algorithm": "ES256" + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin jwt-auth err: failed to validate dependent schema for \"algorithm\": value should match only one schema, but matches none"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth2.t new file mode 100644 index 0000000..f419967 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth2.t @@ -0,0 +1,455 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: enable jwt auth plugin using admin api with custom parameter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "header": "jwt-header", + "query": "jwt-query", + "cookie": "jwt-cookie" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: verify (in header) +--- request +GET /hello +--- more_headers +jwt-header: Bearer 
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world + + + +=== TEST 4: verify (in cookie) +--- request +GET /hello +--- more_headers +Cookie: jwt-cookie=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world + + + +=== TEST 5: verify (in query) +--- request +GET /hello?jwt-query=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world + + + +=== TEST 6: verify (in header without Bearer) +--- request +GET /hello +--- more_headers +jwt-header: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world + + + +=== TEST 7: verify (in header with bearer) +--- request +GET /hello +--- more_headers +jwt-header: bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world + + + +=== TEST 8: use lifetime_grace_period default value +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- in order to modify the system_leeway in jwt-validators module + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw86xcJwNxL2MkWnjIGiw\n]] .. 
+ [[94QY78Sq89dLqMdV/Ku2GIX9lYkbS0VDGtmxDGJLBOYW4cKTX+pigJyzglLgE+nD\n]] .. + [[z3VJf2oCqSV74gTyEdi7sw9e1rCyR6dR8VA7LEpIHwmhnDhhjXy1IYSKRdiVHLS5\n]] .. + [[sYmaAGckpUo3MLqUrgydGj5tFzvK/R/ELuZBdlZM+XuWxYry05r860E3uL+VdVCO\n]] .. + [[oU4RJQknlJnTRd7ht8KKcZb6uM14C057i26zX/xnOJpaVflA4EyEo99hKQAdr8Sh\n]] .. + [[G70MOLYvGCZxl1o8S3q4X67MxcPlfJaXnbog2AOOGRaFar88XiLFWTbXMCLuz7xD\n]] .. + [[zQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256", + "claim_validator": { + "issuer": { + "valid_issuers": ["Mysoft corp"] + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = [[Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCBjb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.Vq_sBN7nH67vMDbiJE01EP4hvJYE_5ju6izjkOX8pF5OS4g2RWKWpL6h6-b0tTkCzG4JD5BEl13LWW-Gxxw0i9vEK0FLg_kC_kZLYB8WuQ6B9B9YwzmZ3OLbgnYzt_VD7D-7psEbwapJl5hbFsIjDgOAEx-UCmjUcl2frZxZavG2LUiEGs9Ri7KqOZmTLgNDMWfeWh1t1LyD0_b-eTInbasVtKQxMlb5kR0Ln_Qg5092L-irJ7dqaZma7HItCnzXJROdqJEsMIBAYRwDGa_w5kIACeMOdU85QKtMHzOenYFkm6zh_s59ndziTctKMz196Y8AL08xuTi6d1gEWpM92A]] + } + }) + ngx.status = res.status + if res.status >= 300 then + ngx.status = res.status + ngx.say(res.body) + return + end + + -- add consumer + local code, body, res_data = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "exp": 1, + "algorithm": "HS256", + "base64_secret": false, + "secret": "test-jwt-secret", + "key": "test-jwt-a" + } + } + }]] + ) + + if code >= 300 then + ngx.status = 
code + ngx.say(body) + end + + -- add route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "query": "jwt", + "header": "Mytoken", + "cookie": "jwt" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local gen_token = require("lib.apisix.plugins.jwt-auth").gen_token + local auth_conf = { + exp = 1, + algorithm = "HS256", + base64_secret = false, + secret = "test-jwt-secret", + key = "test-jwt-a" + } + local sign = gen_token(auth_conf) + if not sign then + ngx.status = 500 + ngx.say("failed to gen_token") + end + + -- verify JWT token + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers={Mytoken=sign}}) + + -- the JWT has not expired, so it should be valid + if res.status >= 300 then + ngx.status = res.status + ngx.say(res.body) + return + end + + -- after 1.1 seconds, the JWT should be expired, because the exp is only 1 second + ngx.sleep(1.1) + res, err = httpc:request_uri(uri, {headers={Mytoken=sign}}) + ngx.status = res.status + ngx.print(res.body) + } + } +--- error_code: 401 +--- response_body eval +qr/failed to verify jwt/ +--- error_log eval +qr/ailed to verify jwt: 'exp' claim expired at/ + + + +=== TEST 9: lifetime_grace_period is 2 seconds +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- in order to modify the system_leeway in jwt-validators module + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": 
"https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw86xcJwNxL2MkWnjIGiw\n]] .. + [[94QY78Sq89dLqMdV/Ku2GIX9lYkbS0VDGtmxDGJLBOYW4cKTX+pigJyzglLgE+nD\n]] .. + [[z3VJf2oCqSV74gTyEdi7sw9e1rCyR6dR8VA7LEpIHwmhnDhhjXy1IYSKRdiVHLS5\n]] .. + [[sYmaAGckpUo3MLqUrgydGj5tFzvK/R/ELuZBdlZM+XuWxYry05r860E3uL+VdVCO\n]] .. + [[oU4RJQknlJnTRd7ht8KKcZb6uM14C057i26zX/xnOJpaVflA4EyEo99hKQAdr8Sh\n]] .. + [[G70MOLYvGCZxl1o8S3q4X67MxcPlfJaXnbog2AOOGRaFar88XiLFWTbXMCLuz7xD\n]] .. + [[zQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256", + "claim_validator": { + "issuer": { + "valid_issuers": ["Mysoft corp"] + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = [[Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCBjb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.Vq_sBN7nH67vMDbiJE01EP4hvJYE_5ju6izjkOX8pF5OS4g2RWKWpL6h6-b0tTkCzG4JD5BEl13LWW-Gxxw0i9vEK0FLg_kC_kZLYB8WuQ6B9B9YwzmZ3OLbgnYzt_VD7D-7psEbwapJl5hbFsIjDgOAEx-UCmjUcl2frZxZavG2LUiEGs9Ri7KqOZmTLgNDMWfeWh1t1LyD0_b-eTInbasVtKQxMlb5kR0Ln_Qg5092L-irJ7dqaZma7HItCnzXJROdqJEsMIBAYRwDGa_w5kIACeMOdU85QKtMHzOenYFkm6zh_s59ndziTctKMz196Y8AL08xuTi6d1gEWpM92A]] + } + }) + ngx.status = res.status + if res.status >= 300 then + ngx.status = res.status + ngx.say(res.body) + return + end + + -- add consumer + local code, body, res_data = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "exp": 1, + "algorithm": "HS256", + "base64_secret": false, + "secret": "test-jwt-secret", + "key": "test-jwt-a", + "lifetime_grace_period": 2 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- add route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "query": "jwt", + "header": "Mytoken", + "cookie": "jwt" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + -- get JWT token + local gen_token = require("lib.apisix.plugins.jwt-auth").gen_token + local auth_conf = { + exp = 1, + algorithm = "HS256", + base64_secret = false, + secret = "test-jwt-secret", + key = "test-jwt-a", + lifetime_grace_period = 2 + } + local sign = gen_token(auth_conf) + if not sign then + ngx.status = 500 + ngx.say("failed to gen_token") + end + + -- verify JWT token + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port .. "/hello" + local httpc = http.new() + + -- after 1.1 seconds, since lifetime_grace_period is 2 seconds, + -- so the JWT has not expired, it should be valid + ngx.sleep(1.1) + local res, err = httpc:request_uri(uri, {headers={Mytoken=sign}}) + ngx.status = res.status + ngx.print(res.body) + } + } +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth3.t new file mode 100755 index 0000000..0c0d33a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth3.t @@ -0,0 +1,608 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + if (!$block->response_body) { + $block->set_value("response_body", "passed\n"); + } + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 2: enable jwt auth plugin using admin api with custom parameter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "header": "jwt-header", + "query": "jwt-query", + "cookie": "jwt-cookie", + "hide_credentials": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 3: verify (in header) not hiding credentials +--- request +GET /echo +--- more_headers +jwt-header: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_headers +jwt-header: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs + + + +=== TEST 4: verify (in cookie) not hiding 
credentials +--- request +GET /echo +--- more_headers +Cookie: jwt-cookie=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_headers +Cookie: jwt-cookie=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs + + + +=== TEST 5: enable jwt auth plugin using admin api without hiding credentials +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "header": "jwt-header", + "query": "jwt-query", + "cookie": "jwt-cookie", + "hide_credentials": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin_proxy_rewrite_args" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 6: verify (in query) without hiding credentials +--- request +GET /plugin_proxy_rewrite_args?foo=bar&hello=world&jwt-query=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +uri: /plugin_proxy_rewrite_args +foo: bar +hello: world +jwt-query: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs + + + +=== TEST 7: enable jwt auth plugin using admin api with hiding credentials +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "header": "jwt-header", + "query": "jwt-query", + "cookie": "jwt-cookie", + "hide_credentials": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + 
ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 8: verify (in header) with hiding credentials +--- request +GET /echo +--- more_headers +jwt-header: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_headers +!jwt-header + + + +=== TEST 9: enable jwt auth plugin using admin api with hiding credentials +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "header": "jwt-header", + "query": "jwt-query", + "cookie": "jwt-cookie", + "hide_credentials": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin_proxy_rewrite_args" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 10: verify (in query) with hiding credentials +--- request +GET /plugin_proxy_rewrite_args?foo=bar&hello=world&jwt-query=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +uri: /plugin_proxy_rewrite_args +foo: bar +hello: world + + + +=== TEST 11: verify (in cookie) with hiding credentials +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "header": "jwt-header", + "query": "jwt-query", + "cookie": "jwt-cookie", + "hide_credentials": true + } + }, + "upstream": { + "nodes": { + "test.com:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 12: verify (in cookie) with hiding credentials +--- request +GET /hello +--- more_headers +Cookie: hello=world; 
jwt-cookie=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs; foo=bar +--- response_body +hello world + + + +=== TEST 13: delete exist consumers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- delete exist consumers + local code, body = t('/apisix/admin/consumers/jack', ngx.HTTP_DELETE) + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: data encryption for secret +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/consumers/jack', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + ngx.say(res.value.plugins["jwt-auth"].secret) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/consumers/jack')) + ngx.say(res.body.node.value.plugins["jwt-auth"].secret) + } + } +--- response_body +my-secret-key +IRWpPjbDq5BCgHyIllnOMA== + + + +=== TEST 15: set jwt-auth conf: secret uses secret ref +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "root" + }]] + ) + + if 
code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- change consumer with secrets ref: vault + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "$secret://vault/test1/jack/secret" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- set route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "header": "jwt-header", + "query": "jwt-query", + "cookie": "jwt-cookie", + "hide_credentials": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/jack secret=my-secret-key +--- response_body +Success! Data written to: kv/apisix/jack + + + +=== TEST 17: verify (in header) not hiding credentials +--- request +GET /echo +--- more_headers +jwt-header: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_headers +jwt-header: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs + + + +=== TEST 18: store rsa key pairs and secret into vault from local filesystem +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/rsa1 secret=$3nsitiv3-c8d3 public_key=@t/certs/public.pem +--- response_body +Success! 
Data written to: kv/apisix/rsa1 + + + +=== TEST 19: create consumer for RS256 algorithm with public key fetched from vault and public key in consumer schema +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- enable jwt auth plugin using admin api + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "john", + "plugins": { + "jwt-auth": { + "key": "rsa1", + "algorithm": "RS256", + "secret": "$secret://vault/test1/rsa1/secret", + "public_key": "$secret://vault/test1/rsa1/public_key" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: set jwt-auth conf with the token in an env var: secret uses secret ref +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "$ENV://VAULT_TOKEN" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- change consumer with secrets ref: vault + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "$secret://vault/test1/jack/secret" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- set route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "header": "jwt-header", + "query": "jwt-query", + "cookie": "jwt-cookie", + 
"hide_credentials": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: verify (in header) not hiding credentials +--- request +GET /echo +--- more_headers +jwt-header: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_headers +jwt-header: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth4.t b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth4.t new file mode 100644 index 0000000..333b261 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/jwt-auth4.t @@ -0,0 +1,352 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + if (!$block->response_body) { + $block->set_value("response_body", "passed\n"); + } + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: enable jwt auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key", + "key_claim_name": "iss" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: verify (in header) +--- config + location /t { + content_by_lua_block { + local function gen_token(payload) + local buffer = require "string.buffer" + local openssl_mac = require "resty.openssl.mac" + + local base64 = require "ngx.base64" + local base64_encode = base64.encode_base64url + + local json = require("cjson") + + local function sign(data, key) + return openssl_mac.new(key, "HMAC", nil, "sha256"):final(data) + end + local header = { typ = "JWT", alg = "HS256" } + local buf = 
buffer.new() + + buf:put(base64_encode(json.encode(header))):put("."):put(base64_encode(json.encode(payload))) + + local ok, signature = pcall(sign, buf:tostring(), "my-secret-key") + if not ok then + return nil, signature + end + + buf:put("."):put(base64_encode(signature)) + + return buf:get() + end + + local payload = { + sub = "1234567890", + iss = "user-key", + exp = 9916239022 + } + + local token = gen_token(payload) + + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local opt = {method = "POST", headers = {["Authorization"] = "Bearer " .. token}} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + + ngx.print(res.body) + } + } +--- request +GET /t +--- more_headers +--- response_body +hello world + + + +=== TEST 4: ensure secret is non empty +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + -- prepare consumer with a custom key claim name + local csm_code, csm_body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "mike", + "plugins": { + "jwt-auth": { + "key": "custom-user-key", + "secret": "" + } + } + }]] + ) + if csm_code == 200 then + ngx.status = 500 + ngx.say("error") + return + end + ngx.status = csm_code + ngx.say(csm_body) + } + } +--- error_code: 400 +--- response_body eval +qr/\\"secret\\" validation failed: string too short, expected at least 1, got 0/ + + + +=== TEST 5: ensure key is non empty +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + -- prepare consumer with a custom key claim name + local csm_code, csm_body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "mike", + "plugins": { + "jwt-auth": { + "key": "", + "algorithm": "RS256", + "public_key": "somekey", + "private_key": "someprivkey" + } + } + }]] + ) + if csm_code == 200 
then + ngx.status = 500 + ngx.say("error") + return + end + ngx.status = csm_code + ngx.say(csm_body) + } + } +--- error_code: 400 +--- response_body eval +qr/\\"key\\" validation failed: string too short, expected at least 1, got 0/ + + + +=== TEST 6: store_in_ctx disabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {}, + "serverless-post-function": { + "phase": "rewrite", + "functions": [ + "return function(conf, ctx) + if ctx.jwt_auth_payload then + ngx.status = 200 + ngx.say(\"JWT found in ctx. Payload key: \" .. ctx.jwt_auth_payload.key) + return ngx.exit(200) + else + ngx.status = 401 + ngx.say(\"JWT not found in ctx.\") + return ngx.exit(401) + end + end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/jwt-auth-no-ctx" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: verify store_in_ctx disabled (header with bearer) +--- request +GET /jwt-auth-no-ctx +--- more_headers +Authorization: bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsIm5iZiI6MTcyNzI3NDk4M30.N6ebc4U5ms976pwKZ_iQ88w_uJKqUVNtTYZ_nXhRpWo +--- error_code: 401 +--- response_body +JWT not found in ctx. + + + +=== TEST 8: store_in_ctx enabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "store_in_ctx": true + }, + "serverless-post-function": { + "phase": "rewrite", + "functions": [ + "return function(conf, ctx) + if ctx.jwt_auth_payload then + ngx.status = 200 + ngx.say(\"JWT found in ctx. Payload key: \" .. 
ctx.jwt_auth_payload.key) + return ngx.exit(200) + else + ngx.status = 401 + ngx.say(\"JWT not found in ctx.\") + return ngx.exit(401) + end + end" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/jwt-auth-ctx" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: verify store_in_ctx enabled (header with bearer) +--- request +GET /jwt-auth-ctx +--- more_headers +Authorization: bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsIm5iZiI6MTcyNzI3NDk4M30.N6ebc4U5ms976pwKZ_iQ88w_uJKqUVNtTYZ_nXhRpWo +--- error_code: 200 +--- response_body +JWT found in ctx. Payload key: user-key diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger-large-body.t b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger-large-body.t new file mode 100644 index 0000000..ab87a90 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger-large-body.t @@ -0,0 +1,946 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + # fake server, only for test + server { + listen 1970; + location /large_resp { + content_by_lua_block { + local large_body = { + "h", "e", "l", "l", "o" + } + + local size_in_bytes = 1024 * 1024 -- 1mb + for i = 1, size_in_bytes do + large_body[i+5] = "l" + end + large_body = table.concat(large_body, "") + + ngx.say(large_body) + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: max_body_bytes is not an integer +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.kafka-logger") + local ok, err = plugin.check_schema({ + broker_list= { + ["127.0.0.1"] = 9092 + }, + kafka_topic = "test2", + key = "key1", + timeout = 1, + batch_max_size = 1, + max_req_body_bytes = "10", + include_req_body = true, + meta_format = "origin" + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +property "max_req_body_bytes" validation failed: wrong type: expected integer, got string +done + + + +=== TEST 2: set route(meta_format = origin, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "max_req_body_bytes": 5, + "include_req_body": true, + "meta_format": "origin" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + 
+ + +=== TEST 3: hit route(meta_format = origin, include_req_body = true) +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world +--- error_log +send data to kafka: GET /hello?ab=cd HTTP/1.1 +host: localhost +content-length: 6 +connection: close +abcde +--- wait: 2 + + + +=== TEST 4: set route(meta_format = default, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "max_req_body_bytes": 5, + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit route(meta_format = default, include_req_body = true) +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world +--- error_log_like eval +qr/"body": "abcde"/ +--- wait: 2 + + + +=== TEST 6: set route(id: 1, meta_format = default, include_resp_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "max_resp_body_bytes": 5, + "include_resp_body": true, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 7: hit route(meta_format = default, include_resp_body = true) +--- request +POST /hello?name=qwerty +abcdef +--- 
response_body +hello world +--- error_log eval +qr/send data to kafka: \{.*"body":"hello"/ +--- wait: 2 + + + +=== TEST 8: set route(id: 1, meta_format = origin, include_resp_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "meta_format": "origin", + "include_resp_body": true, + "max_resp_body_bytes": 5, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 9: hit route(meta_format = origin, include_resp_body = true) +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world +--- error_log +send data to kafka: POST /hello?name=qwerty HTTP/1.1 +host: localhost +content-length: 6 +connection: close +--- wait: 2 + + + +=== TEST 10: set route(id: 1, meta_format = default, include_resp_body = true, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "meta_format": "default", + "include_req_body": true, + "max_req_body_bytes": 5, + "include_resp_body": true, + "max_resp_body_bytes": 5, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 11: hit route(meta_format = default, include_resp_body = 
true, include_req_body = true) +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world +--- error_log eval +qr/send data to kafka: \{.*"body":"abcde"/ +--- error_log_like +*"body":"hello" +--- wait: 2 + + + +=== TEST 12: set route(id: 1, meta_format = default, include_resp_body = false, include_req_body = false) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "meta_format": "default", + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 13: hit route(meta_format = default, include_resp_body = false, include_req_body = false) +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world +--- no_error_log eval +qr/send data to kafka: \{.*"body":.*/ +--- wait: 2 + + + +=== TEST 14: set route(large_body, meta_format = default, include_resp_body = true, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "meta_format": "default", + "include_req_body": true, + "max_req_body_bytes": 256, + "include_resp_body": true, + "max_resp_body_bytes": 256, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + 
+=== TEST 15: hit route(large_body, meta_format = default, include_resp_body = true, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + local http = require("resty.http") + + local large_body = { + "h", "e", "l", "l", "o" + } + + local size_in_bytes = 10 * 1024 -- 10kb + for i = 1, size_in_bytes do + large_body[i+5] = "l" + end + large_body = table.concat(large_body, "") + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/echo" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, + { + method = "POST", + body = large_body, + } + ) + ngx.say(res.body) + } + } +--- request +GET /t +--- error_log eval +qr/send data to kafka: \{.*"body":"hello(l{251})".*/ +--- response_body eval +qr/hello.*/ + + + +=== TEST 16: set route(large_body, meta_format = default, include_resp_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "meta_format": "default", + "include_resp_body": true, + "max_resp_body_bytes": 256, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 17: hit route(large_body, meta_format = default, include_resp_body = true) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + local http = require("resty.http") + + local large_body = { + "h", "e", "l", "l", "o" + } + + local size_in_bytes = 10 * 1024 -- 10kb + for i = 1, size_in_bytes do + large_body[i+5] = "l" + end + large_body = 
table.concat(large_body, "") + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/echo" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, + { + method = "POST", + body = large_body, + } + ) + ngx.say(res.body) + } + } +--- request +GET /t +--- error_log eval +qr/send data to kafka: \{.*"body":"hello(l{251})".*/ +--- response_body eval +qr/hello.*/ + + + +=== TEST 18: set route(large_body, meta_format = default, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "meta_format": "default", + "include_req_body": true, + "max_req_body_bytes": 256, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 19: hit route(large_body, meta_format = default, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + local http = require("resty.http") + + local large_body = { + "h", "e", "l", "l", "o" + } + + local size_in_bytes = 10 * 1024 -- 10kb + for i = 1, size_in_bytes do + large_body[i+5] = "l" + end + large_body = table.concat(large_body, "") + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/echo" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, + { + method = "POST", + body = large_body, + } + ) + ngx.say(res.body) + } + } +--- request +GET /t +--- error_log eval +qr/send data to kafka: \{.*"body":"hello(l{251})".*/ +--- response_body eval +qr/hello.*/ + + + +=== TEST 20: set route(large_body, meta_format = default, include_resp_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "meta_format": "default", + "include_resp_body": true, + "max_resp_body_bytes": 256, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1970": 1 + }, + "type": "roundrobin" + }, + "uri": "/large_resp" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 21: truncate upstream response body 1m to 256 bytes +--- request +GET /large_resp +--- response_body eval +qr/hello.*/ +--- error_log eval +qr/send data to kafka: \{.*"body":"hello(l{251})".*/ + + + +=== TEST 22: set route(large_body, meta_format = default, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "meta_format": "default", + "include_req_body": true, + "max_req_body_bytes": 256, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 23: truncate 
upstream request body 1m to 256 bytes +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + local http = require("resty.http") + + local large_body = { + "h", "e", "l", "l", "o" + } + + local size_in_bytes = 100 * 1024 -- 10kb + for i = 1, size_in_bytes do + large_body[i+5] = "l" + end + large_body = table.concat(large_body, "") + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, + { + method = "POST", + body = large_body, + } + ) + + if err then + ngx.say(err) + end + + ngx.say(res.body) + } + } +--- request +GET /t +--- response_body_like +hello world +--- error_log eval +qr/send data to kafka: \{.*"body":"hello(l{251})".*/ + + + +=== TEST 24: set route(meta_format = default, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "max_req_body_bytes": 5, + "include_req_body": true, + "meta_format": "default" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: empty request body +--- request +GET /hello?ab=cd +--- response_body +hello world +--- error_log eval +qr/send data to kafka/ +--- wait: 2 + + + +=== TEST 26: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/kafka-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "request_body": "$request_body" + } + }]] + ) + if code >=300 then + ngx.status = 
code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 27: set route(meta_format = default, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "max_req_body_bytes": 5 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 28: hit route(meta_format = default, include_req_body = true) +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world +--- error_log_like eval +qr/"request_body": "abcde"/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger-log-format.t b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger-log-format.t new file mode 100644 index 0000000..9532182 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger-log-format.t @@ -0,0 +1,163 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('info'); +repeat_each(1); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/kafka-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set route(id: 1), batch_max_size=1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: hit route and report kafka logger +--- request +GET /hello +--- response_body +hello world +--- wait: 0.5 +--- error_log eval +qr/send data to kafka: \{.*"host":"localhost"/ + + + +=== TEST 4: log format in plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1":9092 + }, + "log_format": { + "x_ip": "$remote_addr" + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + 
}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit route and report kafka logger +--- request +GET /hello +--- response_body +hello world +--- wait: 0.5 +--- error_log eval +qr/send data to kafka: \{.*"x_ip":"127.0.0.1".*\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger.t new file mode 100644 index 0000000..4ffef3b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger.t @@ -0,0 +1,762 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.kafka-logger") + local ok, err = plugin.check_schema({ + kafka_topic = "test", + key = "key1", + broker_list = { + ["127.0.0.1"] = 3 + } + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: missing broker list +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.kafka-logger") + local ok, err = plugin.check_schema({kafka_topic = "test", key= "key1"}) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +value should match only one schema, but matches none +done + + + +=== TEST 3: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.kafka-logger") + local ok, err = plugin.check_schema({ + broker_list = { + ["127.0.0.1"] = 3000 + }, + timeout = "10", + kafka_topic ="test", + key= "key1" + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +property "timeout" validation failed: wrong type: expected integer, got string +done + + + +=== TEST 4: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- 
response_body +passed + + + +=== TEST 5: access +--- request +GET /hello +--- response_body +hello world +--- wait: 2 +--- ignore_error_log + + + +=== TEST 6: error log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092, + "127.0.0.1":9093 + }, + "kafka_topic" : "test2", + "producer_type": "sync", + "key" : "key1", + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + } + } +--- error_log +failed to send data to Kafka topic +[error] +--- wait: 1 + + + +=== TEST 7: set route(meta_format = origin, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": true, + "meta_format": "origin" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit route, report log to kafka +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world +--- error_log +send data to kafka: GET /hello?ab=cd HTTP/1.1 +host: localhost +content-length: 6 +connection: close + +abcdef +--- wait: 2 + + + +=== TEST 9: set route(meta_format = origin, include_req_body 
= false) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false, + "meta_format": "origin" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: hit route, report log to kafka +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world +--- error_log +send data to kafka: GET /hello?ab=cd HTTP/1.1 +host: localhost +content-length: 6 +connection: close +--- wait: 2 + + + +=== TEST 11: set route(meta_format = default) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: hit route, report log to kafka +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world +--- error_log_like eval +qr/send data to kafka: \{.*"upstream":"127.0.0.1:1980"/ +--- wait: 2 + + + +=== TEST 13: set route(id: 1), missing key field +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + 
"127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "timeout" : 1, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: access, test key field is optional +--- request +GET /hello +--- response_body +hello world +--- wait: 2 + + + +=== TEST 15: set route(meta_format = default), missing key field +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: hit route, report log to kafka +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world +--- error_log_like eval +qr/send data to kafka: \{.*"upstream":"127.0.0.1:1980"/ +--- wait: 2 + + + +=== TEST 17: use the topic with 3 partitions +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1": 9092 + }, + "kafka_topic" : "test3", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: report log to kafka by different partitions +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1": 9092 + }, + "kafka_topic" : "test3", + "producer_type": "sync", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + } + } +--- timeout: 5s +--- ignore_response +--- error_log eval +[qr/partition_id: 1/, +qr/partition_id: 0/, +qr/partition_id: 2/] + + + +=== TEST 19: report log to kafka by different partitions in async mode +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1": 9092 + }, + "kafka_topic" : "test3", + "producer_type": "async", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + } + } +--- timeout: 5s +--- ignore_response +--- error_log eval +[qr/partition_id: 1/, +qr/partition_id: 0/, +qr/partition_id: 2/] + + + +=== TEST 20: set route with incorrect sasl_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins":{ + "kafka-logger":{ + "brokers":[ + { + "host":"127.0.0.1", + "port":19094, + "sasl_config":{ + "mechanism":"PLAIN", + "user":"admin", + "password":"admin-secret2233" + } + }], + "kafka_topic":"test2", 
+ "key":"key1", + "timeout":1, + "batch_max_size":1 + } + }, + "upstream":{ + "nodes":{ + "127.0.0.1:1980":1 + }, + "type":"roundrobin" + }, + "uri":"/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: hit route, failed to send data to kafka +--- request +GET /hello +--- response_body +hello world +--- error_log +failed to do PLAIN auth with 127.0.0.1:19094: Authentication failed: Invalid username or password +--- wait: 2 + + + +=== TEST 22: set route with correct sasl_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins":{ + "kafka-logger":{ + "brokers":[ + { + "host":"127.0.0.1", + "port":19094, + "sasl_config":{ + "mechanism":"PLAIN", + "user":"admin", + "password":"admin-secret" + } + }], + "kafka_topic":"test4", + "producer_type":"sync", + "key":"key1", + "timeout":1, + "batch_max_size":1, + "include_req_body": true + } + }, + "upstream":{ + "nodes":{ + "127.0.0.1:1980":1 + }, + "type":"roundrobin" + }, + "uri":"/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: hit route, send data to kafka successfully +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world +--- error_log eval +qr/send data to kafka: \{.*"body":"abcdef"/ +--- no_error_log +[error] +--- wait: 2 + + + +=== TEST 24: set route(batch_max_size = 2), check if prometheus is initialized properly +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + 
}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: access +--- extra_yaml_config +plugins: + - kafka-logger +--- request +GET /hello +--- response_body +hello world +--- wait: 2 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger2.t new file mode 100644 index 0000000..84b6f90 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger2.t @@ -0,0 +1,1090 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: required_acks, matches none of the enum values +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.kafka-logger") + local ok, err = plugin.check_schema({ + broker_list = { + ["127.0.0.1"] = 3000 + }, + required_acks = 10, + kafka_topic ="test", + key= "key1" + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +property "required_acks" validation failed: matches none of the enum values +done + + + +=== TEST 2: report log to kafka, with required_acks(1, -1) +--- config +location /t { + content_by_lua_block { + local data = { + { + input = { + plugins = { + ["kafka-logger"] = { + broker_list = { + ["127.0.0.1"] = 9092 + }, + kafka_topic = "test2", + producer_type = "sync", + timeout = 1, + batch_max_size = 1, + required_acks = 1, + meta_format = "origin", + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/hello", + }, + }, + { + input = { + plugins = { + ["kafka-logger"] = { + broker_list = { + ["127.0.0.1"] = 9092 + }, + kafka_topic = "test2", + producer_type = "sync", + timeout = 1, + batch_max_size = 1, + required_acks = -1, + meta_format = "origin", + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/hello", + }, + }, + } + + local t = require("lib.test_admin").test + local err_count = 0 + for i in ipairs(data) do + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, data[i].input) + + if code >= 300 then + err_count = err_count + 1 + end + ngx.print(body) + + t('/hello', ngx.HTTP_GET) + end + + assert(err_count == 0) + } +} +--- error_log +send data to kafka: GET /hello +send data to kafka: GET /hello +send data to kafka: 
GET /hello + + + +=== TEST 3: update the broker_list and cluster_name, generate different kafka producers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + ngx.sleep(0.5) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1": 9092 + }, + "kafka_topic" : "test2", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false, + "cluster_name": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + + code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1": 19092 + }, + "kafka_topic" : "test4", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false, + "cluster_name": 2 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + + ngx.sleep(2) + ngx.say("passed") + } + } +--- timeout: 10 +--- response +passed +--- wait: 5 +--- error_log +phase_func(): kafka cluster name 1, broker_list[1] port 9092 +phase_func(): kafka cluster name 2, broker_list[1] port 19092 +--- no_error_log eval +qr/not found topic/ + + + +=== TEST 4: use the topic that does not exist on kafka(even if kafka allows auto create topics, first time push messages to kafka would got this error) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : { + "127.0.0.1": 9092 + }, + 
"kafka_topic" : "undefined_topic", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + + ngx.sleep(2) + ngx.say("passed") + } + } +--- timeout: 5 +--- response +passed +--- error_log eval +qr/not found topic, retryable: true, topic: undefined_topic, partition_id: -1/ + + + +=== TEST 5: check broker_list via schema +--- config + location /t { + content_by_lua_block { + local data = { + { + input = { + broker_list = {}, + kafka_topic = "test", + key= "key1", + }, + }, + { + input = { + broker_list = { + ["127.0.0.1"] = "9092" + }, + kafka_topic = "test", + key= "key1", + }, + }, + { + input = { + broker_list = { + ["127.0.0.1"] = 0 + }, + kafka_topic = "test", + key= "key1", + }, + }, + { + input = { + broker_list = { + ["127.0.0.1"] = 65536 + }, + kafka_topic = "test", + key= "key1", + }, + }, + { + input = { + brokers = { + }, + kafka_topic = "test", + key = "key1", + }, + }, + { + input = { + brokers = { + { + host = "127.0.0.1", + } + }, + kafka_topic = "test", + key = "key1", + }, + }, + { + input = { + brokers = { + { + port = 9092, + } + }, + kafka_topic = "test", + key = "key1", + }, + }, + { + input = { + brokers = { + { + host = "127.0.0.1", + port = "9093", + }, + }, + kafka_topic = "test", + key = "key1", + }, + }, + { + input = { + brokers = { + { + host = "127.0.0.1", + port = 0, + }, + }, + kafka_topic = "test", + key = "key1", + }, + }, + { + input = { + brokers = { + { + host = "127.0.0.1", + port = 65536, + }, + }, + kafka_topic = "test", + key = "key1", + }, + }, + { + input = { + brokers = { + { + host = "127.0.0.1", + port = 9093, + sasl_config = { + mechanism = "INVALID", + user = "admin", + password = "admin-secret", + }, + }, + }, + kafka_topic = "test", + key = "key1", + }, + }, + { + input = { + brokers = { + { + host = "127.0.0.1", + port = 9093, + sasl_config = { + user = "admin", + 
}, + }, + }, + kafka_topic = "test", + key = "key1", + }, + }, + { + input = { + brokers = { + { + host = "127.0.0.1", + port = 9093, + sasl_config = { + password = "admin-secret", + }, + }, + }, + kafka_topic = "test", + key = "key1", + }, + }, + } + + local plugin = require("apisix.plugins.kafka-logger") + + local err_count = 0 + for i in ipairs(data) do + local ok, err = plugin.check_schema(data[i].input) + if not ok then + err_count = err_count + 1 + ngx.say(err) + end + end + + assert(err_count == #data) + } + } +--- response_body +property "broker_list" validation failed: expect object to have at least 1 properties +property "broker_list" validation failed: failed to validate 127.0.0.1 (matching ".*"): wrong type: expected integer, got string +property "broker_list" validation failed: failed to validate 127.0.0.1 (matching ".*"): expected 0 to be at least 1 +property "broker_list" validation failed: failed to validate 127.0.0.1 (matching ".*"): expected 65536 to be at most 65535 +property "brokers" validation failed: expect array to have at least 1 items +property "brokers" validation failed: failed to validate item 1: property "port" is required +property "brokers" validation failed: failed to validate item 1: property "host" is required +property "brokers" validation failed: failed to validate item 1: property "port" validation failed: wrong type: expected integer, got string +property "brokers" validation failed: failed to validate item 1: property "port" validation failed: expected 0 to be at least 1 +property "brokers" validation failed: failed to validate item 1: property "port" validation failed: expected 65536 to be at most 65535 +property "brokers" validation failed: failed to validate item 1: property "sasl_config" validation failed: property "mechanism" validation failed: matches none of the enum values +property "brokers" validation failed: failed to validate item 1: property "sasl_config" validation failed: property "password" is required 
+property "brokers" validation failed: failed to validate item 1: property "sasl_config" validation failed: property "user" is required + + + +=== TEST 6: kafka brokers info in log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.127":9092 + }, + "kafka_topic" : "test2", + "producer_type": "sync", + "key" : "key1", + "batch_max_size": 1, + "cluster_name": 10 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + } + } +--- error_log_like eval +qr/create new kafka producer instance, brokers: \[\{"port":9092,"host":"127.0.0.127"}]/ +qr/failed to send data to Kafka topic: .*, brokers: \{"127.0.0.127":9092}/ + + + +=== TEST 7: set route(id: 1,include_req_body = true,include_req_body_expr = array) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "include_req_body": true, + "include_req_body_expr": [ + [ + "arg_name", + "==", + "qwerty" + ] + ], + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 8: hit route, expr eval success +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world 
+--- error_log eval +qr/send data to kafka: \{.*"body":"abcdef"/ +--- wait: 2 + + + +=== TEST 9: hit route,expr eval fail +--- request +POST /hello?name=zcxv +abcdef +--- response_body +hello world +--- no_error_log eval +qr/send data to kafka: \{.*"body":"abcdef"/ +--- wait: 2 + + + +=== TEST 10: check log schema(include_req_body) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.kafka-logger") + local ok, err = plugin.check_schema({ + kafka_topic = "test", + key = "key1", + broker_list = { + ["127.0.0.1"] = 3 + }, + include_req_body = true, + include_req_body_expr = { + {"bar", "<>", "foo"} + } + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +failed to validate the 'include_req_body_expr' expression: invalid operator '<>' +done + + + +=== TEST 11: check log schema(include_resp_body) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.kafka-logger") + local ok, err = plugin.check_schema({ + kafka_topic = "test", + key = "key1", + broker_list = { + ["127.0.0.1"] = 3 + }, + include_resp_body = true, + include_resp_body_expr = { + {"bar", "", "foo"} + } + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +failed to validate the 'include_resp_body_expr' expression: invalid operator '' +done + + + +=== TEST 12: set route include_resp_body - gzip +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "include_resp_body": true, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:11451": 1 + }, + "type": "roundrobin" + }, + "uri": "/gzip_hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- 
response_body +passed + + + +=== TEST 13: hit +--- http_config +server { + listen 11451; + gzip on; + gzip_types *; + gzip_min_length 1; + location /gzip_hello { + content_by_lua_block { + ngx.req.read_body() + local s = "gzip hello world" + ngx.header['Content-Length'] = #s + 1 + ngx.say(s) + } + } +} +--- request +GET /gzip_hello +--- more_headers +Accept-Encoding: gzip +--- error_log eval +qr/send data to kafka: \{.*"body":"gzip hello world\\n"/ +--- wait: 2 + + + +=== TEST 14: set route include_resp_body - brotli +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "include_resp_body": true, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:11452": 1 + }, + "type": "roundrobin" + }, + "uri": "/brotli_hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 15: hit +--- http_config +server { + listen 11452; + location /brotli_hello { + content_by_lua_block { + ngx.req.read_body() + local s = "brotli hello world" + ngx.header['Content-Length'] = #s + 1 + ngx.say(s) + } + header_filter_by_lua_block { + local conf = { + comp_level = 6, + http_version = 1.1, + lgblock = 0, + lgwin = 19, + min_length = 1, + mode = 0, + types = "*", + } + local brotli = require("apisix.plugins.brotli") + brotli.header_filter(conf, ngx.ctx) + } + body_filter_by_lua_block { + local conf = { + comp_level = 6, + http_version = 1.1, + lgblock = 0, + lgwin = 19, + min_length = 1, + mode = 0, + types = "*", + } + local brotli = require("apisix.plugins.brotli") + brotli.body_filter(conf, ngx.ctx) + } + } +} +--- request +GET /brotli_hello +--- more_headers +Accept-Encoding: br +--- error_log eval +qr/send data to kafka: \{.*"body":"brotli 
hello world\\n"/ +--- wait: 2 + + + +=== TEST 16: set route(id: 1,include_resp_body = true,include_resp_body_expr = array) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "broker_list" : + { + "127.0.0.1":9092 + }, + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "include_resp_body": true, + "include_resp_body_expr": [ + [ + "arg_name", + "==", + "qwerty" + ] + ], + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 17: hit route, expr eval success +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world +--- error_log eval +qr/send data to kafka: \{.*"body":"hello world\\n"/ +--- wait: 2 + + + +=== TEST 18: hit route,expr eval fail +--- request +POST /hello?name=zcxv +abcdef +--- response_body +hello world +--- no_error_log eval +qr/send data to kafka: \{.*"body":"hello world\\n"/ +--- wait: 2 + + + +=== TEST 19: multi level nested expr conditions +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local kafka = { + kafka_topic = "test2", + key = "key1", + batch_max_size = 1, + broker_list = { + ["127.0.0.1"] = 9092 + }, + timeout = 3, + include_req_body = true, + include_req_body_expr = { + {"request_length", "<", 1054}, + {"arg_name", "in", {"qwerty", "asdfgh"}} + }, + include_resp_body = true, + include_resp_body_expr = { + {"http_content_length", "<", 1054}, + {"arg_name", "in", {"qwerty", "zxcvbn"}} + } + } + local plugins = {} + plugins["kafka-logger"] = kafka + local data = { + plugins = plugins + } + data.upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } 
+ } + data.uri = "/hello" + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: hit route, req_body_expr and resp_body_expr both eval success +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world +--- error_log eval +[qr/send data to kafka: \{.*"body":"abcdef"/, +qr/send data to kafka: \{.*"body":"hello world\\n"/] +--- wait: 2 + + + +=== TEST 21: hit route, req_body_expr eval success, resp_body_expr both eval failed +--- request +POST /hello?name=asdfgh +abcdef +--- response_body +hello world +--- error_log eval +qr/send data to kafka: \{.*"body":"abcdef"/ +--- no_error_log eval +qr/send data to kafka: \{.*"body":"hello world\\n"/ +--- wait: 2 + + + +=== TEST 22: hit route, req_body_expr eval failed, resp_body_expr both eval success +--- request +POST /hello?name=zxcvbn +abcdef +--- response_body +hello world +--- error_log eval +qr/send data to kafka: \{.*"body":"hello world\\n"/ +--- no_error_log eval +qr/send data to kafka: \{.*"body":"abcdef"/ +--- wait: 2 + + + +=== TEST 23: hit route, req_body_expr eval success, resp_body_expr both eval failed +--- request +POST /hello?name=xxxxxx +abcdef +--- response_body +hello world +--- no_error_log eval +[qr/send data to kafka: \{.*"body":"abcdef"/, +qr/send data to kafka: \{.*"body":"hello world\\n"/] +--- wait: 2 + + + +=== TEST 24: update route(id: 1,include_req_body = true,include_req_body_expr = array) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "brokers" : + [{ + "host":"127.0.0.1", + "port": 9092 + }], + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "include_req_body": true, + "include_req_body_expr": [ + [ + "arg_name", + "==", + "qwerty" + ] + ], + 
"batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 25: hit route, expr eval success +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world +--- error_log eval +qr/send data to kafka: \{.*"body":"abcdef"/ +--- wait: 2 + + + +=== TEST 26: setup route with meta_refresh_interval +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "kafka-logger": { + "brokers" : + [{ + "host":"127.0.0.1", + "port": 9092 + }], + "kafka_topic" : "test2", + "key" : "key1", + "timeout" : 1, + "meta_refresh_interval": 1, + "batch_max_size": 1, + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 27: hit route, send data to kafka successfully +--- request +POST /hello +abcdef +--- response_body +hello world +--- no_error_log +[error] +--- error_log eval +qr/send data to kafka: \{.*"body":"abcdef"/ +--- wait: 2 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger3.t new file mode 100644 index 0000000..82c8248 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-logger3.t @@ -0,0 +1,120 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: should drop entries +--- extra_yaml_config +plugins: + - kafka-logger +--- config +location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local data = { + { + input = { + plugins = { + ["kafka-logger"] = { + broker_list = { + ["127.0.0.1"] = 1234 + }, + kafka_topic = "test2", + producer_type = "sync", + timeout = 1, + batch_max_size = 1, + required_acks = 1, + meta_format = "origin", + max_retry_count = 10, + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/hello", + }, + }, + } + + local t = require("lib.test_admin").test + + -- Set plugin metadata + local metadata = { + log_format = { + host = "$host", + ["@timestamp"] = "$time_iso8601", + client_ip = "$remote_addr" + }, + max_pending_entries = 1 + } + + local code, body = t('/apisix/admin/plugin_metadata/kafka-logger', ngx.HTTP_PUT, metadata) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + -- Create route + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, data[1].input) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local 
uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + httpc:request_uri(uri, { + method = "GET", + keepalive_timeout = 1, + keepalive_pool = 1, + }) + httpc:request_uri(uri, { + method = "GET", + keepalive_timeout = 1, + keepalive_pool = 1, + }) + httpc:request_uri(uri, { + method = "GET", + keepalive_timeout = 1, + keepalive_pool = 1, + }) + ngx.sleep(2) + } +} +--- error_log +max pending entries limit exceeded. discarding entry +--- timeout: 5 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-proxy.t b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-proxy.t new file mode 100644 index 0000000..eedc6d9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/kafka-proxy.t @@ -0,0 +1,122 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + {}, + {sasl = {username = "user", password = "pwd"}}, + {sasl = {username = "user"}}, + {sasl = {username = "user", password = 1234}}, + } + local plugin = require("apisix.plugins.kafka-proxy") + + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +done +done +property "sasl" validation failed: property "password" is required +property "sasl" validation failed: property "password" validation failed: wrong type: expected string, got number + + + +=== TEST 2: data encryption for sasl.password +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "kafka-proxy": { + "sasl": { + "username": "admin", + "password": "admin-secret" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["kafka-proxy"].sasl.password) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = 
assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["kafka-proxy"].sasl.password) + } + } +--- response_body +admin-secret +y4Z3aqo51xrt3f9UziNUrg== diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/key-auth-anonymous-consumer.t b/CloudronPackages/APISIX/apisix-source/t/plugin/key-auth-anonymous-consumer.t new file mode 100644 index 0000000..20448a0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/key-auth-anonymous-consumer.t @@ -0,0 +1,223 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + + +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +apisix: + data_encryption: + enable_encrypt_fields: false +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); +}); + + +run_tests; + +__DATA__ + +=== TEST 1: add consumer jack and anonymous +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "limit-count": { + "count": 4, + "time_window": 60 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "anonymous", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +passed + + + +=== TEST 2: add key auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "anonymous_consumer": "anonymous" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: normal consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for i = 1, 5, 1 do + local code, body = t('/hello', + ngx.HTTP_GET, + nil, + nil, + { + apikey = "auth-one" + } + ) + + if code >= 300 then + ngx.say("failed" .. code) + return + end + ngx.say(body .. 
i) + end + } + } +--- request +GET /t +--- response_body +passed1 +passed2 +passed3 +passed4 +failed503 + + + +=== TEST 4: request without key-auth header will be from anonymous consumer and it will pass +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: request without key-auth header will be from anonymous consumer and different rate limit will apply +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 503, 503, 503] + + + +=== TEST 6: add key auth plugin with non-existent anonymous_consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "anonymous_consumer": "not-found-anonymous" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: anonymous-consumer configured in the route should not be found +--- request +GET /hello +--- error_code: 401 +--- error_log +failed to get anonymous consumer not-found-anonymous +--- response_body +{"message":"Invalid user authorization"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/key-auth-upstream-domain-node.t b/CloudronPackages/APISIX/apisix-source/t/plugin/key-auth-upstream-domain-node.t new file mode 100644 index 0000000..236c320 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/key-auth-upstream-domain-node.t @@ -0,0 +1,247 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{CUSTOM_DNS_SERVER} = "127.0.0.1:1053"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests; + +__DATA__ + +=== TEST 1: create consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set service and enabled plugin `key-auth` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "desc": "new service" + }]] + ) + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: create route with plugin `limit-count`(upstream node contains domain) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 1, + "time_window": 5, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "ttl.test.local:1980": 1 + }, + "pass_host": "node", + "type": "roundrobin" + }, + 
"service_id": 1, + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit route 3 times +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + local headers = { + ["User-Agent"] = "curl/7.68.0", + ["apikey"] = "auth-one", + } + + for i = 1, 3 do + local code, body = t.test('/index.html', + ngx.HTTP_GET, + "", + nil, + headers + ) + ngx.say("return: ", code) + ngx.sleep(1) + end + } +} +--- request +GET /t +--- response_body +return: 404 +return: 503 +return: 503 + + + +=== TEST 5: set upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "ttl.test.local:1980": 1 + }, + "pass_host": "node", + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: create route with plugin `limit-count`, and bind upstream via id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 1, + "time_window": 5, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream_id": 1, + "service_id": 1, + "uri": "/index.html" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit route 3 times +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + local headers = { + ["User-Agent"] = "curl/7.68.0", + ["apikey"] = "auth-one", + } + + for i = 1, 3 do + local code, body = t.test('/index.html', + ngx.HTTP_GET, + "", + nil, 
+ headers + ) + ngx.say("return: ", code) + end + } +} +--- request +GET /t +--- response_body +return: 404 +return: 503 +return: 503 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/key-auth.t b/CloudronPackages/APISIX/apisix-source/t/plugin/key-auth.t new file mode 100644 index 0000000..5c28e6b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/key-auth.t @@ -0,0 +1,709 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; +} + +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $user_yaml_config = <<_EOC_; +apisix: + data_encryption: + enable_encrypt_fields: false +_EOC_ + $block->set_value("yaml_config", $user_yaml_config); +}); + + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.key-auth") + local ok, err = plugin.check_schema({key = 'test-key'}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: wrong type of string +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.key-auth") + local ok, err = plugin.check_schema({key = 123}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "key" validation failed: wrong type: expected string, got number +done + + + +=== TEST 3: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: add key auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + 
if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: valid consumer +--- request +GET /hello +--- more_headers +apikey: auth-one +--- response_body +hello world + + + +=== TEST 6: invalid consumer +--- request +GET /hello +--- more_headers +apikey: 123 +--- error_code: 401 +--- response_body +{"message":"Invalid API key in request"} + + + +=== TEST 7: not found apikey header +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing API key in request"} + + + +=== TEST 8: valid consumer +--- config + location /add_more_consumer { + content_by_lua_block { + local t = require("lib.test_admin").test + local username = "" + local key = "" + local code, body + for i = 1, 20 do + username = "user_" .. tostring(i) + key = "auth-" .. tostring(i) + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + string.format('{"username":"%s","plugins":{"key-auth":{"key":"%s"}}}', username, key), + string.format('{"value":{"username":"%s","plugins":{"key-auth":{"key":"%s"}}}}', username, key) + ) + end + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /add_more_consumer +--- pipelined_requests eval +["GET /add_more_consumer", "GET /hello"] +--- more_headers +apikey: auth-13 +--- response_body eval +["passed\n", "hello world\n"] + + + +=== TEST 9: add consumer with empty key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "error", + "plugins": { + "key-auth": { + } + } + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin key-auth err: property \"key\" is required"} + + + +=== TEST 10: customize header +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "header": "Authorization" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: valid consumer +--- request +GET /hello +--- more_headers +Authorization: auth-one +--- response_body +hello world + + + +=== TEST 12: customize query string +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "query": "auth" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: valid consumer +--- request +GET /hello?auth=auth-one +--- response_body +hello world + + + +=== TEST 14: enable key auth plugin using admin api, set hide_credentials = false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "hide_credentials": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: verify apikey request header should not hidden +--- request +GET /echo +--- more_headers +apikey: auth-one +--- response_headers +apikey: auth-one + + + +=== TEST 16: add key auth plugin using admin api, set hide_credentials = true +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "hide_credentials": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: verify apikey request header is hidden +--- request +GET /echo +--- more_headers +apikey: auth-one +--- response_headers +!apikey + + + +=== TEST 18: verify that only the keys in the title are deleted +--- request +GET /echo +--- more_headers +apikey: auth-one +test: auth-two +--- response_headers +!apikey +test: auth-two + + + +=== TEST 19: when apikey both in header and query string, verify apikey request header is hidden but request args is not hidden +--- request +GET /echo?apikey=auth-one +--- more_headers +apikey: auth-one +--- response_headers +!apikey +--- response_args +apikey: auth-one + + + +=== TEST 20: customize query string, set hide_credentials = true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "query": "auth", + "hide_credentials": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: verify auth request args is hidden +--- request +GET /echo?auth=auth-one +--- response_args +!auth + + + +=== TEST 22: verify that only the keys in the query parameters are deleted +--- request +GET /echo?auth=auth-one&test=auth-two +--- response_args +!auth +test: auth-two + + + +=== TEST 23: when auth both in header and query string, verify auth request 
args is hidden but request header is not hidden +--- request +GET /echo?auth=auth-one +--- more_headers +auth: auth-one +--- response_headers +auth: auth-one +--- response_args +!auth + + + +=== TEST 24: customize query string, set hide_credentials = false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": { + "query": "auth", + "hide_credentials": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 25: verify auth request args should not hidden +--- request +GET /hello?auth=auth-one +--- response_args +auth: auth-one + + + +=== TEST 26: change consumer with secrets ref: env +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$env://test_auth" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 27: verify auth request +--- main_config +env test_auth=authone; +--- request +GET /hello?auth=authone +--- response_args +auth: authone + + + +=== TEST 28: set key-auth conf: key uses secret ref +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "root" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- change consumer with secrets ref: vault 
+ local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$secret://vault/test1/jack/key" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 29: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/jack key=authtwo +--- response_body +Success! Data written to: kv/apisix/jack + + + +=== TEST 30: verify auth request +--- request +GET /hello?auth=authtwo +--- response_args +auth: authtwo + + + +=== TEST 31: set key-auth conf with the token in an env var: key uses secret ref +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "$ENV://VAULT_TOKEN" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- change consumer with secrets ref: vault + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$secret://vault/test1/jack/key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 32: verify auth request +--- request +GET /hello?auth=authtwo +--- response_args +auth: authtwo diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/lago.spec.mts b/CloudronPackages/APISIX/apisix-source/t/plugin/lago.spec.mts new file mode 100644 index 0000000..36833cd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/lago.spec.mts @@ -0,0 +1,352 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import { generateKeyPair } from 'node:crypto'; +import { existsSync } from 'node:fs'; +import { readFile, rm, writeFile } from 'node:fs/promises'; +import { promisify } from 'node:util'; + +import { afterAll, beforeAll, describe, expect, it } from '@jest/globals'; +import axios from 'axios'; +import * as compose from 'docker-compose'; +import { gql, request } from 'graphql-request'; +import { Api as LagoApi, Client as LagoClient } from 'lago-javascript-client'; +import simpleGit from 'simple-git'; +import * as YAML from 'yaml'; + +import { request as requestAdminAPI } from '../ts/admin_api'; +import { wait } from '../ts/utils'; + +const LAGO_VERSION = 'v1.27.0'; +const LAGO_PATH = '/tmp/lago'; +const LAGO_FRONT_PORT = 59999; +const LAGO_API_PORT = 30699; +const LAGO_API_URL = `http://127.0.0.1:${LAGO_API_PORT}`; +const LAGO_API_BASEURL = `http://127.0.0.1:${LAGO_API_PORT}/api/v1`; +const LAGO_API_GRAPHQL_ENDPOINT = `${LAGO_API_URL}/graphql`; +const LAGO_BILLABLE_METRIC_CODE = 'test'; +const LAGO_EXTERNAL_SUBSCRIPTION_ID = 'jack_test'; + +// The project uses AGPLv3, so we can't store the docker compose file it uses in our repository and download it during testing. 
+const downloadComposeFile = async () => + simpleGit().clone('https://github.com/getlago/lago', LAGO_PATH, { + '--depth': '1', + '--branch': LAGO_VERSION, + }); + +const launchLago = async () => { + // patch docker-compose.yml to disable useless port + const composeFilePath = `${LAGO_PATH}/docker-compose.yml`; + const composeFile = YAML.parse(await readFile(composeFilePath, 'utf8')); + delete composeFile.services.front; // front-end is not needed for tests + delete composeFile.services['api-clock']; // clock is not needed for tests + delete composeFile.services['api-worker']; // worker is not needed for tests + delete composeFile.services['pdf']; // pdf is not needed for tests + delete composeFile.services.redis.ports; // prevent port conflict + delete composeFile.services.db.ports; // prevent port conflict + await writeFile(composeFilePath, YAML.stringify(composeFile), 'utf8'); + + // launch services + const { privateKey } = await promisify(generateKeyPair)('rsa', { + modulusLength: 2048, + publicKeyEncoding: { type: 'pkcs1', format: 'pem' }, + privateKeyEncoding: { type: 'pkcs1', format: 'pem' }, + }); + const composeOpts: compose.IDockerComposeOptions = { + cwd: LAGO_PATH, + log: true, + env: { + LAGO_RSA_PRIVATE_KEY: Buffer.from(privateKey).toString('base64'), + FRONT_PORT: `${LAGO_FRONT_PORT}`, // avoiding conflicts, tests do not require a front-end + API_PORT: `${LAGO_API_PORT}`, + LAGO_FRONT_URL: `http://127.0.0.1:${LAGO_FRONT_PORT}`, + LAGO_API_URL, + }, + }; + + await compose.createAll(composeOpts); + await compose.upOne('api', composeOpts); + await compose.exec('api', 'rails db:create', composeOpts); + await compose.exec('api', 'rails db:migrate', composeOpts); + await compose.upAll(composeOpts); +}; + +const provisionLago = async () => { + // sign up + const { registerUser } = await request<{ + registerUser: { token: string; user: { organizations: { id: string } } }; + }>( + LAGO_API_GRAPHQL_ENDPOINT, + gql` + mutation signup($input: RegisterUserInput!) 
{ + registerUser(input: $input) { + token + user { + id + organizations { + id + } + } + } + } + `, + { + input: { + email: 'test@test.com', + password: 'Admin000!', + organizationName: 'test', + }, + }, + ); + + const webToken = registerUser.token; + const organizationId = registerUser.user.organizations[0].id; + const requestHeaders = { + Authorization: `Bearer ${webToken}`, + 'X-Lago-Organization': organizationId, + }; + + // list api keys + const { apiKeys } = await request<{ + apiKeys: { collection: { id: string }[] }; + }>( + LAGO_API_GRAPHQL_ENDPOINT, + gql` + query getApiKeys { + apiKeys(page: 1, limit: 20) { + collection { + id + } + } + } + `, + {}, + requestHeaders, + ); + + // get first api key + const { apiKey } = await request<{ apiKey: { value: string } }>( + LAGO_API_GRAPHQL_ENDPOINT, + gql` + query getApiKeyValue($id: ID!) { + apiKey(id: $id) { + id + value + } + } + `, + { id: apiKeys.collection[0].id }, + requestHeaders, + ); + + const lagoClient = LagoClient(apiKey.value, { baseUrl: LAGO_API_BASEURL }); + + // create billable metric + const { data: billableMetric } = + await lagoClient.billableMetrics.createBillableMetric({ + billable_metric: { + name: LAGO_BILLABLE_METRIC_CODE, + code: LAGO_BILLABLE_METRIC_CODE, + aggregation_type: 'count_agg', + filters: [ + { + key: 'tier', + values: ['normal', 'expensive'], + }, + ], + }, + }); + + // create plan + const { data: plan } = await lagoClient.plans.createPlan({ + plan: { + name: 'test', + code: 'test', + interval: 'monthly', + amount_cents: 0, + amount_currency: 'USD', + pay_in_advance: false, + charges: [ + { + billable_metric_id: billableMetric.billable_metric.lago_id, + charge_model: 'standard', + pay_in_advance: false, + properties: { amount: '1' }, + filters: [ + { + properties: { amount: '10' }, + values: { tier: ['expensive'] }, + }, + ], + }, + ], + }, + }); + + // create customer + const external_customer_id = 'jack'; + const { data: customer } = await 
lagoClient.customers.createCustomer({ + customer: { + external_id: external_customer_id, + name: 'Jack', + currency: 'USD', + }, + }); + + // assign plan to customer + await lagoClient.subscriptions.createSubscription({ + subscription: { + external_customer_id: customer.customer.external_id, + plan_code: plan.plan.code, + external_id: LAGO_EXTERNAL_SUBSCRIPTION_ID, + }, + }); + + return { apiKey: apiKey.value, client: lagoClient }; +}; + +describe('Plugin - Lago', () => { + const JACK_USERNAME = 'jack_test'; + const client = axios.create({ baseURL: 'http://127.0.0.1:1984' }); + + let restAPIKey: string; + let lagoClient: LagoApi; // prettier-ignore + + // set up + beforeAll(async () => { + if (existsSync(LAGO_PATH)) await rm(LAGO_PATH, { recursive: true }); + await downloadComposeFile(); + await launchLago(); + let res = await provisionLago(); + restAPIKey = res.apiKey; + lagoClient = res.client; + }, 120 * 1000); + + // clean up + afterAll(async () => { + await compose.downAll({ + cwd: LAGO_PATH, + commandOptions: ['--volumes'], + }); + await rm(LAGO_PATH, { recursive: true }); + }, 30 * 1000); + + it('should create route', async () => { + await expect( + requestAdminAPI('/apisix/admin/routes/1', 'PUT', { + uri: '/hello', + upstream: { + nodes: { + '127.0.0.1:1980': 1, + }, + type: 'roundrobin', + }, + plugins: { + 'request-id': { include_in_response: true }, // for transaction_id + 'key-auth': {}, // for subscription_id + lago: { + endpoint_addrs: [LAGO_API_URL], + token: restAPIKey, + event_transaction_id: '${http_x_request_id}', + event_subscription_id: '${http_x_consumer_username}', + event_code: 'test', + batch_max_size: 1, // does not buffered usage reports + }, + }, + }), + ).resolves.not.toThrow(); + + await expect( + requestAdminAPI('/apisix/admin/routes/2', 'PUT', { + uri: '/hello1', + upstream: { + nodes: { + '127.0.0.1:1980': 1, + }, + type: 'roundrobin', + }, + plugins: { + 'request-id': { include_in_response: true }, + 'key-auth': {}, + lago: { + 
endpoint_addrs: [LAGO_API_URL], + token: restAPIKey, + event_transaction_id: '${http_x_request_id}', + event_subscription_id: '${http_x_consumer_username}', + event_code: 'test', + event_properties: { tier: 'expensive' }, + batch_max_size: 1, + }, + }, + }), + ).resolves.not.toThrow(); + }); + + it('should create consumer', async () => + expect( + requestAdminAPI(`/apisix/admin/consumers/${JACK_USERNAME}`, 'PUT', { + username: JACK_USERNAME, + plugins: { + 'key-auth': { key: JACK_USERNAME }, + }, + }), + ).resolves.not.toThrow()); + + it('call API (without key)', async () => { + const res = await client.get('/hello', { validateStatus: () => true }); + expect(res.status).toEqual(401); + }); + + it('call normal API', async () => { + for (let i = 0; i < 3; i++) { + await expect( + client.get('/hello', { headers: { apikey: JACK_USERNAME } }), + ).resolves.not.toThrow(); + } + await wait(500); + }); + + it('check Lago events (normal API)', async () => { + const { data } = await lagoClient.events.findAllEvents({ + external_subscription_id: LAGO_EXTERNAL_SUBSCRIPTION_ID, + }); + + expect(data.events).toHaveLength(3); + expect(data.events[0].code).toEqual(LAGO_BILLABLE_METRIC_CODE); + }); + + let expensiveStartAt: Date; + it('call expensive API', async () => { + expensiveStartAt = new Date(); + for (let i = 0; i < 3; i++) { + await expect( + client.get('/hello1', { headers: { apikey: JACK_USERNAME } }), + ).resolves.not.toThrow(); + } + await wait(500); + }); + + it('check Lago events (expensive API)', async () => { + const { data } = await lagoClient.events.findAllEvents({ + external_subscription_id: LAGO_EXTERNAL_SUBSCRIPTION_ID, + timestamp_from: expensiveStartAt.toISOString(), + }); + + expect(data.events).toHaveLength(3); + expect(data.events[0].code).toEqual(LAGO_BILLABLE_METRIC_CODE); + expect(data.events[1].properties).toEqual({ tier: 'expensive' }); + }); +}); diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/lago.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/lago.t new file mode 100644 index 0000000..7e1c640 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/lago.t @@ -0,0 +1,77 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + {endpoint_addrs = {"http://127.0.0.1:3000"}, token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"}, + {endpoint_addrs = "http://127.0.0.1:3000", token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"}, + {endpoint_addrs = {}, token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"}, + {endpoint_addrs = {"http://127.0.0.1:3000"}, endpoint_uri = "/test", token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"}, + {endpoint_addrs = {"http://127.0.0.1:3000"}, endpoint_uri = 1234, token = 
"token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"}, + {endpoint_addrs = {"http://127.0.0.1:3000"}, token = 1234, event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code"}, + {endpoint_addrs = {"http://127.0.0.1:3000"}, token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code", event_properties = {key = "value"}}, + {endpoint_addrs = {"http://127.0.0.1:3000"}, token = "token", event_transaction_id = "tid", event_subscription_id = "sid", event_code = "code", event_properties = {1,2,3}}, + } + local plugin = require("apisix.plugins.lago") + + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +done +property "endpoint_addrs" validation failed: wrong type: expected array, got string +property "endpoint_addrs" validation failed: expect array to have at least 1 items +done +property "endpoint_uri" validation failed: wrong type: expected string, got number +property "token" validation failed: wrong type: expected string, got number +done +property "event_properties" validation failed: wrong type: expected object, got table + + + +=== TEST 2: test +--- timeout: 300 +--- max_size: 2048000 +--- exec +cd t && pnpm test plugin/lago.spec.mts 2>&1 +--- no_error_log +failed to execute the script with status +--- response_body eval +qr/PASS plugin\/lago.spec.mts/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ldap-auth.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ldap-auth.t new file mode 100644 index 0000000..ba16f2c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ldap-auth.t @@ -0,0 +1,616 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; +} + +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.ldap-auth") + local ok, err = plugin.check_schema({user_dn = 'foo'}, core.schema.TYPE_CONSUMER) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ldap-auth") + local ok, err = plugin.check_schema({base_dn = 123, ldap_uri = "127.0.0.1:1389"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body_like eval +qr/wrong type: expected string, got number +done +/ + + + +=== TEST 3: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "user01", + "plugins": { + "ldap-auth": { + "user_dn": "cn=user01,ou=users,dc=example,dc=org" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + 
end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: enable basic auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "127.0.0.1:1389", + "uid": "cn" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: verify, missing authorization +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing authorization in request"} + + + +=== TEST 6: verify, invalid basic authorization header +--- request +GET /hello +--- more_headers +Authorization: Bad_header Zm9vOmZvbwo= +--- error_code: 401 +--- response_body +{"message":"Invalid authorization in request"} +--- grep_error_log eval +qr/Invalid authorization header format/ +--- grep_error_log_out +Invalid authorization header format + + + +=== TEST 7: verify, invalid authorization value (bad base64 str) +--- request +GET /hello +--- more_headers +Authorization: Basic aca_a +--- error_code: 401 +--- response_body +{"message":"Invalid authorization in request"} +--- grep_error_log eval +qr/Failed to decode authentication header: aca_a/ +--- grep_error_log_out +Failed to decode authentication header: aca_a + + + +=== TEST 8: verify, invalid authorization value (no password) +--- request +GET /hello +--- more_headers +Authorization: Basic Zm9v +--- error_code: 401 +--- response_body +{"message":"Invalid authorization in request"} +--- grep_error_log eval +qr/Split authorization err: invalid decoded data: foo/ +--- grep_error_log_out +Split authorization err: invalid decoded data: foo + + + +=== TEST 9: verify, invalid password +--- request +GET /hello +--- more_headers 
+Authorization: Basic Zm9vOmZvbwo= +--- error_code: 401 +--- response_body +{"message":"Invalid user authorization"} +--- error_log +The supplied credential is invalid + + + +=== TEST 10: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- response_body +hello world +--- error_log +find consumer user01 + + + +=== TEST 11: enable basic auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "127.0.0.1:1389", + "uid": "cn" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- response_body +hello world +--- error_log +find consumer user01 + + + +=== TEST 13: invalid schema +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, case in ipairs({ + {}, + "blah" + }) do + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + { + username = "foo", + plugins = { + ["ldap-auth"] = case + } + } + ) + ngx.print(body) + end + } + } +--- response_body +{"error_msg":"invalid plugins configuration: failed to check the configuration of plugin ldap-auth err: property \"user_dn\" is required"} +{"error_msg":"invalid plugins configuration: invalid plugin conf \"blah\" for plugin [ldap-auth]"} + + + +=== TEST 14: get the default schema +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/ldap-auth', + ngx.HTTP_GET, + nil, + [[ +{"title":"work with route or service 
object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"tls_verify":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} + ]] + ) + ngx.status = code + } + } + + + +=== TEST 15: get the schema by schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/ldap-auth?schema_type=consumer', + ngx.HTTP_GET, + nil, + [[ +{"title":"work with consumer object","required":["user_dn"],"properties":{"user_dn":{"type":"string"}},"type":"object"} + ]] + ) + ngx.status = code + } + } + + + +=== TEST 16: get the schema by error schema_type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/plugins/ldap-auth?schema_type=consumer123123', + ngx.HTTP_GET, + nil, + [[ +{"title":"work with route or service object","required":["base_dn","ldap_uri"],"properties":{"base_dn":{"type":"string"},"ldap_uri":{"type":"string"},"use_tls":{"type":"boolean"},"tls_verify":{"type":"boolean"},"uid":{"type":"string"}},"type":"object"} ]] + ) + ngx.status = code + } + } + + + +=== TEST 17: enable ldap-auth with tls +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "test.com:1636", + "uid": "cn", + "use_tls": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- response_body +hello world +--- error_log +find consumer user01 + + + 
+=== TEST 19: enable ldap-auth with tls, verify CA +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "test.com:1636", + "uid": "cn", + "use_tls": true, + "tls_verify": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- response_body +hello world +--- error_log +find consumer user01 + + + +=== TEST 21: set ldap-auth conf: user_dn uses secret ref +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "root" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- change consumer with secrets ref: vault + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "user01", + "plugins": { + "ldap-auth": { + "user_dn": "$secret://vault/test1/user01/user_dn" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- set route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "127.0.0.1:1389", + "uid": "cn" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: 
store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/user01 user_dn="cn=user01,ou=users,dc=example,dc=org" +--- response_body +Success! Data written to: kv/apisix/user01 + + + +=== TEST 23: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- response_body +hello world +--- error_log +find consumer user01 + + + +=== TEST 24: set ldap-auth conf with the token in an env var: user_dn uses secret ref +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "$ENV://VAULT_TOKEN" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- change consumer with secrets ref: vault + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "user01", + "plugins": { + "ldap-auth": { + "user_dn": "$secret://vault/test1/user01/user_dn" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- set route + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ldap-auth": { + "base_dn": "ou=users,dc=example,dc=org", + "ldap_uri": "127.0.0.1:1389", + "uid": "cn" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: verify +--- request +GET /hello +--- more_headers +Authorization: Basic dXNlcjAxOnBhc3N3b3JkMQ== +--- response_body +hello world +--- error_log +find consumer user01 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn-redis-cluster.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn-redis-cluster.t new file mode 
100644 index 0000000..997a4a2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn-redis-cluster.t @@ -0,0 +1,339 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + my $port = $ENV{TEST_NGINX_SERVER_PORT}; + + my $config = $block->config // <<_EOC_; + location /access_root_dir { + content_by_lua_block { + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:$port/limit_conn') + if res then + ngx.exit(res.status) + end + } + } + + location /test_concurrency { + content_by_lua_block { + local reqs = {} + local status_map = {} + for i = 1, 10 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + local status_key = resp.status + if status_map[status_key] then + status_map[status_key] = status_map[status_key] + 1 + else + 
status_map[status_key] = 1 + end + end + for key, value in pairs(status_map) do + ngx.say("status:" .. key .. ", " .. "count:" .. value) + end + } + } +_EOC_ + + $block->set_value("config", $config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-conn") + local ok, err = plugin.check_schema({ + conn = 1, + burst = 0, + default_conn_delay = 0.1, + rejected_code = 503, + key = 'remote_addr', + policy = "redis-cluster", + redis_cluster_nodes = { + "127.0.0.1:5000", + "127.0.0.1:5003", + "127.0.0.1:5002" + }, + dict_name = "test", + redis_cluster_name = "test" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5003", + "127.0.0.1:5002" + ], + "redis_cluster_name": "test" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: not exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:10 + + + +=== TEST 4: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + 
"policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:3 +status:503, count:7 + + + +=== TEST 6: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:6 +status:503, count:4 + + + +=== TEST 8: set route, with redis_cluster_nodes and redis_cluster_name redis_cluster_ssl and redis_cluster_ssl_verify +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:7001", + "127.0.0.1:7002", + "127.0.0.1:7000" + ], + "redis_cluster_name": 
"redis-cluster-2", + "redis_cluster_ssl": true, + "redis_cluster_ssl_verify": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:6 +status:503, count:4 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn-redis.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn-redis.t new file mode 100644 index 0000000..a121453 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn-redis.t @@ -0,0 +1,810 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + my $port = $ENV{TEST_NGINX_SERVER_PORT}; + + my $config = $block->config // <<_EOC_; + location /access_root_dir { + content_by_lua_block { + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:$port/limit_conn') + if res then + ngx.exit(res.status) + end + } + } + + location /test_concurrency { + content_by_lua_block { + local reqs = {} + local status_map = {} + for i = 1, 10 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + local status_key = resp.status + if status_map[status_key] then + status_map[status_key] = status_map[status_key] + 1 + else + status_map[status_key] = 1 + end + end + for key, value in pairs(status_map) do + ngx.say("status:" .. key .. ", " .. "count:" .. 
value) + end + } + } +_EOC_ + + $block->set_value("config", $config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-conn") + local ok, err = plugin.check_schema({ + conn = 1, + burst = 0, + default_conn_delay = 0.1, + rejected_code = 503, + key = 'remote_addr', + policy = "redis", + redis_host = 'localhost', + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: not exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:10 + + + +=== TEST 4: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 
5: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:3 +status:503, count:7 + + + +=== TEST 6: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:6 +status:503, count:4 + + + +=== TEST 8: update plugin with username, password +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_username": "alice", + "redis_password": "somepassword" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:6 +status:503, count:4 + + + +=== TEST 10: update plugin with username, wrong password +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_username": "alice", + "redis_password": "someerrorpassword" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: catch wrong pass +--- request +GET /access_root_dir +--- error_code: 500 +--- error_log +failed to limit conn: WRONGPASS invalid username-password pair or user is disabled. + + + +=== TEST 12: invalid route: missing redis_host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "conn": 1, + "policy": "redis" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-conn err: then clause did not match"} + + + +=== TEST 13: disable plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t 
+--- response_body +passed + + + +=== TEST 14: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:10 + + + +=== TEST 15: set route(key: server_addr) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "server_addr", + "policy": "redis", + "redis_host": "127.0.0.1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: key: http_x_real_ip +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "http_x_real_ip", + "policy": "redis", + "redis_host": "127.0.0.1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: exceeding the burst (X-Real-IP) +--- config +location /access_root_dir { + content_by_lua_block { + local port = ngx.var.server_port + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:' .. port .. 
'/limit_conn', { + keepalive = false, + headers = {["X-Real-IP"] = "10.10.10.1"} + }) + if res then + ngx.exit(res.status) + end + } +} + +location /test_concurrency { + content_by_lua_block { + local reqs = {} + local status_map = {} + for i = 1, 10 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + local status_key = resp.status + if status_map[status_key] then + status_map[status_key] = status_map[status_key] + 1 + else + status_map[status_key] = 1 + end + end + for key, value in pairs(status_map) do + ngx.say("status:" .. key .. ", " .. "count:" .. value) + end + } +} +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:6 +status:503, count:4 +--- error_log +limit key: 10.10.10.1route + + + +=== TEST 18: key: http_x_forwarded_for +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "http_x_forwarded_for", + "policy": "redis", + "redis_host": "127.0.0.1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: exceeding the burst(X-Forwarded-For) +--- config +location /access_root_dir { + content_by_lua_block { + local port = ngx.var.server_port + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:' .. port .. 
'/limit_conn', { + keepalive = false, + headers = {["X-Forwarded-For"] = "10.10.10.2"} + }) + if res then + ngx.exit(res.status) + end + } +} + +location /test_concurrency { + content_by_lua_block { + local reqs = {} + local status_map = {} + for i = 1, 10 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + local status_key = resp.status + if status_map[status_key] then + status_map[status_key] = status_map[status_key] + 1 + else + status_map[status_key] = 1 + end + end + for key, value in pairs(status_map) do + ngx.say("status:" .. key .. ", " .. "count:" .. value) + end + } +} +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:6 +status:503, count:4 +--- error_log +limit key: 10.10.10.2route + + + +=== TEST 20: default rejected_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 4, + "burst": 1, + "default_conn_delay": 0.1, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:5 +status:503, count:5 + + + +=== TEST 22: set global rule with conn = 2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1" + } + } + }]] + 
) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 23: exceeding the burst of global rule +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:3 +status:503, count:7 + + + +=== TEST 24: delete global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 25: not exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +status:200, count:5 +status:503, count:5 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn.t new file mode 100644 index 0000000..0182019 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn.t @@ -0,0 +1,1202 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + my $port = $ENV{TEST_NGINX_SERVER_PORT}; + + my $config = $block->config // <<_EOC_; + location /access_root_dir { + content_by_lua_block { + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:$port/limit_conn') + if res then + ngx.exit(res.status) + end + } + } + + location /test_concurrency { + content_by_lua_block { + local reqs = {} + for i = 1, 10 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + ngx.say(resp.status) + end + } + } +_EOC_ + + $block->set_value("config", $config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-conn") + local ok, err = plugin.check_schema({conn = 1, burst = 0, default_conn_delay = 0.1, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: wrong value of key +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-conn") + local ok, err = plugin.check_schema({conn = 1, default_conn_delay = 0.1, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "burst" is required +done + + + +=== TEST 3: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + 
"limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: not exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +200 +200 +200 +200 +200 +200 +200 + + + +=== TEST 5: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +503 +503 +503 +503 +503 +503 +503 + + + +=== TEST 7: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +200 
+200 +200 +503 +503 +503 +503 + + + +=== TEST 9: invalid route: missing key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-conn err: property \"conn\" is required"} + + + +=== TEST 10: invalid route: wrong conn +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": -1, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-conn err: property \"conn\" validation failed: expected -1 to be greater than 0"} + + + +=== TEST 11: invalid service: missing key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + 
ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-conn err: property \"conn\" is required"} + + + +=== TEST 12: invalid service: wrong count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": -1, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-conn err: property \"conn\" validation failed: expected -1 to be greater than 0"} + + + +=== TEST 13: disable plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +200 +200 +200 +200 +200 +200 +200 + + + +=== TEST 15: set route(key: server_addr) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "server_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" 
+ }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: key: http_x_real_ip +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "http_x_real_ip" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: exceeding the burst (X-Real-IP) +--- config +location /access_root_dir { + content_by_lua_block { + local port = ngx.var.server_port + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:' .. port .. 
'/limit_conn', { + keepalive = false, + headers = {["X-Real-IP"] = "10.10.10.1"} + }) + if res then + ngx.exit(res.status) + end + } +} + +location /test_concurrency { + content_by_lua_block { + local reqs = {} + for i = 1, 10 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + ngx.say(resp.status) + end + } +} +--- more_headers +X-Real-IP: 10.0.0.1 +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +200 +200 +200 +503 +503 +503 +503 +--- error_log +limit key: 10.10.10.1route + + + +=== TEST 18: key: http_x_forwarded_for +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 5, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "http_x_forwarded_for" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: exceeding the burst(X-Forwarded-For) +--- config +location /access_root_dir { + content_by_lua_block { + local port = ngx.var.server_port + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:' .. port .. 
'/limit_conn', { + keepalive = false, + headers = {["X-Forwarded-For"] = "10.10.10.2"} + }) + if res then + ngx.exit(res.status) + end + } +} + +location /test_concurrency { + content_by_lua_block { + local reqs = {} + for i = 1, 10 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + ngx.say(resp.status) + end + } +} +--- more_headers +X-Real-IP: 10.0.0.1 +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +200 +200 +200 +503 +503 +503 +503 +--- error_log +limit key: 10.10.10.2route + + + +=== TEST 20: default rejected_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: set global rule with conn = 2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key": "remote_addr" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: exceeding the burst of global rule +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +503 +503 +503 +503 +503 +503 +503 + + + +=== TEST 23: delete global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/global_rules/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: not exceeding the burst +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +200 +200 +200 +200 +200 +200 +200 + + + +=== TEST 25: invalid schema +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-conn") + local cases = { + {conn = 0, burst = 0, default_conn_delay = 0.1, rejected_code = 503, key = 'remote_addr'}, + {conn = 1, burst = 0, default_conn_delay = 0, rejected_code = 503, key = 'remote_addr'}, + } + for _, c in ipairs(cases) do + local ok, err = plugin.check_schema(c) + if not ok then + ngx.say(err) + end + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "conn" validation failed: expected 0 to be greater than 0 +property "default_conn_delay" validation failed: expected 0 to be greater than 0 +done + + + +=== TEST 26: create consumer and bind key-auth plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "consumer_jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 27: create route and enable plugin 'key-auth' +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "consumer_name" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + 
if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 28: not exceeding the burst +--- config + location /access_root_dir { + content_by_lua_block { + local port = ngx.var.server_port + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:' .. port .. '/limit_conn', { + headers = {["apikey"] = "auth-jack"} + }) + if res then + ngx.exit(res.status) + end + } + } + + location /test_concurrency { + content_by_lua_block { + local reqs = {} + for i = 1, 10 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + ngx.say(resp.status) + end + } + } +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +200 +200 +200 +200 +200 +200 +200 +--- error_log_like eval +qr/limit key: consumer_jackroute&consumer\d+/ + + + +=== TEST 29: update plugin "limit-conn" configuration "conn" and "burst" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "consumer_name" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 30: exceeding the burst +--- config + location /access_root_dir { + content_by_lua_block { + local port = ngx.var.server_port + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:' .. port .. 
'/limit_conn', { + headers = {["apikey"] = "auth-jack"} + }) + if res then + ngx.exit(res.status) + end + } + } + + location /test_concurrency { + content_by_lua_block { + local reqs = {} + for i = 1, 10 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + ngx.say(resp.status) + end + } + } +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +200 +200 +503 +503 +503 +503 +503 +503 +503 +--- error_log_like eval +qr/limit key: consumer_jackroute&consumer\d+/ + + + +=== TEST 31: plugin limit-conn uses the wrong value of key +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-conn") + local ok, err = plugin.check_schema({ + conn = 1, + default_conn_delay = 0.1, + rejected_code = 503, + key = 'consumer_name' + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "burst" is required +done + + + +=== TEST 32: enable plugin: conn=1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.3, + "rejected_code": 503, + "key": "remote_addr", + "allow_degradation": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 33: hit route and should not be limited +--- pipelined_requests eval +[ + "GET /hello", "GET /hello", "GET /hello", + "GET /hello", "GET /hello", "GET /hello", +] +--- timeout: 10s +--- error_code eval +[ + 200, 200, 200, + 200, 200, 200 +] + + + +=== TEST 34: invalid route: wrong allow_degradation +--- config + location /t { + content_by_lua_block { + 
local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "allow_degradation": "true1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-conn err: property \"allow_degradation\" validation failed: wrong type: expected boolean, got string"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn2.t new file mode 100644 index 0000000..b15133c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn2.t @@ -0,0 +1,475 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the check leak tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + my $port = $ENV{TEST_NGINX_SERVER_PORT}; + + my $config = $block->config // <<_EOC_; + location /access_root_dir { + content_by_lua_block { + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:$port/limit_conn') + if res then + ngx.exit(res.status) + end + } + } + + location /test_concurrency { + content_by_lua_block { + local reqs = {} + for i = 1, 5 do + reqs[i] = { "/access_root_dir" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + ngx.say(resp.status) + end + } + } +_EOC_ + + $block->set_value("config", $config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: limit-conn with retry upstream, set upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.2:1": 1, + "127.0.0.1:1980": 1 + }, + "retries": 2, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/mysleep", + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.3, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + 
+=== TEST 2: hit route +--- log_level: debug +--- request +GET /mysleep?seconds=0.1 +--- error_log +request latency is 0.1 +--- response_body +0.1 + + + +=== TEST 3: set both global and route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.3, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.3, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit route +--- log_level: debug +--- request +GET /hello +--- grep_error_log eval +qr/request latency is/ +--- grep_error_log_out +request latency is +request latency is + + + +=== TEST 5: set only_use_default_delay option to true in specific route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello1", + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.3, + "only_use_default_delay": true, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit route +--- log_level: debug +--- request +GET /hello1 +--- grep_error_log eval +qr/request latency is nil/ +--- 
grep_error_log_out +request latency is nil + + + +=== TEST 7: invalid route: wrong rejected_msg type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "rejected_msg": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-conn err: property \"rejected_msg\" validation failed: wrong type: expected string, got boolean"} + + + +=== TEST 8: invalid route: wrong rejected_msg length +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 1, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "rejected_msg": "" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-conn err: property \"rejected_msg\" validation failed: string too short, expected at least 1, got 0"} + + + +=== TEST 9: update plugin to set key_type to var_combination +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "$http_a 
$http_b", + "key_type": "var_combination" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: Don't exceed the burst +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/limit_conn" + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {a = i}}) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- timeout: 10s +--- response_body +[200,200] + + + +=== TEST 11: request when key is missing +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +503 +503 +503 +503 +--- error_log +The value of the configured key is empty, use client IP instead + + + +=== TEST 12: update plugin to set invalid key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "abcdefgh", + "key_type": "var_combination" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: request when key is invalid +--- request +GET /test_concurrency +--- timeout: 10s +--- response_body +200 +503 +503 +503 +503 +--- error_log +The value of the configured key is empty, use client IP instead diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn3.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn3.t new file mode 100644 index 0000000..a2fe369 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-conn3.t @@ -0,0 +1,126 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the check leak tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: create route with limit-conn plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 1, + "burst": 0, + "default_conn_delay": 0.1, + "rejected_code": 503, + "key": "$remote_addr", + "key_type": "var_combination" + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/limit_conn", + "host": "www.test.com" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: create ssl(sni: www.test.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 3: use HTTP version 2 to request +--- exec +curl --http2 --parallel -k https://www.test.com:1994/limit_conn https://www.test.com:1994/limit_conn --resolve www.test.com:1994:127.0.0.1 +--- response_body_like +503 Service Temporarily Unavailable.*.hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis-cluster.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis-cluster.t new file mode 100644 index 0000000..7a4798a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis-cluster.t @@ -0,0 +1,544 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route, missing redis_cluster_nodes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: else clause did not match"} + + + +=== TEST 2: set route, with redis_cluster_nodes and redis_cluster_name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_timeout": 1001, + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5001" 
+ ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5001" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: up the limit +--- request +GET /hello +--- error_log +try to lock with key route#1#redis-cluster +unlock with key route#1#redis-cluster + + + +=== TEST 5: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 503, 503] + + + +=== TEST 6: up the limit again +--- pipelined_requests eval +["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"] +--- error_code eval +[404, 503, 404, 503, 503] + + + +=== TEST 7: set route, four redis nodes, only one is valid +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 9999, + "time_window": 60, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:8001", + "127.0.0.1:8002", + "127.0.0.1:8003" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for i = 1, 20 do + local code, body = t('/hello', ngx.HTTP_GET) + ngx.say("code: ", code) + end + + } + } +--- response_body +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +code: 200 +--- timeout: 10 + + + +=== TEST 9: update route, use new limit configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local function set_route(count) + t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": ]] .. count .. [[, + "time_window": 69, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5001" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + end + + set_route(2) + local t = require("lib.test_admin").test + for i = 1, 5 do + local code, body = t('/hello', ngx.HTTP_GET) + ngx.say("code: ", code) + end + + set_route(3) + local t = require("lib.test_admin").test + for i = 1, 5 do + local code, body = t('/hello', ngx.HTTP_GET) + ngx.say("code: ", code) + end + } + } +--- response_body +code: 200 +code: 200 +code: 503 +code: 503 +code: 503 +code: 200 +code: 200 +code: 200 +code: 503 +code: 503 + + + +=== TEST 10: set route, four redis nodes, no one is valid, with enable degradation switch +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": 
"/hello", + "plugins": { + "limit-count": { + "count": 9999, + "time_window": 60, + "key": "remote_addr", + "policy": "redis-cluster", + "allow_degradation": true, + "redis_cluster_nodes": [ + "127.0.0.1:8001", + "127.0.0.1:8002", + "127.0.0.1:8003", + "127.0.0.1:8004" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: enable degradation switch for TEST 10 +--- request +GET /hello +--- response_body +hello world +--- error_log +connection refused + + + +=== TEST 12: set route, use error type for redis_cluster_ssl and redis_cluster_ssl_verify +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_timeout": 1001, + "redis_cluster_nodes": [ + "127.0.0.1:7000", + "127.0.0.1:7001" + ], + "redis_cluster_name": "redis-cluster-1", + "redis_cluster_ssl": "true", + "redis_cluster_ssl_verify": "false" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: else clause did not match"} + + + +=== TEST 13: set route, redis_cluster_ssl_verify is true(will cause ssl handshake err), with enable degradation switch +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + 
"time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "allow_degradation": true, + "redis_timeout": 1001, + "redis_cluster_nodes": [ + "127.0.0.1:7000", + "127.0.0.1:7001" + ], + "redis_cluster_name": "redis-cluster-1", + "redis_cluster_ssl": true, + "redis_cluster_ssl_verify": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: enable degradation switch for TEST 13 +--- request +GET /hello +--- response_body +hello world +--- error_log +failed to do ssl handshake + + + +=== TEST 15: set route, with redis_cluster_nodes and redis_cluster_name redis_cluster_ssl and redis_cluster_ssl_verify +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_timeout": 1001, + "redis_cluster_nodes": [ + "127.0.0.1:7000", + "127.0.0.1:7001" + ], + "redis_cluster_name": "redis-cluster-1", + "redis_cluster_ssl": true, + "redis_cluster_ssl_verify": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis-cluster2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis-cluster2.t new file mode 100644 index 0000000..ede8ee9 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis-cluster2.t @@ -0,0 +1,139 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: update route, use new limit configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello2", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5001" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + local old_X_RateLimit_Reset = 61 + for i = 1, 3 do + local _, _, headers = t('/hello2', ngx.HTTP_GET) + ngx.sleep(1) + if 
tonumber(headers["X-RateLimit-Reset"]) < old_X_RateLimit_Reset then + old_X_RateLimit_Reset = tonumber(headers["X-RateLimit-Reset"]) + ngx.say("OK") + else + ngx.say("WRONG") + end + end + ngx.say("Done") + } + } +--- response_body +OK +OK +OK +Done + + + +=== TEST 2: test header X-RateLimit-Reset shouldn't be set to 0 after request be rejected +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello2", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5001" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + for i = 1, 3 do + local _, _, headers = t('/hello2', ngx.HTTP_GET) + ngx.sleep(1) + if tonumber(headers["X-RateLimit-Reset"]) > 0 then + ngx.say("OK") + else + ngx.say("WRONG") + end + end + ngx.say("Done") + } + } +--- response_body +OK +OK +OK +Done diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis-cluster3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis-cluster3.t new file mode 100644 index 0000000..2a59918 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis-cluster3.t @@ -0,0 +1,185 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{REDIS_NODE_0} = "127.0.0.1:5000"; + $ENV{REDIS_NODE_1} = "127.0.0.1:5001"; +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: modified redis script, cost == 2 +--- config + location /t { + content_by_lua_block { + local conf = { + redis_cluster_nodes = {"127.0.0.1:5000", "127.0.0.1:5001"}, + redis_cluster_name = "redis-cluster-1", + redis_cluster_ssl = false, + redis_timeout = 1000, + key_type = "var", + time_window = 60, + show_limit_quota_header = true, + allow_degradation = false, + key = "remote_addr", + rejected_code = 503, + count = 3, + policy = "redis-cluster", + redis_cluster_ssl_verify = false + } + + local lim_count_redis_cluster = require("apisix.plugins.limit-count.limit-count-redis-cluster") + local lim = lim_count_redis_cluster.new("limit-count", 3, 60, conf) + local uri = ngx.var.uri + local _, remaining, _ = lim:incoming(uri, 2) + + ngx.say("remaining: ", remaining) + } + } +--- response_body +remaining: 1 + + + +=== TEST 2: set route, with single node in 
redis_cluster_nodes and redis_cluster_name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_timeout": 1001, + "redis_cluster_nodes": [ + "127.0.0.1:5000" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: up the limit for single node in redis_cluster_nodes +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503] + + + +=== TEST 4: set route, with redis_cluster_nodes as environment variables and redis_cluster_name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_timeout": 1001, + "redis_cluster_nodes": [ + "$ENV://REDIS_NODE_0", + "$ENV://REDIS_NODE_1" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: up the limit with environment variables for redis_cluster_nodes +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis.t new file mode 100644 index 0000000..d061880 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis.t @@ -0,0 +1,562 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route, missing redis host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: then clause did not match"} + + + +=== TEST 2: set route, with redis host and port +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: set route(default value: port and timeout) +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: up the limit +--- request +GET /hello +--- error_log +try to lock with key route#1#redis +unlock with key route#1#redis + + + +=== TEST 5: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 503, 503] + + + +=== TEST 6: up the limit +--- pipelined_requests eval +["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"] +--- error_code eval +[404, 503, 404, 503, 503] + + + +=== TEST 7: set route, with redis host, port and right password +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- set redis password + local redis = require "resty.redis" + + local red = redis:new() + + red:set_timeout(1000) -- 1 sec + + local ok, err = red:connect("127.0.0.1", 6379) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + -- for get_reused_times works + -- local ok, err = red:set_keepalive(10000, 100) + -- if not ok then + -- ngx.say("failed to set keepalive: ", err) + -- return + -- end + + local count + count, err = red:get_reused_times() + if 0 == count then + local res, err = red:config('set', 'requirepass', 'foobared') + if not res then + ngx.say("failed to set: ", err) + return + end + elseif err then + -- ngx.say("already set requirepass done: ", err) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + 
"count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001, + "redis_password": "foobared" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 9: up the limit +--- pipelined_requests eval +["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"] +--- error_code eval +[404, 503, 404, 503, 503] + + + +=== TEST 10: set route, with redis host, port and wrong password +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001, + "redis_password": "WRONG_foobared" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello_new" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code eval +200 + + + +=== TEST 11: request for TEST 10 +--- request +GET /hello_new +--- error_code eval +500 +--- error_log +failed to limit count: WRONGPASS invalid username-password pair or user is disabled + + + +=== TEST 12: multi request for TEST 10 +--- pipelined_requests eval +["GET /hello_new", "GET /hello1", "GET /hello1", "GET /hello_new"] +--- no_error_log +[alert] +--- error_code eval +[500, 404, 404, 500] + + + +=== TEST 13: set route, with redis host, port and bad username and good password +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001, + "redis_username": "bob", + "redis_password": "somepassword" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: request for TEST 13 +--- request +GET /hello +--- error_code eval +500 +--- error_log +failed to limit count: WRONGPASS invalid username-password pair or user is disabled + + + +=== TEST 15: set route, with redis host, port and good username and bad password +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001, + "redis_username": "alice", + "redis_password": "badpassword" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: request for TEST 15 +--- request +GET /hello +--- error_code eval +500 +--- error_log +failed to limit count: WRONGPASS invalid username-password pair or user is disabled + + + +=== TEST 17: set route, with redis host, port and right username and password +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001, + "redis_username": "alice", + "redis_password": "somepassword" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 19: up the limit +--- pipelined_requests eval +["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"] +--- error_code eval +[404, 503, 404, 503, 503] + + + +=== TEST 20: restore redis password to '' +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- set redis password + local redis = require "resty.redis" + + local red = redis:new() + + red:set_timeout(1000) -- 1 sec + + local ok, err = red:connect("127.0.0.1", 6379) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + -- for get_reused_times works + -- local ok, err = red:set_keepalive(10000, 100) + -- if not ok then + -- ngx.say("failed to set keepalive: ", err) + -- return + -- end + + local count + count, err = red:get_reused_times() + if 0 == count then + local redis_password = "foobared" + if redis_password and redis_password ~= '' then + local ok, err = red:auth(redis_password) + if not ok then + return nil, err + end + end + local res, err = red:config('set', 'requirepass', '') + if not res then + ngx.say("failed to set: ", err) + return + end + elseif err then + -- ngx.say("already set requirepass done: ", err) + return + end + } + } +--- error_code eval +200 diff --git 
a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis2.t new file mode 100644 index 0000000..f4f4ea1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis2.t @@ -0,0 +1,372 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route, with redis host and port and default database +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: set route, with redis host and port but wrong database +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 999999, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: use wrong database +--- request +GET 
/hello +--- error_code eval +500 +--- error_log +failed to limit count: failed to change redis db, err: ERR DB index is out of range + + + +=== TEST 4: set route, with redis host and port and right database +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 1, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 6: set route, with redis host but wrong port, with enable degradation switch +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "allow_degradation": true, + "redis_host": "127.0.0.1", + "redis_port": 16379, + "redis_database": 1, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: enable degradation switch for TEST 6 +--- request +GET /hello +--- response_body +hello world +--- error_log +connection refused + + + +=== TEST 8: set route, with don't show limit quota header +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "show_limit_quota_header": false, + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 1, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: don't show limit quota header for TEST 8 +--- request +GET /hello +--- raw_response_headers_unlike eval +qr/X-RateLimit-Limit/ + + + +=== TEST 10: configuration from the same group should be the same +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "show_limit_quota_header": false, + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 1, + "redis_timeout": 1001, + "group": "redis" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "show_limit_quota_header": false, + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 2, + "redis_timeout": 1001, + "group": "redis" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = 
code + end + ngx.print(body) + } + } +--- error_code: 400 +--- error_log +[error] +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: group conf mismatched"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis3.t new file mode 100644 index 0000000..bf952df --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis3.t @@ -0,0 +1,374 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route, counter will be shared +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60, + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 1, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: test X-RateLimit-Reset second number could be decline +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local old_X_RateLimit_Reset = 61 + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.sleep(2) + if tonumber(res.headers["X-RateLimit-Reset"]) < old_X_RateLimit_Reset then + old_X_RateLimit_Reset = tonumber(res.headers["X-RateLimit-Reset"]) + ngx.say("OK") + else + ngx.say("WRONG") + end + end + ngx.say("Done") + } + } +--- response_body +OK +OK +Done + + + +=== TEST 3: set router +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 10, + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 1, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: test header X-RateLimit-Remaining exist when limit rejected +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local ress = {} + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.sleep(1) + table.insert(ress, res.headers["X-RateLimit-Remaining"]) + + end + ngx.say(json.encode(ress)) + } + } +--- response_body +["1","0","0"] + + + +=== TEST 5: set route, with redis host, port and SSL +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6380, + "redis_timeout": 1001, + "redis_ssl": true, + "redis_ssl_verify": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 7: up the limit +--- pipelined_requests eval +["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"] +--- error_code eval +[404, 503, 404, 503, 503] + + + +=== TEST 8: set route, with redis host, port, SSL and SSL verify is true(will cause ssl handshake err), with enable degradation switch +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "allow_degradation": true, + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6380, + "redis_timeout": 1001, + "redis_ssl": true, + "redis_ssl_verify": true + } 
+ }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: enable degradation switch for TEST 8 +--- request +GET /hello +--- response_body +hello world +--- error_log +failed to do ssl handshake + + + +=== TEST 10: set router +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 1, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: test header X-RateLimit-Reset shouldn't be set to 0 after request be rejected +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local ress = {} + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.sleep(1) + local reset = res.headers["X-RateLimit-Reset"] + if tonumber(reset) <= 0 then + ngx.say("failed") + end + + end + ngx.say("success") + } + } +--- response_body +success diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis4.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis4.t new file mode 100644 index 0000000..d1fad19 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count-redis4.t @@ -0,0 +1,136 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } + + $ENV{REDIS_HOST} = "127.0.0.1"; +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: modified redis script, cost == 2 +--- config + location /t { + content_by_lua_block { + local conf = { + allow_degradation = false, + rejected_code = 503, + redis_timeout = 1000, + key_type = "var", + time_window = 60, + show_limit_quota_header = true, + count = 3, + redis_host = "127.0.0.1", + redis_port = 6379, + redis_database = 0, + policy = "redis", + key = "remote_addr" + } + + local lim_count_redis = require("apisix.plugins.limit-count.limit-count-redis") + local lim = lim_count_redis.new("limit-count", 3, 60, conf) + local uri = ngx.var.uri + local _, remaining, _ = lim:incoming(uri, 2) + + ngx.say("remaining: ", remaining) + } + } +--- response_body +remaining: 1 + + + +=== TEST 2: set route, with redis host as environment variable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "$ENV://REDIS_HOST" + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: up the limit with host environment variable +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count.t new file mode 100644 index 0000000..f150c01 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count.t @@ -0,0 +1,1192 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-count") + local ok, err = plugin.check_schema({count = 2, time_window = 60, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: set key empty +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-count") + local ok, err = plugin.check_schema({count = 2, time_window = 60, rejected_code = 503}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 3: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 5: up the limit +--- pipelined_requests eval 
+["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"] +--- error_code eval +[404, 503, 404, 503, 503] + + + +=== TEST 6: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 503] + + + +=== TEST 8: invalid route: missing count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "time_window": 60, + "rejected_code": 503 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: property \"count\" is required"} + + + +=== TEST 9: invalid route: wrong count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": -100, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + 
ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: property \"count\" validation failed: expected -100 to be greater than 0"} + + + +=== TEST 10: invalid route: wrong count + POST method +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes', + ngx.HTTP_POST, + [[{ + "plugins": { + "limit-count": { + "count": -100, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: property \"count\" validation failed: expected -100 to be greater than 0"} + + + +=== TEST 11: invalid service: missing count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "time_window": 60, + "rejected_code": 503 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: property \"count\" is required"} + + + +=== TEST 12: invalid service: wrong count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": -100, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" 
+ } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: property \"count\" validation failed: expected -100 to be greater than 0"} + + + +=== TEST 13: invalid service: wrong count + POST method +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services', + ngx.HTTP_POST, + [[{ + "plugins": { + "limit-count": { + "count": -100, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: property \"count\" validation failed: expected -100 to be greater than 0"} + + + +=== TEST 14: set route without id in post body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 61, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 16: disable plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, 
+ "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 18: set route(key: server_addr) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "server_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 20: default rejected_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 80, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: when the count is changed, check the limit is correct +--- config + location /t1 { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 1, + "time_window": 80, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- pipelined_requests eval +["GET /hello", "GET /hello","GET /hello","GET /t1", "GET /hello","GET /hello"] +--- error_code eval +[200, 200, 503, 200, 200, 503] + + + +=== TEST 22: when the count is changed, check the limit is correct(from 1 to 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 82, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + location /t1 { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 1, + "time_window": 82, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- pipelined_requests eval +["GET /t1", "GET /hello", "GET /hello", "GET /t", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 200, 200, 200, 503] + + + +=== TEST 23: when the count is changed, check the limit is correct(from 2 to 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 83, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": 
"/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- pipelined_requests eval +["GET /t", "GET /hello", "GET /hello", "GET /hello", "GET /t", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 503, 200, 503, 503, 503] + + + +=== TEST 24: create consumer and bind key-auth plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "consumer_jack", + "plugins": { + "key-auth": { + "key": "auth-jack" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: create route and consumer_name is consumer_jack +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "consumer_name" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 26: up the limit +--- more_headers +apikey: auth-jack +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 27: set service(id: 1) and binding limit-count plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "service_id" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + 
ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 28: set route(id: 1) and bind service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 29: the number of requests exceeds the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 30: set service(id: 1), and no limit-count plugin configured +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{}]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 31: set route(id: 1) and bind service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 91, + "rejected_code": 503, + "key": "service_id" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 32: the number of requests exceeds the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 33: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 34: delete service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 35: use 'remote_addr' as default key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 95, + "rejected_code": 503 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 36: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 37: add service and route, upstream is the domain name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 3, + "time_window": 60, + "rejected_code": 503 + } + }, + "upstream": { + "nodes": { + "test.com:1980": 1, + "foo.com:1981": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/hello", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 
38: normal, the result is as expected +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + utils.dns_parse = function (domain) -- mock: DNS parser + if domain == "test.com" then + return {address = "127.0.0.1"} + end + + if domain == "foo.com" then + return {address = "127.0.0.1"} + end + + error("unknown domain: " .. domain) + end +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 503, 503] + + + +=== TEST 39: plugin is bound to the route and upstream is the domain name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 3, + "time_window": 99, + "rejected_code": 503 + } + }, + "upstream": { + "nodes": { + "test.com:1980": 1, + "foo.com:1981": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 40: normal, the result is as expected +--- init_by_lua_block + require "resty.core" + apisix = require("apisix") + core = require("apisix.core") + apisix.http_init() + + local utils = require("apisix.core.utils") + utils.dns_parse = function (domain) -- mock: DNS parser + if domain == "test.com" then + return {address = "127.0.0.1"} + end + + if domain == "foo.com" then + return {address = "127.0.0.1"} + end + + error("unknown domain: " .. 
domain) + end +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 503, 503] + + + +=== TEST 41: check_schema failed (the `count` attribute is equal to 0) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-count") + local ok, err = plugin.check_schema({count = 0, time_window = 60, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body eval +qr/property \"count\" validation failed: expected 0 to be greater than 0/ + + + +=== TEST 42: check_schema failed (the `time_window` attribute is equal to 0) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-count") + local ok, err = plugin.check_schema({count = 2, time_window = 0, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body eval +qr/property \"time_window\" validation failed: expected 0 to be greater than 0/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count2.t new file mode 100644 index 0000000..0dadaf7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count2.t @@ -0,0 +1,805 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: invalid route: wrong rejected_msg type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 600, + "rejected_code": 503, + "rejected_msg": true, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: property \"rejected_msg\" validation failed: wrong type: expected string, got boolean"} + + + +=== TEST 2: invalid route: wrong rejected_msg length +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { 
+ "limit-count": { + "count": 1, + "time_window": 600, + "rejected_code": 503, + "rejected_msg": "", + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: property \"rejected_msg\" validation failed: string too short, expected at least 1, got 0"} + + + +=== TEST 3: set route, with rejected_msg +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 600, + "rejected_code": 503, + "rejected_msg": "Requests are too frequent, please try again later.", + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: rejected_msg, request normal +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: rejected_msg, request frequent +--- request +GET /hello +--- error_code: 503 +--- response_body +{"error_msg":"Requests are too frequent, please try again later."} + + + +=== TEST 6: update route, use new limit configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "http_a", + "key_type": "var" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body 
+passed + + + +=== TEST 7: exceed the burst when key_type is var +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {a = 1}}) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503,503] + + + +=== TEST 8: bypass empty key when key_type is var +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503,503] + + + +=== TEST 9: update plugin to set key_type to var_combination +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "$http_a $http_b", + "key_type": "var_combination" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: exceed the burst when key_type is var_combination +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {a = 1}}) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503,503] + + + +=== TEST 11: don`t exceed the burst when key_type is var_combination +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {a = i}}) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[503,200] + + + +=== TEST 12: request when key is missing +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503,503] +--- error_log +The value of the configured key is empty, use client IP instead + + + +=== TEST 13: update plugin to set invalid key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "abcdefgh", + "key_type": "var_combination" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: request when key is invalid +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503,503] +--- error_log +The value of the configured key is empty, use client IP instead + + + +=== TEST 15: limit count in group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "group": "services_1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "group": "services_1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello_chunked" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: hit multiple paths +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello_chunked" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local uri + if i % 2 == 1 then + uri = uri1 + else + uri = uri2 + end + + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503,503] + + + +=== TEST 17: limit count in group, configuration is from services +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "group": "afafafhao" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": "1", + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "service_id": "1", + "uri": "/hello_chunked" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: hit multiple paths +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello_chunked" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local uri + if i % 2 == 1 then + uri = uri1 + else + uri = uri2 + end + + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200,503,503] + + + +=== TEST 19: configuration from the same group should be the same +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60, + "rejected_code": 503, + "group": "afafafhao" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- error_log +[error] +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-count err: group conf mismatched"} + + + +=== TEST 20: group with constant key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key_type": "constant", + "group": "afafafhao2" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: hit multiple paths +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local uri2 = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello_chunked" + local ress = {} + for i = 1, 4 do + local httpc = http.new() + local uri + if i % 2 == 1 then + uri = uri1 + else + uri = uri2 + end + + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- grep_error_log eval +qr/limit key: afafafhao2:remote_addr/ +--- grep_error_log_out +limit key: afafafhao2:remote_addr +limit key: afafafhao2:remote_addr +limit key: afafafhao2:remote_addr +limit key: afafafhao2:remote_addr +--- response_body +[200,200,503,503] + + + +=== TEST 22: group with disable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "group": "abcd", + "_meta": { + "disable": false + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count3.t new file mode 100644 index 0000000..1f5476f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count3.t @@ -0,0 +1,413 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route, counter will be shared +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200] + + + +=== TEST 3: set route with conf not changed +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60 + } + }, + "labels": {"l": "a"}, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[503,503] + + + +=== TEST 5: set route with conf changed +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 61 + } + }, + "labels": {"l": "a"}, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port + .. "/hello" + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200] + + + +=== TEST 7: set another route with the same conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello1", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 61 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: avoid sharing the same counter +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello1" + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- response_body +[200,200] + + + +=== TEST 9: add consumer jack1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack1", + "plugins": { + "basic-auth": { + "username": "jack2019", + "password": "123456" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: add consumer jack2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack2", + "plugins": { + "basic-auth": { + "username": "jack2020", + "password": "123456" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: set route with consumers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "basic-auth": {}, + "consumer-restriction":{ + "whitelist":[ + "jack1", + "jack2" + ], + "rejected_code": 403, + "type":"consumer_name" + }, + "limit-count": { + "count": 1, + "time_window": 60, + "rejected_code": 429 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: hit jack1, pass +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMTk6MTIzNDU2 +--- response_body +hello world + + 
+ +=== TEST 13: hit jack2, reject, the two consumers share the same counter +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjA6MTIzNDU2 +--- error_code: 429 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count4.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count4.t new file mode 100644 index 0000000..a3453c5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count4.t @@ -0,0 +1,204 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route, counter will be shared +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 1, + "time_window": 60 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: test X-RateLimit-Reset second number could be decline +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local old_X_RateLimit_Reset = 61 + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.sleep(2) + if tonumber(res.headers["X-RateLimit-Reset"]) < old_X_RateLimit_Reset then + old_X_RateLimit_Reset = tonumber(res.headers["X-RateLimit-Reset"]) + ngx.say("OK") + else + ngx.say("WRONG") + end + end + ngx.say("Done") + } + } +--- response_body +OK +OK +Done + + + +=== TEST 3: set route, counter will be shared +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: test header X-RateLimit-Remaining exist when limit rejected +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local ress = {} + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.sleep(1) + table.insert(ress, res.headers["X-RateLimit-Remaining"]) + + end + ngx.say(json.encode(ress)) + } + } +--- response_body +["1","0","0"] + + + +=== TEST 5: test header X-RateLimit-Reset shouldn't be set to 0 after request be rejected +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local ress = {} + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.sleep(1) + local reset = res.headers["X-RateLimit-Reset"] + if tonumber(reset) <= 0 then + ngx.say("failed") + end + + end + ngx.say("success") + } + } +--- response_body +success diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count5.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count5.t new file mode 100644 index 0000000..4227b4f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-count5.t @@ -0,0 +1,202 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } + + $ENV{LIMIT_COUNT_KEY} = "remote_addr"; +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: modified limit-count.incoming, cost == 2 +--- config + location = /t { + content_by_lua_block { + local conf = { + time_window = 60, + count = 10, + allow_degradation = false, + key_type = "var", + policy = "local", + rejected_code = 503, + show_limit_quota_header = true, + key = "remote_addr" + } + local limit_count_local = require "apisix.plugins.limit-count.limit-count-local" + local lim = limit_count_local.new("plugin-limit-count", 10, 60) + local uri = ngx.var.uri + for i = 1, 7 do + local delay, err = lim:incoming(uri, true, conf, 2) + if not delay then + ngx.say(err) + else + local remaining = err + ngx.say("remaining: ", remaining) + end + end + } + } +--- request + GET /t +--- response_body +remaining: 8 +remaining: 6 +remaining: 4 +remaining: 2 +remaining: 0 +rejected +rejected + + + +=== TEST 2: set route(id: 1) using environment variable for key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "$ENV://LIMIT_COUNT_KEY" + } + }, + "upstream": { + 
"nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: up the limit with environment variable for key +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 4: customize rate limit headers by plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 10, + "time_window": 60, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + local code, meta_body = t('/apisix/admin/plugin_metadata/limit-count', + ngx.HTTP_PUT, + [[{ + "limit_header":"APISIX-RATELIMIT-QUOTA", + "remaining_header":"APISIX-RATELIMIT-REMAINING", + "reset_header":"APISIX-RATELIMIT-RESET" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 5: check rate limit headers +--- request +GET /hello +--- response_headers_like +APISIX-RATELIMIT-QUOTA: 10 +APISIX-RATELIMIT-REMAINING: 9 +APISIX-RATELIMIT-RESET: \d+ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req-redis-cluster.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req-redis-cluster.t new file mode 100644 index 0000000..4c36c22 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req-redis-cluster.t @@ -0,0 +1,605 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-req") + local ok, err = plugin.check_schema({ + rate = 1, + burst = 0, + rejected_code = 503, + key = 'remote_addr', + policy = 'redis', + redis_host = '127.0.0.1' + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: add plugin with redis cluster with ssl +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_name": "test", + "redis_cluster_nodes": [ + "127.0.0.1:7000", + "127.0.0.1:7001", + "127.0.0.1:7002" + ], + "redis_cluster_ssl": true, + "redis_cluster_ssl_verify": false + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: not exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 4: exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 200, 200, 200, 200, 200, 200, 503] + + + +=== TEST 5: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_name": "test", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 503, 503, 503] + + + +=== TEST 7: wrong type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": -1, + "burst": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_name": "test", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ] + } + }, + "upstream": { + "nodes": 
{ + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-req err: property \"rate\" validation failed: expected -1 to be greater than 0"} + + + +=== TEST 8: disable plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 10: set route (key: server_addr) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 2, + "rejected_code": 503, + "key": "server_addr", + "policy": "redis-cluster", + "redis_cluster_name": "test", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: default rejected_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 2, + "key": 
"remote_addr", + "policy": "redis-cluster", + "redis_cluster_name": "test", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: consumer binds the limit-req plugin and `key` is `consumer_name` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "new_consumer", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "limit-req": { + "rate": 3, + "burst": 2, + "rejected_code": 403, + "key": "consumer_name", + "policy": "redis-cluster", + "redis_cluster_name": "test", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: route add "key-auth" plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: not exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-jack +--- error_code eval +[200, 200, 200] + + + +=== TEST 15: update the limit-req plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "new_consumer", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 403, + "key": "consumer_name", + "policy": "redis-cluster", + "redis_cluster_name": "test", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ] + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-jack +--- error_code eval +[403, 403, 403, 403] + + + +=== TEST 17: key is consumer_name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 2, + "burst": 1, + "key": "consumer_name", + "policy": "redis-cluster", + "redis_cluster_name": "test", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5002" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: get "consumer_name" is empty +--- request +GET /hello +--- response_body +hello world +--- error_log +The value of the configured key is empty, use client IP instead + + + +=== TEST 19: delete consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/new_consumer', ngx.HTTP_DELETE) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: delete route +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + + ngx.status =code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: check_schema failed (the `rate` attribute is equal to 0) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-req") + local ok, err = plugin.check_schema({rate = 0, burst = 0, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body eval +qr/property \"rate\" validation failed: expected 0 to be greater than 0/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req-redis.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req-redis.t new file mode 100644 index 0000000..84664b7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req-redis.t @@ -0,0 +1,653 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + my $port = $ENV{TEST_NGINX_SERVER_PORT}; + + my $config = $block->config // <<_EOC_; + location /access_root_dir { + content_by_lua_block { + local httpc = require "resty.http" + local hc = httpc:new() + + local res, err = hc:request_uri('http://127.0.0.1:$port/limit_conn') + if res then + ngx.exit(res.status) + end + } + } +_EOC_ + + $block->set_value("config", $config); +}); + + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-req") + local ok, err = plugin.check_schema({ + rate = 1, + burst = 0, + rejected_code = 503, + key = 'remote_addr', + policy = 'redis', + redis_host = '127.0.0.1' + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: add plugin with redis +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: not exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 4: exceeding the burst 
+--- pipelined_requests eval
+["GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello", "GET /hello"]
+--- error_code eval
+[200, 200, 200, 200, 200, 200, 200, 200, 200, 503]
+
+
+
+=== TEST 5: update plugin with username password
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "plugins": {
+                        "limit-req": {
+                            "rate": 0.1,
+                            "burst": 0.1,
+                            "rejected_code": 503,
+                            "key": "remote_addr",
+                            "policy": "redis",
+                            "redis_host": "127.0.0.1",
+                            "redis_port": 6379,
+                            "redis_username": "alice",
+                            "redis_password": "somepassword"
+                        }
+                    },
+                    "upstream": {
+                        "nodes": {
+                            "127.0.0.1:1980": 1
+                        },
+                        "type": "roundrobin"
+                    },
+                    "uri": "/hello"
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 6: exceeding the burst
+--- pipelined_requests eval
+["GET /hello", "GET /hello", "GET /hello", "GET /hello"]
+--- error_code eval
+[200, 503, 503, 503]
+
+
+
+=== TEST 7: update plugin with username, wrong password
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "plugins": {
+                        "limit-req": {
+                            "rate": 0.1,
+                            "burst": 0.1,
+                            "rejected_code": 503,
+                            "key": "remote_addr",
+                            "policy": "redis",
+                            "redis_host": "127.0.0.1",
+                            "redis_port": 6379,
+                            "redis_username": "alice",
+                            "redis_password": "someerrorpassword"
+                        }
+                    },
+                    "upstream": {
+                        "nodes": {
+                            "127.0.0.1:1980": 1
+                        },
+                        "type": "roundrobin"
+                    },
+                    "uri": "/hello"
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- request
+GET /t
+--- response_body
+passed
+
+
+
+=== TEST 8: catch wrong pass
+--- request
+GET /hello
+--- error_code: 500
+--- error_log
+failed to limit req: WRONGPASS invalid username-password pair or 
user is disabled. + + + +=== TEST 9: invalid route: missing redis_host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-req err: then clause did not match"} + + + +=== TEST 10: disable plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 12: set route (key: server_addr) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 2, + "rejected_code": 503, + "key": "server_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 13: default rejected_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 2, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: consumer binds the limit-req plugin and `key` is `consumer_name` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "new_consumer", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "limit-req": { + "rate": 3, + "burst": 2, + "rejected_code": 403, + "key": "consumer_name", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: route add "key-auth" plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: not exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-jack +--- error_code eval +[200, 200, 
200] + + + +=== TEST 17: update the limit-req plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "new_consumer", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 403, + "key": "consumer_name", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379 + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-jack +--- error_code eval +[403, 403, 403, 403] + + + +=== TEST 19: key is consumer_name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 2, + "burst": 1, + "key": "consumer_name", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: get "consumer_name" is empty +--- request +GET /hello +--- response_body +hello world +--- error_log +The value of the configured key is empty, use client IP instead + + + +=== TEST 21: delete consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/new_consumer', ngx.HTTP_DELETE) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: delete route +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + + ngx.status =code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 23: check_schema failed (the `rate` attribute is equal to 0) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-req") + local ok, err = plugin.check_schema({rate = 0, burst = 0, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body eval +qr/property \"rate\" validation failed: expected 0 to be greater than 0/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req.t new file mode 100644 index 0000000..0f46374 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req.t @@ -0,0 +1,561 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-req") + local ok, err = plugin.check_schema({rate = 1, burst = 0, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: wrong value of key +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-conn") + local ok, err = plugin.check_schema({burst = 0, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body_like eval +qr/property "(conn|default_conn_delay)" is required +done +/ + + + +=== TEST 3: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 2, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: not exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 5: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + 
ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 503, 503, 503] + + + +=== TEST 7: wrong type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": -1, + "burst": 0.1, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin limit-req err: property \"rate\" validation failed: expected -1 to be greater than 0"} + + + +=== TEST 8: disable plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 10: set route (key: server_addr) +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 2, + "rejected_code": 503, + "key": "server_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: default rejected_code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 4, + "burst": 2, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: consumer binds the limit-req plugin and `key` is `consumer_name` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "new_consumer", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "limit-req": { + "rate": 3, + "burst": 2, + "rejected_code": 403, + "key": "consumer_name" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: route add "key-auth" plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] 
+ ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: not exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-jack +--- error_code eval +[200, 200, 200] + + + +=== TEST 15: update the limit-req plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "new_consumer", + "plugins": { + "key-auth": { + "key": "auth-jack" + }, + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 403, + "key": "consumer_name" + } + } + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: exceeding the burst +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-jack +--- error_code eval +[403, 403, 403, 403] + + + +=== TEST 17: key is consumer_name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 2, + "burst": 1, + "key": "consumer_name" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: get "consumer_name" is empty +--- request +GET /hello +--- response_body +hello world +--- error_log +The value of the configured key is empty, use client IP instead + + + +=== TEST 19: delete consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers/new_consumer', ngx.HTTP_DELETE) + 
+ ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: delete route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + + ngx.status =code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: check_schema failed (the `rate` attribute is equal to 0) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.limit-req") + local ok, err = plugin.check_schema({rate = 0, burst = 0, rejected_code = 503, key = 'remote_addr'}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body eval +qr/property \"rate\" validation failed: expected 0 to be greater than 0/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req2.t new file mode 100644 index 0000000..2a8ac68 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req2.t @@ -0,0 +1,317 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('info'); +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin for delay test +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 0.1, + "burst": 4, + "rejected_code": 503, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "desc": "upstream_node", + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: the second request will timeout because of delay, error code will be '' +--- abort +--- timeout: 500ms +--- pipelined_requests eval +["GET /hello", "GET /hello"] +--- error_code eval +[200, ''] + + + +=== TEST 3: add nodelay flag +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1/plugins', + ngx.HTTP_PATCH, + [[{ + "limit-req": { + "rate": 0.1, + "burst": 4, + "rejected_code": 503, + "key": "remote_addr", + "nodelay": true + } + }]] + ) + + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: the second request will not timeout because of nodelay +--- abort +--- timeout: 500ms +--- pipelined_requests eval +["GET /hello", "GET /hello"] +--- error_code eval +[200, 200] + + + +=== TEST 5: key type is var_combination +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 503, + "key": "$http_a $http_b", + "key_type": "var_combination" + } + }, + "upstream": { + 
"nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: exceed the burst +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {a = 1}}) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- request +GET /t +--- response_body +[200,503] + + + +=== TEST 7: don't exceed the burst +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {a = i}}) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- request +GET /t +--- response_body +[200,200] + + + +=== TEST 8: request when key is missing +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- request +GET /t +--- response_body +[200,503] +--- error_log +The value of the configured key is empty, use client IP instead + + + +=== TEST 9: update plugin to set invalid key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 503, + "key": "abcdefgh", + "key_type": "var_combination" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: request when key is invalid +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + + local ress = {} + for i = 1, 2 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + table.insert(ress, res.status) + end + ngx.say(json.encode(ress)) + } + } +--- request +GET /t +--- response_body +[200,503] +--- error_log +The value of the configured key is empty, use client IP instead diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req3.t new file mode 100644 index 0000000..684eb9d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/limit-req3.t @@ -0,0 +1,114 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('info'); +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: create route with limit-req plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-req": { + "rate": 0.1, + "burst": 0.1, + "rejected_code": 503, + "key": "$remote_addr", + "key_type": "var_combination" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "host": "www.test.com" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: create ssl(sni: www.test.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local 
data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 3: use HTTP version 2 to request +--- exec +curl --http2 --parallel -k https://www.test.com:1994/hello https://www.test.com:1994/hello --resolve www.test.com:1994:127.0.0.1 +--- response_body_like +503 Service Temporarily Unavailable.*.hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/log-rotate.t b/CloudronPackages/APISIX/apisix-source/t/plugin/log-rotate.t new file mode 100644 index 0000000..8e62507 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/log-rotate.t @@ -0,0 +1,218 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $extra_yaml_config = <<_EOC_; +plugins: # plugin list + - log-rotate + +plugin_attr: + log-rotate: + interval: 1 + max_kept: 3 +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: log rotate +--- config + location /t { + content_by_lua_block { + ngx.log(ngx.ERR, "start xxxxxx") + ngx.sleep(2.5) + local has_split_access_file = false + local has_split_error_file = false + local lfs = require("lfs") + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__access.log$") then + has_split_access_file = true + end + + if string.match(file_name, "__error.log$") then + local f = assert(io.open(ngx.config.prefix() .. "/logs/" .. file_name, "r")) + local content = f:read("*all") + f:close() + local index = string.find(content, "start xxxxxx") + if index then + has_split_error_file = true + end + end + end + + if not has_split_access_file or not has_split_error_file then + ngx.status = 500 + else + ngx.status = 200 + end + } + } +--- error_code eval +[200] + + + +=== TEST 2: in current log +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.1) + ngx.log(ngx.WARN, "start xxxxxx") + ngx.say("done") + } + } +--- response_body +done +--- error_log +start xxxxxx + + + +=== TEST 3: fix: ensure only one timer is running +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.5) + local t = require("lib.test_admin").test + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + + ngx.status = code + ngx.say(org_body) + + ngx.sleep(1) + + local lfs = require("lfs") + for file_name in lfs.dir(ngx.config.prefix() .. 
"/logs/") do + if string.match(file_name, "__error.log$") then + local f = assert(io.open(ngx.config.prefix() .. "/logs/" .. file_name, "r")) + local content = f:read("*all") + f:close() + local counter = 0 + ngx.re.gsub(content, [=[run timer\[plugin#log-rotate\]]=], function() + counter = counter + 1 + return "" + end) + + if counter ~= 1 then + ngx.say("not a single rotator run at the same time: ", file_name) + end + end + end + } + } +--- response_body +done + + + +=== TEST 4: disable log-rotate via hot reload +--- config + location /t { + content_by_lua_block { + local data = [[ +apisix: + node_listen: 1984 + admin_key: null +plugins: + - prometheus + ]] + require("lib.test_admin").set_config_yaml(data) + local t = require("lib.test_admin").test + local code, _, org_body = t('/apisix/admin/plugins/reload', + ngx.HTTP_PUT) + + ngx.status = code + ngx.say(org_body) + + ngx.sleep(2.1) -- make sure two files will be rotated out if we don't disable it + + local n_split_error_file = 0 + local lfs = require("lfs") + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__error.log$") then + n_split_error_file = n_split_error_file + 1 + end + end + + -- Before hot reload, the log rotate may or may not take effect. + -- It depends on the time we start the test + ngx.say(n_split_error_file <= 1) + } + } +--- response_body +done +true + + + +=== TEST 5: check file changes (disable compression) +--- config + location /t { + content_by_lua_block { + ngx.sleep(2) + + local default_logs = {} + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__error.log$") or string.match(file_name, "__access.log$") then + local filepath = ngx.config.prefix() .. "/logs/" .. 
file_name + local attr = lfs.attributes(filepath) + if attr then + default_logs[filepath] = { change = attr.change, size = attr.size } + end + end + end + + ngx.sleep(1) + + local passed = false + for filepath, origin_attr in pairs(default_logs) do + local check_attr = lfs.attributes(filepath) + if check_attr.change == origin_attr.change and check_attr.size == origin_attr.size then + passed = true + else + passed = false + break + end + end + + if passed then + ngx.say("passed") + end + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/log-rotate2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/log-rotate2.t new file mode 100644 index 0000000..636ad28 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/log-rotate2.t @@ -0,0 +1,203 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 1 + max_kept: 3 + enable_compression: true +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: log rotate, with enable log file compression +--- config + location /t { + content_by_lua_block { + ngx.log(ngx.ERR, "start xxxxxx") + ngx.sleep(3.5) + local has_split_access_file = false + local has_split_error_file = false + local lfs = require("lfs") + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__access.log.tar.gz$") then + has_split_access_file = true + end + + if string.match(file_name, "__error.log.tar.gz$") then + has_split_error_file = true + end + end + + if not has_split_access_file or not has_split_error_file then + ngx.status = 500 + else + ngx.status = 200 + end + } + } + + + +=== TEST 2: in current log, with enable log file compression +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.1) + ngx.log(ngx.WARN, "start xxxxxx") + ngx.say("done") + } + } +--- response_body +done +--- error_log +start xxxxxx + + + +=== TEST 3: check file changes (enable compression) +--- config + location /t { + content_by_lua_block { + ngx.sleep(3) + + local default_logs = {} + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__error.log.tar.gz$") or string.match(file_name, "__access.log.tar.gz$") then + local filepath = ngx.config.prefix() .. "/logs/" .. 
file_name + local attr = lfs.attributes(filepath) + if attr then + default_logs[filepath] = { change = attr.change, size = attr.size } + end + end + end + + ngx.sleep(1) + + local passed = false + for filepath, origin_attr in pairs(default_logs) do + local check_attr = lfs.attributes(filepath) + if check_attr.change == origin_attr.change and check_attr.size == origin_attr.size then + passed = true + else + passed = false + break + end + end + + if passed then + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 4: test rotate time align +--- extra_yaml_config +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 3600 + max_kept: 1 +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.1) + local log_file = ngx.config.prefix() .. "logs/error.log" + local file = io.open(log_file, "r") + local log = file:read("*a") + + local m, err = ngx.re.match(log, [[first init rotate time is: (\d+)]], "jom") + if not m then + ngx.log(ngx.ERR, "failed to gmatch: ", err) + return + end + + ngx.sleep(2) + + local now_time = ngx.time() + local interval = 3600 + local rotate_time = now_time + interval - (now_time % interval) + if tonumber(m[1]) == tonumber(rotate_time) then + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 5: max_kept effective on compression files +--- extra_yaml_config +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 1 + max_kept: 1 + enable_compression: true +--- config + location /t { + content_by_lua_block { + ngx.sleep(3.5) + local has_split_access_file = false + local has_split_error_file = false + local lfs = require("lfs") + local count = 0 + for file_name in lfs.dir(ngx.config.prefix() .. 
"/logs/") do + if string.match(file_name, ".tar.gz$") then + count = count + 1 + end + end + --- only two compression file, access.log.tar.gz and error.log.tar.gz + ngx.say(count) + } + } +--- response_body +2 +--- timeout: 5 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/log-rotate3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/log-rotate3.t new file mode 100644 index 0000000..3f1edd9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/log-rotate3.t @@ -0,0 +1,207 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 86400 + max_size: 9 + max_kept: 3 + enable_compression: false +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: log rotate by max_size +--- config + location /t { + content_by_lua_block { + ngx.log(ngx.ERR, "start xxxxxx") + ngx.sleep(2) + local has_split_access_file = false + local has_split_error_file = false + local lfs = require("lfs") + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__access.log$") then + has_split_access_file = true + end + + if string.match(file_name, "__error.log$") then + has_split_error_file = true + end + end + + if not has_split_access_file and has_split_error_file then + ngx.status = 200 + else + ngx.status = 500 + end + } + } + + + +=== TEST 2: in current log +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.1) + ngx.log(ngx.WARN, "start xxxxxx") + ngx.say("done") + } + } +--- response_body +done +--- error_log +start xxxxxx + + + +=== TEST 3: check file changes +--- config + location /t { + content_by_lua_block { + ngx.sleep(1) + + local default_logs = {} + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "__error.log$") or string.match(file_name, "__access.log$") then + local filepath = ngx.config.prefix() .. "/logs/" .. 
file_name + local attr = lfs.attributes(filepath) + if attr then + default_logs[filepath] = { change = attr.change, size = attr.size } + end + end + end + + ngx.sleep(1) + + local passed = false + for filepath, origin_attr in pairs(default_logs) do + local check_attr = lfs.attributes(filepath) + if check_attr.change == origin_attr.change and check_attr.size == origin_attr.size then + passed = true + else + passed = false + break + end + end + + if passed then + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 4: max_kept effective on differently named compression files +--- extra_yaml_config +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 1 + max_kept: 1 + enable_compression: true +--- yaml_config +nginx_config: + error_log: logs/err1.log + http: + access_log: logs/acc1.log +--- config + location /t { + error_log logs/err1.log info; + access_log logs/acc1.log; + + content_by_lua_block { + ngx.sleep(3) + local lfs = require("lfs") + local count = 0 + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, ".tar.gz$") then + count = count + 1 + end + end + --- only two compression file + ngx.say(count) + } + } +--- response_body +2 + + + +=== TEST 5: check whether new log files were created +--- extra_yaml_config +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 1 + max_kept: 0 + enable_compression: false +--- yaml_config +nginx_config: + error_log: logs/err2.log + http: + access_log: logs/acc2.log +--- config + location /t { + error_log logs/err2.log info; + access_log logs/acc2.log; + + content_by_lua_block { + ngx.sleep(3) + local lfs = require("lfs") + local count = 0 + for file_name in lfs.dir(ngx.config.prefix() .. 
"/logs/") do + if string.match(file_name, "err2.log$") or string.match(file_name, "acc2.log$") then + count = count + 1 + end + end + ngx.say(count) + } + } +--- response_body +2 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/loggly.t b/CloudronPackages/APISIX/apisix-source/t/plugin/loggly.t new file mode 100644 index 0000000..babfa0a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/loggly.t @@ -0,0 +1,845 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("stream_conf_enable", 1); + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen 8126 udp; + content_by_lua_block { + -- mock udp server is just accepts udp connection and log into error.log + require("lib.mock_layer4").loggly() + } + } +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 10420; + + location /loggly/bulk/tok/tag/bulk { + content_by_lua_block { + ngx.req.read_body() + local data = ngx.req.get_body_data() + local headers = ngx.req.get_headers() + ngx.log(ngx.ERR, "loggly body: ", data) + ngx.log(ngx.ERR, "loggly tags: " .. require("toolkit.json").encode(headers["X-LOGGLY-TAG"])) + ngx.say("ok") + } + } + + location /loggly/503 { + content_by_lua_block { + ngx.status = 503 + ngx.say("service temporarily unavailable") + ngx.exit(ngx.OK) + } + } + + location /loggly/410 { + content_by_lua_block { + ngx.status = 410 + ngx.say("expired link") + ngx.exit(ngx.OK) + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity check metadata +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.loggly") + local configs = { + -- full configuration + { + customer_token = "TEST-Token-Must-Be-Passed", + severity = "INFO", + tags = {"special-route", "highpriority-route"}, + max_retry_count = 0, + retry_delay = 1, + buffer_duration = 60, + inactive_timeout = 2, + batch_max_size = 10, + }, + -- minimize schema + { + customer_token = "minimized-cofig", + }, + -- property "customer_token" is required + { + severity = "DEBUG", + }, + -- unknown severity + { + customer_token = "test", + 
severity = "UNKNOWN", + }, + -- severity in lower case, should pass + { + customer_token = "test", + severity = "crit", + } + } + + for i = 1, #configs do + local ok, err = plugin.check_schema(configs[i]) + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + end + } + } +--- response_body +passed +passed +property "customer_token" is required +property "severity" validation failed: matches none of the enum values +passed + + + +=== TEST 2: set route with loggly enabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loggly": { + "customer_token" : "test-token", + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "name": "loggly-enabled-route", + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: update loggly metadata with host port +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/loggly', + ngx.HTTP_PUT, + [[{ + "host":"127.0.0.1", + "port": 8126 + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: testing udp packet with mock loggly udp suite +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- request 1 + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + + -- request 2 + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + } + } +--- wait: 0.5 +--- response_body +opentracing +opentracing +--- grep_error_log eval +qr/message 
received: .+?(?= \{)/ +--- grep_error_log_out eval +qr/message received: <14>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[test-token\@41058 tag="apisix"] +message received: <14>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[test-token\@41058 tag="apisix"]/ + + + +=== TEST 5: checking loggly tags +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loggly": { + "customer_token" : "token-1", + "batch_max_size": 1, + "tags": ["abc", "def"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + } + } +--- wait: 0.5 +--- response_body +passed +opentracing +--- grep_error_log eval +qr/message received: .+?(?= \{)/ +--- grep_error_log_out eval +qr/message received: <14>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[token-1\@41058 tag="abc" tag="def"]/ + + + +=== TEST 6: checking loggly log severity +log severity is calculated based on PRIVAL +8 + LOG_SEVERITY value +CRIT has value 2 so test should return PRIVAL <10> +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loggly": { + "customer_token" : "token-1", + "batch_max_size": 1, + "severity": "CRIT" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + } + } +--- wait: 0.5 +--- response_body +passed +opentracing +--- 
grep_error_log eval +qr/message received: .+?(?= \{)/ +--- grep_error_log_out eval +qr/message received: <10>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[token-1\@41058 tag="apisix"]/ + + + +=== TEST 7: collect response full log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + } + } +--- response_body +opentracing +--- grep_error_log eval +qr/message received: [ -~]+/ +--- grep_error_log_out eval +qr/message received: <10>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[token-1\@41058 tag="apisix"] \{"apisix_latency":[\d.]*,"client_ip":"127\.0\.0\.1","latency":[\d.]*,"request":\{"headers":\{"content-type":"application\/x-www-form-urlencoded","host":"127\.0\.0\.1:1984","user-agent":"lua-resty-http\/[\d.]* \(Lua\) ngx_lua\/[\d]*"\},"method":"GET","querystring":\{\},"size":[\d]+,"uri":"\/opentracing","url":"http:\/\/127\.0\.0\.1:1984\/opentracing"\},"response":\{"headers":\{"connection":"close","content-type":"text\/plain","server":"APISIX\/[\d.]+","transfer-encoding":"chunked"\},"size":[\d]*,"status":200\},"route_id":"1","server":\{"hostname":"[ -~]*","version":"[\d.]+"\},"service_id":"","start_time":[\d]*,"upstream":"127\.0\.0\.1:1982","upstream_latency":[\d]*\}/ + + + +=== TEST 8: collect response log with include_resp_body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loggly": { + "customer_token" : "tok", + "batch_max_size": 1, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + 
return + end + ngx.print(body) + } + } +--- response_body +passed +opentracing +--- grep_error_log eval +qr/message received: [ -~]+/ +--- grep_error_log_out eval +qr/message received: <14>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[tok\@41058 tag="apisix"] \{"apisix_latency":[\d.]*,"client_ip":"127\.0\.0\.1","latency":[\d.]*,"request":\{"headers":\{"content-type":"application\/x-www-form-urlencoded","host":"127\.0\.0\.1:1984","user-agent":"lua-resty-http\/[\d.]* \(Lua\) ngx_lua\/[\d]*"\},"method":"GET","querystring":\{\},"size":[\d]+,"uri":"\/opentracing","url":"http:\/\/127\.0\.0\.1:1984\/opentracing"\},"response":\{"body":"opentracing\\n","headers":\{"connection":"close","content-type":"text\/plain","server":"APISIX\/[\d.]+","transfer-encoding":"chunked"\},"size":[\d]*,"status":200\},"route_id":"1","server":\{"hostname":"[ -~]*","version":"[\d.]+"\},"service_id":"","start_time":[\d]*,"upstream":"127\.0\.0\.1:1982","upstream_latency":[\d]*\}/ + + + +=== TEST 9: collect log with include_resp_body_expr +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loggly": { + "customer_token" : "tok", + "batch_max_size": 1, + "include_resp_body": true, + "include_resp_body_expr": [ + ["arg_bar", "==", "bar"] + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + -- this will include resp body + local code, _, body = t("/opentracing?bar=bar", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + + } + } +--- response_body +passed +opentracing +--- grep_error_log eval +qr/message received: [ -~]+/ +--- grep_error_log_out eval +qr/message received: <14>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[tok\@41058 tag="apisix"] 
\{"apisix_latency":[\d.]*,"client_ip":"127\.0\.0\.1","latency":[\d.]*,"request":\{"headers":\{"content-type":"application\/x-www-form-urlencoded","host":"127\.0\.0\.1:1984","user-agent":"lua-resty-http\/[\d.]* \(Lua\) ngx_lua\/[\d]*"\},"method":"GET","querystring":\{"bar":"bar"\},"size":[\d]+,"uri":"\/opentracing\?bar=bar","url":"http:\/\/127\.0\.0\.1:1984\/opentracing\?bar=bar"\},"response":\{"body":"opentracing\\n","headers":\{"connection":"close","content-type":"text\/plain","server":"APISIX\/[\d.]+","transfer-encoding":"chunked"\},"size":[\d]*,"status":200\},"route_id":"1","server":\{"hostname":"[ -~]*","version":"[\d.]+"\},"service_id":"","start_time":[\d]*,"upstream":"127\.0\.0\.1:1982","upstream_latency":[\d]*\}/ + + + +=== TEST 10: collect log with include_resp_body_expr mismatch +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, _, body = t("/opentracing?foo=bar", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + + } + } +--- response_body +opentracing +--- grep_error_log eval +qr/message received: [ -~]+/ +--- grep_error_log_out eval +qr/message received: <14>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[tok\@41058 tag="apisix"] \{"apisix_latency":[\d.]*,"client_ip":"127\.0\.0\.1","latency":[\d.]*,"request":\{"headers":\{"content-type":"application\/x-www-form-urlencoded","host":"127\.0\.0\.1:1984","user-agent":"lua-resty-http\/[\d.]* \(Lua\) ngx_lua\/[\d]*"\},"method":"GET","querystring":\{"foo":"bar"\},"size":[\d]+,"uri":"\/opentracing\?foo=bar","url":"http:\/\/127\.0\.0\.1:1984\/opentracing\?foo=bar"\},"response":\{"headers":\{"connection":"close","content-type":"text\/plain","server":"APISIX\/[\d.]+","transfer-encoding":"chunked"\},"size":[\d]*,"status":200\},"route_id":"1","server":\{"hostname":"[ -~]*","version":"[\d.]+"\},"service_id":"","start_time":[\d]*,"upstream":"127\.0\.0\.1:1982","upstream_latency":[\d]*\}/ + + + +=== TEST 11: collect 
request log with include_req_body +--- log_level: info +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loggly": { + "customer_token" : "tok", + "batch_max_size": 1, + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + local code, _, body = t("/opentracing", "POST", "body-data") + } + } +--- error_log +"request":{"body":"body-data" + + + +=== TEST 12: collect log with include_req_body_expr +--- log_level: debug +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loggly": { + "customer_token" : "tok", + "batch_max_size": 1, + "include_req_body": true, + "include_req_body_expr": [ + ["arg_bar", "==", "bar"] + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + -- this will include resp body + local code, _, body = t("/opentracing?bar=bar", "POST", "body-data") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + + } + } +--- error_log +"request":{"body":"body-data" + + + +=== TEST 13: collect log with include_req_body_expr mismatch +--- log_level: debug +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, _, body = t("/opentracing?foo=bar", "POST", "body-data") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + + } + } +--- no_error_log +"request":{"body":"body-data" + + + +=== TEST 14: collect log with log_format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local 
code, body = t('/apisix/admin/plugin_metadata/loggly', + ngx.HTTP_PUT, + [[{ + "host":"127.0.0.1", + "port": 8126, + "log_format":{ + "host":"$host", + "client":"$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + + local code, _, body = t("/opentracing?foo=bar", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + } + } +--- response_body +passed +opentracing +--- grep_error_log eval +qr/message received: [ -~]+/ +--- grep_error_log_out eval +qr/message received: <14>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[tok\@41058 tag="apisix"] \{"client":"[\d.]+","host":"[\d.]+","route_id":"1"\}/ + + + +=== TEST 15: loggly http protocol +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/loggly', + ngx.HTTP_PUT, + { + host = ngx.var.server_addr .. ":10420/loggly", + protocol = "http", + log_format = { + ["route_id"] = "$route_id", + } + } + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + + local code, _, body = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + } + } +--- wait: 2 +--- response_body +passed +opentracing +--- error_log +loggly body: {"route_id":"1"} +loggly tags: "apisix" + + + +=== TEST 16: test setup for collecting syslog with severity based on http response code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loggly": { + "customer_token" : "tok", + "batch_max_size": 1, + "severity_map": { + "503": "ERR", + "410": "ALERT" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:10420": 1 + }, + "type": "roundrobin" + }, + "uri": "/loggly/*" + }]] + ) + + if code >= 300 then + ngx.status = code + 
ngx.say("fail") + return + end + ngx.say(body) + + local code, body = t('/apisix/admin/plugin_metadata/loggly', + ngx.HTTP_PUT, + [[{ + "host":"127.0.0.1", + "port": 8126, + "log_format":{ + "route_id": "$route_id" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- response_body +passed +passed + + + +=== TEST 17: syslog PRIVAL 9 for type severity level ALERT +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body, _ = t("/loggly/410", "GET") + ngx.print(body) + } + } +--- response_body +expired link +--- grep_error_log eval +qr/message received: [ -~]+/ +--- grep_error_log_out eval +qr/message received: <9>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[tok\@41058 tag="apisix"] \{"route_id":"1"\}/ + + + +=== TEST 18: syslog PRIVAL 11 for type severity level ERR +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body, _ = t("/loggly/503", "GET") + ngx.print(body) + } + } +--- response_body +service temporarily unavailable +--- grep_error_log eval +qr/message received: [ -~]+/ +--- grep_error_log_out eval +qr/message received: <11>1 [\d\-T:.]+Z [\d.]+ apisix [\d]+ - \[tok\@41058 tag="apisix"] \{"route_id":"1"\}/ + + + +=== TEST 19: log format in plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/loggly', + ngx.HTTP_PUT, + [[{ + "host":"127.0.0.1", + "port": 8126, + "log_format":{ + "client":"$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loggly": { + "customer_token" : "tok", + "log_format":{ + "host":"$host", + "client":"$remote_addr" + }, + "batch_max_size": 1, + "inactive_timeout": 1 + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: hit +--- request +GET /opentracing?foo=bar +--- response_body +opentracing +--- wait: 0.5 +--- grep_error_log eval +qr/message received: [ -~]+/ +--- grep_error_log_out eval +qr/message received: <14>1 [\d\-T:.]+Z \w+ apisix [\d]+ - \[tok\@41058 tag="apisix"] \{"client":"[\d.]+","host":"\w+","route_id":"1"\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/loki-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/loki-logger.t new file mode 100644 index 0000000..ffeed27 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/loki-logger.t @@ -0,0 +1,425 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + {endpoint_addrs = {"http://127.0.0.1:8199"}}, + {endpoint_addrs = "http://127.0.0.1:8199"}, + {endpoint_addrs = {}}, + {}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, endpoint_uri = "/loki/api/v1/push"}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, endpoint_uri = 1234}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, tenant_id = 1234}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, headers = 1234}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, log_labels = "1234"}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, log_labels = {job = "apisix6"}}, + } + local plugin = require("apisix.plugins.loki-logger") + + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +done +property "endpoint_addrs" validation failed: wrong type: expected array, got string +property "endpoint_addrs" validation failed: expect array to have at least 1 items +property "endpoint_addrs" is required +done +property "endpoint_uri" validation failed: wrong type: expected string, got number +property "tenant_id" validation failed: wrong type: expected string, got number +property "headers" validation failed: wrong type: expected object, got number +property "log_labels" validation failed: wrong type: expected object, got string +done + + + +=== TEST 2: setup route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://127.0.0.1:3100"], + "tenant_id": "tenant_1", + "batch_max_size": 1 
+ } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: hit route +--- request +GET /hello +--- more_headers +test-header: only-for-test#1 +--- response_body +hello world + + + +=== TEST 4: check loki log +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 3000) .. "000000", -- from + tostring(now) .. "000000" -- to + ) + + assert(err == nil, "fetch logs error: " .. (err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. cjson.encode(data)) + + local entry = data.data.result[1] + assert(entry.stream.request_headers_test_header == "only-for-test#1", + "expected field request_headers_test_header value: " .. 
cjson.encode(entry)) + } + } +--- error_code: 200 + + + +=== TEST 5: setup route (with log_labels) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://127.0.0.1:3100"], + "tenant_id": "tenant_1", + "log_labels": { + "custom_label": "custom_label_value" + }, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit route +--- request +GET /hello +--- more_headers +test-header: only-for-test#2 +--- response_body +hello world + + + +=== TEST 7: check loki log (with custom_label) +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 3000) .. "000000", -- from + tostring(now) .. "000000", -- to + { query = [[{custom_label="custom_label_value"} | json]] } + ) + + assert(err == nil, "fetch logs error: " .. (err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. cjson.encode(data)) + + local entry = data.data.result[1] + assert(entry.stream.request_headers_test_header == "only-for-test#2", + "expected field request_headers_test_header value: " .. 
cjson.encode(entry)) + } + } +--- error_code: 200 + + + +=== TEST 8: setup route (with tenant_id) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://127.0.0.1:3100"], + "tenant_id": "tenant_2", + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit route +--- request +GET /hello +--- more_headers +test-header: only-for-test#3 +--- response_body +hello world + + + +=== TEST 10: check loki log (with tenant_id tenant_1) +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 10000) .. "000000", -- from + tostring(now) .. "000000" -- to + ) + + assert(err == nil, "fetch logs error: " .. (err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. cjson.encode(data)) + + local entry = data.data.result[1] + assert(entry.stream.request_headers_test_header ~= "only-for-test#3", + "expected field request_headers_test_header value: " .. cjson.encode(entry)) + } + } +--- error_code: 200 + + + +=== TEST 11: check loki log (with tenant_id tenant_2) +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 3000) .. "000000", -- from + tostring(now) .. "000000", -- to + { headers = { + ["X-Scope-OrgID"] = "tenant_2" + } } + ) + + assert(err == nil, "fetch logs error: " .. 
(err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. cjson.encode(data)) + + local entry = data.data.result[1] + assert(entry.stream.request_headers_test_header == "only-for-test#3", + "expected field request_headers_test_header value: " .. cjson.encode(entry)) + } + } +--- error_code: 200 + + + +=== TEST 12: setup route (with log_labels as variables) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://127.0.0.1:3100"], + "tenant_id": "tenant_1", + "log_labels": { + "custom_label": "$remote_addr" + }, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 14: check loki log (with custom_label) +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 3000) .. "000000", -- from + tostring(now) .. "000000", -- to + { query = [[{custom_label="127.0.0.1"} | json]] } + ) + + assert(err == nil, "fetch logs error: " .. (err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. 
cjson.encode(data)) + } + } +--- error_code: 200 + + + +=== TEST 15: setup route (test headers) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://127.0.0.1:1980"], + "endpoint_uri": "/log_request", + "headers": {"Authorization": "test1234"}, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: hit route (test headers) +--- request +GET /hello +--- response_body +hello world +--- error_log +go(): authorization: test1234 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/mcp-bridge.t b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp-bridge.t new file mode 100644 index 0000000..5598fc7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp-bridge.t @@ -0,0 +1,100 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + {command = "npx"}, + {}, + {command = 123}, + {command = "npx", args = { "-y", "test" }}, + {command = "npx", args = "test"}, + } + local plugin = require("apisix.plugins.mcp-bridge") + + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +done +property "command" is required +property "command" validation failed: wrong type: expected string, got number +done +property "args" validation failed: wrong type: expected array, got string + + + +=== TEST 2: setup route (mcp filesystem) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mcp-bridge": { + "base_uri": "/mcp", + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/"] + } + }, + "uri": "/mcp/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: test mcp client +--- timeout: 15 +--- exec +cd t/plugin/mcp && pnpm test 2>&1 +--- no_error_log +failed to execute the script with status +--- response_body eval +qr/PASS .\/bridge.spec.ts/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/assets/bridge-list-tools.json b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/assets/bridge-list-tools.json new file mode 100644 index 0000000..9eaf964 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/assets/bridge-list-tools.json @@ -0,0 +1,232 @@ +{ + "tools": [ + { + "name": "read_file", + "description": "Read the 
complete contents of a file from the file system. Handles various text encodings and provides detailed error messages if the file cannot be read. Use this tool when you need to examine the contents of a single file. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "read_multiple_files", + "description": "Read the contents of multiple files simultaneously. This is more efficient than reading files one by one when you need to analyze or compare multiple files. Each file's content is returned with its path as a reference. Failed reads for individual files won't stop the entire operation. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "paths": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "paths" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "write_file", + "description": "Create a new file or completely overwrite an existing file with new content. Use with caution as it will overwrite existing files without warning. Handles text content with proper encoding. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "content": { + "type": "string" + } + }, + "required": [ + "path", + "content" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "edit_file", + "description": "Make line-based edits to a text file. Each edit replaces exact line sequences with new content. Returns a git-style diff showing the changes made. 
Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "edits": { + "type": "array", + "items": { + "type": "object", + "properties": { + "oldText": { + "type": "string", + "description": "Text to search for - must match exactly" + }, + "newText": { + "type": "string", + "description": "Text to replace with" + } + }, + "required": [ + "oldText", + "newText" + ], + "additionalProperties": false + } + }, + "dryRun": { + "type": "boolean", + "default": false, + "description": "Preview changes using git-style diff format" + } + }, + "required": [ + "path", + "edits" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "create_directory", + "description": "Create a new directory or ensure a directory exists. Can create multiple nested directories in one operation. If the directory already exists, this operation will succeed silently. Perfect for setting up directory structures for projects or ensuring required paths exist. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "list_directory", + "description": "Get a detailed listing of all files and directories in a specified path. Results clearly distinguish between files and directories with [FILE] and [DIR] prefixes. This tool is essential for understanding directory structure and finding specific files within a directory. 
Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "directory_tree", + "description": "Get a recursive tree view of files and directories as a JSON structure. Each entry includes 'name', 'type' (file/directory), and 'children' for directories. Files have no children array, while directories always have a children array (which may be empty). The output is formatted with 2-space indentation for readability. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "move_file", + "description": "Move or rename files and directories. Can move files between directories and rename them in a single operation. If the destination exists, the operation will fail. Works across different directories and can be used for simple renaming within the same directory. Both source and destination must be within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "source": { + "type": "string" + }, + "destination": { + "type": "string" + } + }, + "required": [ + "source", + "destination" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "search_files", + "description": "Recursively search for files and directories matching a pattern. Searches through all subdirectories from the starting path. The search is case-insensitive and matches partial names. Returns full paths to all matching items. Great for finding files when you don't know their exact location. 
Only searches within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "pattern": { + "type": "string" + }, + "excludePatterns": { + "type": "array", + "items": { + "type": "string" + }, + "default": [] + } + }, + "required": [ + "path", + "pattern" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "get_file_info", + "description": "Retrieve detailed metadata about a file or directory. Returns comprehensive information including size, creation time, last modified time, permissions, and type. This tool is perfect for understanding file characteristics without reading the actual content. Only works within allowed directories.", + "inputSchema": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + }, + "required": [ + "path" + ], + "additionalProperties": false, + "$schema": "http://json-schema.org/draft-07/schema#" + } + }, + { + "name": "list_allowed_directories", + "description": "Returns the list of directories that this server is allowed to access. Use this to understand which directories are available before trying to access files.", + "inputSchema": { + "type": "object", + "properties": {}, + "required": [] + } + } + ] +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/bridge.spec.ts b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/bridge.spec.ts new file mode 100644 index 0000000..41df2c3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/bridge.spec.ts @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { Client } from "@modelcontextprotocol/sdk/client/index.js"; +import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js"; +import { readFileSync } from "node:fs"; +import { unlink, writeFile } from "node:fs/promises"; +import path from "node:path"; + +const tools = JSON.parse( + readFileSync(`./assets/bridge-list-tools.json`, "utf-8") +); +const sseEndpoint = new URL("http://localhost:1984/mcp/sse"); + +describe("mcp-bridge", () => { + let client: Client; + + beforeEach(async () => { + client = new Client({ name: "apisix-e2e-test", version: "1.0.0" }); + await expect( + client.connect(new SSEClientTransport(sseEndpoint)) + ).resolves.not.toThrow(); + }); + + afterEach(() => expect(client.close()).resolves.not.toThrow()); + + it("should list tools", () => + expect(client.listTools()).resolves.toMatchObject(tools)); + + it("should call tool", async () => { + const result = await client.callTool({ + name: "list_directory", + arguments: { path: "/" }, + }); + expect(result.content[0].text).toContain("[DIR] "); + }); + + it("should call both clients at the same time", async () => { + // write test file + await writeFile("/tmp/test.txt", "test file"); + + // create client2 + const client2 = new Client({ name: "apisix-e2e-test", version: "1.0.0" }); + await expect( + client2.connect(new SSEClientTransport(sseEndpoint)) + ).resolves.not.toThrow(); + + // list tools both clients + await expect(client.listTools()).resolves.toMatchObject(tools); + await expect(client2.listTools()).resolves.toMatchObject(tools); + + // list 
directory both clients + const result1 = await client.callTool({ + name: "list_directory", + arguments: { path: "/" }, + }); + const result2 = await client2.callTool({ + name: "list_directory", + arguments: { path: "/tmp" }, + }); + expect(result1.content[0].text).toContain("[DIR] home"); + expect(result2.content[0].text).toContain("[FILE] test.txt"); + + // close client2 + await expect(client2.close()).resolves.not.toThrow(); + + // remove test file + await unlink("/tmp/test.txt"); + }); +}); diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/jest.config.ts b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/jest.config.ts new file mode 100644 index 0000000..b72e3e3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/jest.config.ts @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import type {Config} from 'jest'; + +const config: Config = { + coverageProvider: "v8", + testEnvironment: "node", + transform: { + "^.+\.tsx?$": ["ts-jest",{}], + }, +}; + +export default config; diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/package.json b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/package.json new file mode 100644 index 0000000..685ae44 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/package.json @@ -0,0 +1,15 @@ +{ + "name": "mcp-test-suite", + "private": true, + "scripts": { + "test": "jest" + }, + "devDependencies": { + "@types/jest": "^29.5.14", + "@types/node": "^22.14.1", + "jest": "^29.7.0", + "ts-jest": "^29.3.2", + "@modelcontextprotocol/sdk": "^1.9.0", + "ts-node": "^10.9.2" + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/pnpm-lock.yaml b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/pnpm-lock.yaml new file mode 100644 index 0000000..4bfcef3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/pnpm-lock.yaml @@ -0,0 +1,3304 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + devDependencies: + '@modelcontextprotocol/sdk': + specifier: ^1.9.0 + version: 1.9.0 + '@types/jest': + specifier: ^29.5.14 + version: 29.5.14 + '@types/node': + specifier: ^22.14.1 + version: 22.14.1 + jest: + specifier: ^29.7.0 + version: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + ts-jest: + specifier: ^29.3.2 + version: 29.3.2(@babel/core@7.26.10)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.10))(jest@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)))(typescript@5.8.3) + ts-node: + specifier: ^10.9.2 + version: 10.9.2(@types/node@22.14.1)(typescript@5.8.3) + +packages: + + '@ampproject/remapping@2.3.0': + resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} + engines: {node: '>=6.0.0'} + + '@babel/code-frame@7.26.2': + resolution: {integrity: sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==} + engines: {node: '>=6.9.0'} + + '@babel/compat-data@7.26.8': + resolution: {integrity: sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ==} + engines: {node: '>=6.9.0'} + + '@babel/core@7.26.10': + resolution: {integrity: sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ==} + engines: {node: '>=6.9.0'} + + '@babel/generator@7.27.0': + resolution: {integrity: sha512-VybsKvpiN1gU1sdMZIp7FcqphVVKEwcuj02x73uvcHE0PTihx1nlBcowYWhDwjpoAXRv43+gDzyggGnn1XZhVw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-compilation-targets@7.27.0': + resolution: {integrity: sha512-LVk7fbXml0H2xH34dFzKQ7TDZ2G4/rVTOrq9V+icbbadjbVxxeFeDsNHv2SrZeWoA+6ZiTyWYWtScEIW07EAcA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-imports@7.25.9': + resolution: {integrity: 
sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-transforms@7.26.0': + resolution: {integrity: sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-plugin-utils@7.26.5': + resolution: {integrity: sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg==} + engines: {node: '>=6.9.0'} + + '@babel/helper-string-parser@7.25.9': + resolution: {integrity: sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.25.9': + resolution: {integrity: sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-option@7.25.9': + resolution: {integrity: sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==} + engines: {node: '>=6.9.0'} + + '@babel/helpers@7.27.0': + resolution: {integrity: sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.27.0': + resolution: {integrity: sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/plugin-syntax-async-generators@7.8.4': + resolution: {integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-bigint@7.8.3': + resolution: {integrity: sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + 
'@babel/plugin-syntax-class-properties@7.12.13': + resolution: {integrity: sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-class-static-block@7.14.5': + resolution: {integrity: sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-import-attributes@7.26.0': + resolution: {integrity: sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-import-meta@7.10.4': + resolution: {integrity: sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-json-strings@7.8.3': + resolution: {integrity: sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-jsx@7.25.9': + resolution: {integrity: sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-logical-assignment-operators@7.10.4': + resolution: {integrity: sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-nullish-coalescing-operator@7.8.3': + resolution: {integrity: sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-numeric-separator@7.10.4': + resolution: {integrity: 
sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-object-rest-spread@7.8.3': + resolution: {integrity: sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-optional-catch-binding@7.8.3': + resolution: {integrity: sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-optional-chaining@7.8.3': + resolution: {integrity: sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-private-property-in-object@7.14.5': + resolution: {integrity: sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-top-level-await@7.14.5': + resolution: {integrity: sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-typescript@7.25.9': + resolution: {integrity: sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/template@7.27.0': + resolution: {integrity: sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==} + engines: {node: '>=6.9.0'} + + '@babel/traverse@7.27.0': + resolution: {integrity: sha512-19lYZFzYVQkkHkl4Cy4WrAVcqBkgvV2YM2TU3xG6DIwO7O3ecbDPfW3yM3bjAGcqcQHi+CCtjMR3dIEHxsd6bA==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.27.0': + resolution: {integrity: 
sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==} + engines: {node: '>=6.9.0'} + + '@bcoe/v8-coverage@0.2.3': + resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==} + + '@cspotcode/source-map-support@0.8.1': + resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} + engines: {node: '>=12'} + + '@istanbuljs/load-nyc-config@1.1.0': + resolution: {integrity: sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==} + engines: {node: '>=8'} + + '@istanbuljs/schema@0.1.3': + resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==} + engines: {node: '>=8'} + + '@jest/console@29.7.0': + resolution: {integrity: sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/core@29.7.0': + resolution: {integrity: sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + '@jest/environment@29.7.0': + resolution: {integrity: sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/expect-utils@29.7.0': + resolution: {integrity: sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/expect@29.7.0': + resolution: {integrity: sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + 
'@jest/fake-timers@29.7.0': + resolution: {integrity: sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/globals@29.7.0': + resolution: {integrity: sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/reporters@29.7.0': + resolution: {integrity: sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + '@jest/schemas@29.6.3': + resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/source-map@29.6.3': + resolution: {integrity: sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/test-result@29.7.0': + resolution: {integrity: sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/test-sequencer@29.7.0': + resolution: {integrity: sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/transform@29.7.0': + resolution: {integrity: sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/types@29.6.3': + resolution: {integrity: sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + 
'@jridgewell/gen-mapping@0.3.8': + resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==} + engines: {node: '>=6.0.0'} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/set-array@1.2.1': + resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.0': + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} + + '@jridgewell/trace-mapping@0.3.25': + resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + + '@jridgewell/trace-mapping@0.3.9': + resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + + '@modelcontextprotocol/sdk@1.9.0': + resolution: {integrity: sha512-Jq2EUCQpe0iyO5FGpzVYDNFR6oR53AIrwph9yWl7uSc7IWUMsrmpmSaTGra5hQNunXpM+9oit85p924jWuHzUA==} + engines: {node: '>=18'} + + '@sinclair/typebox@0.27.8': + resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} + + '@sinonjs/commons@3.0.1': + resolution: {integrity: sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==} + + '@sinonjs/fake-timers@10.3.0': + resolution: {integrity: sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==} + + '@tsconfig/node10@1.0.11': + resolution: {integrity: sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==} + + '@tsconfig/node12@1.0.11': + resolution: {integrity: 
sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} + + '@tsconfig/node14@1.0.3': + resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} + + '@tsconfig/node16@1.0.4': + resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} + + '@types/babel__core@7.20.5': + resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} + + '@types/babel__generator@7.27.0': + resolution: {integrity: sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==} + + '@types/babel__template@7.4.4': + resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} + + '@types/babel__traverse@7.20.7': + resolution: {integrity: sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng==} + + '@types/graceful-fs@4.1.9': + resolution: {integrity: sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==} + + '@types/istanbul-lib-coverage@2.0.6': + resolution: {integrity: sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==} + + '@types/istanbul-lib-report@3.0.3': + resolution: {integrity: sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==} + + '@types/istanbul-reports@3.0.4': + resolution: {integrity: sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==} + + '@types/jest@29.5.14': + resolution: {integrity: sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==} + + '@types/node@22.14.1': + resolution: {integrity: sha512-u0HuPQwe/dHrItgHHpmw3N2fYCR6x4ivMNbPHRkBVP4CvN+kiRrKHWk3i8tXiO/joPwXLMYvF9TTF0eqgHIuOw==} + + 
'@types/stack-utils@2.0.3': + resolution: {integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==} + + '@types/yargs-parser@21.0.3': + resolution: {integrity: sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==} + + '@types/yargs@17.0.33': + resolution: {integrity: sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==} + + accepts@2.0.0: + resolution: {integrity: sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==} + engines: {node: '>= 0.6'} + + acorn-walk@8.3.4: + resolution: {integrity: sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==} + engines: {node: '>=0.4.0'} + + acorn@8.14.1: + resolution: {integrity: sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==} + engines: {node: '>=0.4.0'} + hasBin: true + + ansi-escapes@4.3.2: + resolution: {integrity: sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==} + engines: {node: '>=8'} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + ansi-styles@5.2.0: + resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} + engines: {node: '>=10'} + + anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + arg@4.1.3: + resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} + + 
argparse@1.0.10: + resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==} + + async@3.2.6: + resolution: {integrity: sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==} + + babel-jest@29.7.0: + resolution: {integrity: sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@babel/core': ^7.8.0 + + babel-plugin-istanbul@6.1.1: + resolution: {integrity: sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==} + engines: {node: '>=8'} + + babel-plugin-jest-hoist@29.6.3: + resolution: {integrity: sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + babel-preset-current-node-syntax@1.1.0: + resolution: {integrity: sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==} + peerDependencies: + '@babel/core': ^7.0.0 + + babel-preset-jest@29.6.3: + resolution: {integrity: sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@babel/core': ^7.0.0 + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + body-parser@2.2.0: + resolution: {integrity: sha512-02qvAaxv8tp7fBa/mw1ga98OGm+eCbqzJOKoRt70sLmfEEi+jyBYVTDGfCL/k06/4EMk/z01gCe7HoCH/f2LTg==} + engines: {node: '>=18'} + + brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + + brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + + 
braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + + browserslist@4.24.4: + resolution: {integrity: sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + bs-logger@0.2.6: + resolution: {integrity: sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==} + engines: {node: '>= 6'} + + bser@2.1.1: + resolution: {integrity: sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==} + + buffer-from@1.1.2: + resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + + bytes@3.1.2: + resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} + engines: {node: '>= 0.8'} + + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + call-bound@1.0.4: + resolution: {integrity: sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==} + engines: {node: '>= 0.4'} + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + camelcase@5.3.1: + resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} + engines: {node: '>=6'} + + camelcase@6.3.0: + resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + + caniuse-lite@1.0.30001714: + resolution: {integrity: 
sha512-mtgapdwDLSSBnCI3JokHM7oEQBLxiJKVRtg10AxM1AyeiKcM96f0Mkbqeq+1AbiCtvMcHRulAAEMu693JrSWqg==} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + char-regex@1.0.2: + resolution: {integrity: sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==} + engines: {node: '>=10'} + + ci-info@3.9.0: + resolution: {integrity: sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==} + engines: {node: '>=8'} + + cjs-module-lexer@1.4.3: + resolution: {integrity: sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==} + + cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: '>=12'} + + co@4.6.0: + resolution: {integrity: sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==} + engines: {iojs: '>= 1.0.0', node: '>= 0.12.0'} + + collect-v8-coverage@1.0.2: + resolution: {integrity: sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + content-disposition@1.0.0: + resolution: {integrity: sha512-Au9nRL8VNUut/XSzbQA38+M78dzP4D+eqg3gfJHMIHHYa3bg067xj1KxMUWj+VULbiZMowKngFFbKczUrNJ1mg==} + engines: {node: '>= 0.6'} + + content-type@1.0.5: + resolution: {integrity: 
sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} + engines: {node: '>= 0.6'} + + convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + + cookie-signature@1.2.2: + resolution: {integrity: sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==} + engines: {node: '>=6.6.0'} + + cookie@0.7.2: + resolution: {integrity: sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==} + engines: {node: '>= 0.6'} + + cors@2.8.5: + resolution: {integrity: sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==} + engines: {node: '>= 0.10'} + + create-jest@29.7.0: + resolution: {integrity: sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + + create-require@1.1.1: + resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + debug@4.4.0: + resolution: {integrity: sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + dedent@1.5.3: + resolution: {integrity: sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ==} + peerDependencies: + babel-plugin-macros: ^3.1.0 + peerDependenciesMeta: + babel-plugin-macros: + optional: true + + deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + 
engines: {node: '>=0.10.0'} + + depd@2.0.0: + resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} + engines: {node: '>= 0.8'} + + detect-newline@3.1.0: + resolution: {integrity: sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==} + engines: {node: '>=8'} + + diff-sequences@29.6.3: + resolution: {integrity: sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + diff@4.0.2: + resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} + engines: {node: '>=0.3.1'} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + + ee-first@1.1.1: + resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} + + ejs@3.1.10: + resolution: {integrity: sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==} + engines: {node: '>=0.10.0'} + hasBin: true + + electron-to-chromium@1.5.137: + resolution: {integrity: sha512-/QSJaU2JyIuTbbABAo/crOs+SuAZLS+fVVS10PVrIT9hrRkmZl8Hb0xPSkKRUUWHQtYzXHpQUW3Dy5hwMzGZkA==} + + emittery@0.13.1: + resolution: {integrity: sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==} + engines: {node: '>=12'} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + encodeurl@2.0.0: + resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} + engines: {node: '>= 0.8'} + + error-ex@1.3.2: + resolution: {integrity: 
sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} + + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-object-atoms@1.1.1: + resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-html@1.0.3: + resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} + + escape-string-regexp@2.0.0: + resolution: {integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==} + engines: {node: '>=8'} + + esprima@4.0.1: + resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} + engines: {node: '>=4'} + hasBin: true + + etag@1.8.1: + resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} + engines: {node: '>= 0.6'} + + eventsource-parser@3.0.1: + resolution: {integrity: sha512-VARTJ9CYeuQYb0pZEPbzi740OWFgpHe7AYJ2WFZVnUDUQp5Dk2yJUgF36YsZ81cOyxT0QxmXD2EQpapAouzWVA==} + engines: {node: '>=18.0.0'} + + eventsource@3.0.6: + resolution: {integrity: sha512-l19WpE2m9hSuyP06+FbuUUf1G+R0SFLrtQfbRb9PRr+oimOfxQhgGCbVaXg5IvZyyTThJsxh6L/srkMiCeBPDA==} + engines: {node: '>=18.0.0'} + + execa@5.1.1: + resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} + engines: {node: '>=10'} + + 
exit@0.1.2: + resolution: {integrity: sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==} + engines: {node: '>= 0.8.0'} + + expect@29.7.0: + resolution: {integrity: sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + express-rate-limit@7.5.0: + resolution: {integrity: sha512-eB5zbQh5h+VenMPM3fh+nw1YExi5nMr6HUCR62ELSP11huvxm/Uir1H1QEyTkk5QX6A58pX6NmaTMceKZ0Eodg==} + engines: {node: '>= 16'} + peerDependencies: + express: ^4.11 || 5 || ^5.0.0-beta.1 + + express@5.1.0: + resolution: {integrity: sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA==} + engines: {node: '>= 18'} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fb-watchman@2.0.2: + resolution: {integrity: sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==} + + filelist@1.0.4: + resolution: {integrity: sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==} + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + finalhandler@2.1.0: + resolution: {integrity: sha512-/t88Ty3d5JWQbWYgaOGCCYfXRwV1+be02WqYYlL6h0lEiUAMPM8o8qKGO01YIkOHzka2up08wvgYD0mDiI+q3Q==} + engines: {node: '>= 0.8'} + + find-up@4.1.0: + resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} + engines: {node: '>=8'} + + forwarded@0.2.0: + resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} + engines: {node: '>= 0.6'} + + fresh@2.0.0: + resolution: {integrity: 
sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==} + engines: {node: '>= 0.8'} + + fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + gensync@1.0.0-beta.2: + resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + get-package-type@0.1.0: + resolution: {integrity: sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==} + engines: {node: '>=8.0.0'} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + get-stream@6.0.1: + resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} + engines: {node: '>=10'} + + glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + deprecated: Glob versions prior to v9 are no longer supported + + globals@11.12.0: + resolution: {integrity: 
sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} + engines: {node: '>=4'} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + html-escaper@2.0.2: + resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} + + http-errors@2.0.0: + resolution: {integrity: sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==} + engines: {node: '>= 0.8'} + + human-signals@2.1.0: + resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} + engines: {node: '>=10.17.0'} + + iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + + import-local@3.2.0: + resolution: {integrity: sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==} + engines: {node: '>=8'} + hasBin: true + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + 
inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + ipaddr.js@1.9.1: + resolution: {integrity: sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==} + engines: {node: '>= 0.10'} + + is-arrayish@0.2.1: + resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + + is-core-module@2.16.1: + resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} + engines: {node: '>= 0.4'} + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-generator-fn@2.1.0: + resolution: {integrity: sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==} + engines: {node: '>=6'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + is-promise@4.0.0: + resolution: {integrity: sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==} + + is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + 
istanbul-lib-coverage@3.2.2: + resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==} + engines: {node: '>=8'} + + istanbul-lib-instrument@5.2.1: + resolution: {integrity: sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==} + engines: {node: '>=8'} + + istanbul-lib-instrument@6.0.3: + resolution: {integrity: sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==} + engines: {node: '>=10'} + + istanbul-lib-report@3.0.1: + resolution: {integrity: sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==} + engines: {node: '>=10'} + + istanbul-lib-source-maps@4.0.1: + resolution: {integrity: sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==} + engines: {node: '>=10'} + + istanbul-reports@3.1.7: + resolution: {integrity: sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==} + engines: {node: '>=8'} + + jake@10.9.2: + resolution: {integrity: sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA==} + engines: {node: '>=10'} + hasBin: true + + jest-changed-files@29.7.0: + resolution: {integrity: sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-circus@29.7.0: + resolution: {integrity: sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-cli@29.7.0: + resolution: {integrity: sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: 
true + + jest-config@29.7.0: + resolution: {integrity: sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@types/node': '*' + ts-node: '>=9.0.0' + peerDependenciesMeta: + '@types/node': + optional: true + ts-node: + optional: true + + jest-diff@29.7.0: + resolution: {integrity: sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-docblock@29.7.0: + resolution: {integrity: sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-each@29.7.0: + resolution: {integrity: sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-environment-node@29.7.0: + resolution: {integrity: sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-get-type@29.6.3: + resolution: {integrity: sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-haste-map@29.7.0: + resolution: {integrity: sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-leak-detector@29.7.0: + resolution: {integrity: sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-matcher-utils@29.7.0: + resolution: {integrity: sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + 
jest-message-util@29.7.0: + resolution: {integrity: sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-mock@29.7.0: + resolution: {integrity: sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-pnp-resolver@1.2.3: + resolution: {integrity: sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==} + engines: {node: '>=6'} + peerDependencies: + jest-resolve: '*' + peerDependenciesMeta: + jest-resolve: + optional: true + + jest-regex-util@29.6.3: + resolution: {integrity: sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-resolve-dependencies@29.7.0: + resolution: {integrity: sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-resolve@29.7.0: + resolution: {integrity: sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-runner@29.7.0: + resolution: {integrity: sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-runtime@29.7.0: + resolution: {integrity: sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-snapshot@29.7.0: + resolution: {integrity: sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-util@29.7.0: + resolution: {integrity: 
sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-validate@29.7.0: + resolution: {integrity: sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-watcher@29.7.0: + resolution: {integrity: sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-worker@29.7.0: + resolution: {integrity: sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest@29.7.0: + resolution: {integrity: sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-yaml@3.14.1: + resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} + hasBin: true + + jsesc@3.1.0: + resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} + engines: {node: '>=6'} + hasBin: true + + json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + + json5@2.2.3: + resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} + engines: {node: '>=6'} + hasBin: true + + kleur@3.0.3: + resolution: {integrity: 
sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==} + engines: {node: '>=6'} + + leven@3.1.0: + resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==} + engines: {node: '>=6'} + + lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + + locate-path@5.0.0: + resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} + engines: {node: '>=8'} + + lodash.memoize@4.1.2: + resolution: {integrity: sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==} + + lru-cache@5.1.1: + resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + + make-dir@4.0.0: + resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==} + engines: {node: '>=10'} + + make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + + makeerror@1.0.12: + resolution: {integrity: sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==} + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + media-typer@1.1.0: + resolution: {integrity: sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==} + engines: {node: '>= 0.8'} + + merge-descriptors@2.0.0: + resolution: {integrity: sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==} + engines: {node: '>=18'} + + merge-stream@2.0.0: + resolution: {integrity: 
sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} + + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + + mime-db@1.54.0: + resolution: {integrity: sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==} + engines: {node: '>= 0.6'} + + mime-types@3.0.1: + resolution: {integrity: sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA==} + engines: {node: '>= 0.6'} + + mimic-fn@2.1.0: + resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} + engines: {node: '>=6'} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@5.1.6: + resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} + engines: {node: '>=10'} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + negotiator@1.0.0: + resolution: {integrity: sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==} + engines: {node: '>= 0.6'} + + node-int64@0.4.0: + resolution: {integrity: sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==} + + node-releases@2.0.19: + resolution: {integrity: sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==} + + normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + 
engines: {node: '>=0.10.0'} + + npm-run-path@4.0.1: + resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==} + engines: {node: '>=8'} + + object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + object-inspect@1.13.4: + resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==} + engines: {node: '>= 0.4'} + + on-finished@2.4.1: + resolution: {integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==} + engines: {node: '>= 0.8'} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + onetime@5.1.2: + resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} + engines: {node: '>=6'} + + p-limit@2.3.0: + resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} + engines: {node: '>=6'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@4.1.0: + resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} + engines: {node: '>=8'} + + p-try@2.2.0: + resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} + engines: {node: '>=6'} + + parse-json@5.2.0: + resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: '>=8'} + + parseurl@1.3.3: + resolution: {integrity: 
sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} + engines: {node: '>= 0.8'} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + path-to-regexp@8.2.0: + resolution: {integrity: sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ==} + engines: {node: '>=16'} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + pirates@4.0.7: + resolution: {integrity: sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==} + engines: {node: '>= 6'} + + pkce-challenge@5.0.0: + resolution: {integrity: sha512-ueGLflrrnvwB3xuo/uGob5pd5FN7l0MsLf0Z87o/UQmRtwjvfylfc9MurIxRAWywCYTgrvpXBcqjV4OfCYGCIQ==} + engines: {node: '>=16.20.0'} + + pkg-dir@4.2.0: + resolution: {integrity: sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==} + engines: {node: '>=8'} + + pretty-format@29.7.0: + resolution: {integrity: sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + 
+ prompts@2.4.2: + resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==} + engines: {node: '>= 6'} + + proxy-addr@2.0.7: + resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} + engines: {node: '>= 0.10'} + + pure-rand@6.1.0: + resolution: {integrity: sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==} + + qs@6.14.0: + resolution: {integrity: sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==} + engines: {node: '>=0.6'} + + range-parser@1.2.1: + resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} + engines: {node: '>= 0.6'} + + raw-body@3.0.0: + resolution: {integrity: sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g==} + engines: {node: '>= 0.8'} + + react-is@18.3.1: + resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} + + require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + resolve-cwd@3.0.0: + resolution: {integrity: sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==} + engines: {node: '>=8'} + + resolve-from@5.0.0: + resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} + engines: {node: '>=8'} + + resolve.exports@2.0.3: + resolution: {integrity: sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==} + engines: {node: '>=10'} + + resolve@1.22.10: + resolution: {integrity: sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==} + engines: 
{node: '>= 0.4'} + hasBin: true + + router@2.2.0: + resolution: {integrity: sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==} + engines: {node: '>= 18'} + + safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + + safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + + semver@6.3.1: + resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} + hasBin: true + + semver@7.7.1: + resolution: {integrity: sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==} + engines: {node: '>=10'} + hasBin: true + + send@1.2.0: + resolution: {integrity: sha512-uaW0WwXKpL9blXE2o0bRhoL2EGXIrZxQ2ZQ4mgcfoBxdFmQold+qWsD2jLrfZ0trjKL6vOw0j//eAwcALFjKSw==} + engines: {node: '>= 18'} + + serve-static@2.2.0: + resolution: {integrity: sha512-61g9pCh0Vnh7IutZjtLGGpTA355+OPn2TyDv/6ivP2h/AdAVX9azsoxmg2/M6nZeQZNYBEwIcsne1mJd9oQItQ==} + engines: {node: '>= 18'} + + setprototypeof@1.2.0: + resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + side-channel-list@1.0.0: + resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} + engines: {node: '>= 0.4'} + + side-channel-map@1.0.1: + resolution: {integrity: sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==} 
+ engines: {node: '>= 0.4'} + + side-channel-weakmap@1.0.2: + resolution: {integrity: sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==} + engines: {node: '>= 0.4'} + + side-channel@1.1.0: + resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==} + engines: {node: '>= 0.4'} + + signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + + sisteransi@1.0.5: + resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} + + slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + + source-map-support@0.5.13: + resolution: {integrity: sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==} + + source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + + sprintf-js@1.0.3: + resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + + stack-utils@2.0.6: + resolution: {integrity: sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==} + engines: {node: '>=10'} + + statuses@2.0.1: + resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==} + engines: {node: '>= 0.8'} + + string-length@4.0.2: + resolution: {integrity: sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==} + engines: {node: '>=10'} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: 
'>=8'} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-bom@4.0.0: + resolution: {integrity: sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==} + engines: {node: '>=8'} + + strip-final-newline@2.0.0: + resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==} + engines: {node: '>=6'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + supports-color@8.1.1: + resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} + engines: {node: '>=10'} + + supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + test-exclude@6.0.0: + resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==} + engines: {node: '>=8'} + + tmpl@1.0.5: + resolution: {integrity: sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + toidentifier@1.0.1: + resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} + engines: {node: '>=0.6'} + + ts-jest@29.3.2: + resolution: {integrity: 
sha512-bJJkrWc6PjFVz5g2DGCNUo8z7oFEYaz1xP1NpeDU7KNLMWPpEyV8Chbpkn8xjzgRDpQhnGMyvyldoL7h8JXyug==} + engines: {node: ^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@babel/core': '>=7.0.0-beta.0 <8' + '@jest/transform': ^29.0.0 + '@jest/types': ^29.0.0 + babel-jest: ^29.0.0 + esbuild: '*' + jest: ^29.0.0 + typescript: '>=4.3 <6' + peerDependenciesMeta: + '@babel/core': + optional: true + '@jest/transform': + optional: true + '@jest/types': + optional: true + babel-jest: + optional: true + esbuild: + optional: true + + ts-node@10.9.2: + resolution: {integrity: sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==} + hasBin: true + peerDependencies: + '@swc/core': '>=1.2.50' + '@swc/wasm': '>=1.2.50' + '@types/node': '*' + typescript: '>=2.7' + peerDependenciesMeta: + '@swc/core': + optional: true + '@swc/wasm': + optional: true + + type-detect@4.0.8: + resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + engines: {node: '>=4'} + + type-fest@0.21.3: + resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==} + engines: {node: '>=10'} + + type-fest@4.40.0: + resolution: {integrity: sha512-ABHZ2/tS2JkvH1PEjxFDTUWC8dB5OsIGZP4IFLhR293GqT5Y5qB1WwL2kMPYhQW9DVgVD8Hd7I8gjwPIf5GFkw==} + engines: {node: '>=16'} + + type-is@2.0.1: + resolution: {integrity: sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==} + engines: {node: '>= 0.6'} + + typescript@5.8.3: + resolution: {integrity: sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==} + engines: {node: '>=14.17'} + hasBin: true + + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + + unpipe@1.0.0: + resolution: {integrity: 
sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} + engines: {node: '>= 0.8'} + + update-browserslist-db@1.1.3: + resolution: {integrity: sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + v8-compile-cache-lib@3.0.1: + resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} + + v8-to-istanbul@9.3.0: + resolution: {integrity: sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==} + engines: {node: '>=10.12.0'} + + vary@1.1.2: + resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} + engines: {node: '>= 0.8'} + + walker@1.0.8: + resolution: {integrity: sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + write-file-atomic@4.0.2: + resolution: {integrity: sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + + y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + + yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + + 
yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + + yargs@17.7.2: + resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} + engines: {node: '>=12'} + + yn@3.1.1: + resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} + engines: {node: '>=6'} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + zod-to-json-schema@3.24.5: + resolution: {integrity: sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g==} + peerDependencies: + zod: ^3.24.1 + + zod@3.24.3: + resolution: {integrity: sha512-HhY1oqzWCQWuUqvBFnsyrtZRhyPeR7SUGv+C4+MsisMuVfSPx8HpwWqH8tRahSlt6M3PiFAcoeFhZAqIXTxoSg==} + +snapshots: + + '@ampproject/remapping@2.3.0': + dependencies: + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + + '@babel/code-frame@7.26.2': + dependencies: + '@babel/helper-validator-identifier': 7.25.9 + js-tokens: 4.0.0 + picocolors: 1.1.1 + + '@babel/compat-data@7.26.8': {} + + '@babel/core@7.26.10': + dependencies: + '@ampproject/remapping': 2.3.0 + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.27.0 + '@babel/helper-compilation-targets': 7.27.0 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.10) + '@babel/helpers': 7.27.0 + '@babel/parser': 7.27.0 + '@babel/template': 7.27.0 + '@babel/traverse': 7.27.0 + '@babel/types': 7.27.0 + convert-source-map: 2.0.0 + debug: 4.4.0 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + '@babel/generator@7.27.0': + dependencies: + '@babel/parser': 7.27.0 + '@babel/types': 7.27.0 + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 
0.3.25 + jsesc: 3.1.0 + + '@babel/helper-compilation-targets@7.27.0': + dependencies: + '@babel/compat-data': 7.26.8 + '@babel/helper-validator-option': 7.25.9 + browserslist: 4.24.4 + lru-cache: 5.1.1 + semver: 6.3.1 + + '@babel/helper-module-imports@7.25.9': + dependencies: + '@babel/traverse': 7.27.0 + '@babel/types': 7.27.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-module-transforms@7.26.0(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-module-imports': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + '@babel/traverse': 7.27.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-plugin-utils@7.26.5': {} + + '@babel/helper-string-parser@7.25.9': {} + + '@babel/helper-validator-identifier@7.25.9': {} + + '@babel/helper-validator-option@7.25.9': {} + + '@babel/helpers@7.27.0': + dependencies: + '@babel/template': 7.27.0 + '@babel/types': 7.27.0 + + '@babel/parser@7.27.0': + dependencies: + '@babel/types': 7.27.0 + + '@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-bigint@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-import-attributes@7.26.0(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.26.10)': + dependencies: + 
'@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-jsx@7.25.9(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-typescript@7.25.9(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/template@7.27.0': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/parser': 7.27.0 + '@babel/types': 7.27.0 + + '@babel/traverse@7.27.0': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.27.0 + '@babel/parser': 7.27.0 + '@babel/template': 7.27.0 + '@babel/types': 7.27.0 + debug: 4.4.0 + globals: 11.12.0 + transitivePeerDependencies: + - 
supports-color + + '@babel/types@7.27.0': + dependencies: + '@babel/helper-string-parser': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + + '@bcoe/v8-coverage@0.2.3': {} + + '@cspotcode/source-map-support@0.8.1': + dependencies: + '@jridgewell/trace-mapping': 0.3.9 + + '@istanbuljs/load-nyc-config@1.1.0': + dependencies: + camelcase: 5.3.1 + find-up: 4.1.0 + get-package-type: 0.1.0 + js-yaml: 3.14.1 + resolve-from: 5.0.0 + + '@istanbuljs/schema@0.1.3': {} + + '@jest/console@29.7.0': + dependencies: + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + slash: 3.0.0 + + '@jest/core@29.7.0(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3))': + dependencies: + '@jest/console': 29.7.0 + '@jest/reporters': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + ci-info: 3.9.0 + exit: 0.1.2 + graceful-fs: 4.2.11 + jest-changed-files: 29.7.0 + jest-config: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + jest-haste-map: 29.7.0 + jest-message-util: 29.7.0 + jest-regex-util: 29.6.3 + jest-resolve: 29.7.0 + jest-resolve-dependencies: 29.7.0 + jest-runner: 29.7.0 + jest-runtime: 29.7.0 + jest-snapshot: 29.7.0 + jest-util: 29.7.0 + jest-validate: 29.7.0 + jest-watcher: 29.7.0 + micromatch: 4.0.8 + pretty-format: 29.7.0 + slash: 3.0.0 + strip-ansi: 6.0.1 + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + - ts-node + + '@jest/environment@29.7.0': + dependencies: + '@jest/fake-timers': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + jest-mock: 29.7.0 + + '@jest/expect-utils@29.7.0': + dependencies: + jest-get-type: 29.6.3 + + '@jest/expect@29.7.0': + dependencies: + expect: 29.7.0 + jest-snapshot: 29.7.0 + transitivePeerDependencies: + - supports-color + + '@jest/fake-timers@29.7.0': + dependencies: + '@jest/types': 29.6.3 + 
'@sinonjs/fake-timers': 10.3.0 + '@types/node': 22.14.1 + jest-message-util: 29.7.0 + jest-mock: 29.7.0 + jest-util: 29.7.0 + + '@jest/globals@29.7.0': + dependencies: + '@jest/environment': 29.7.0 + '@jest/expect': 29.7.0 + '@jest/types': 29.6.3 + jest-mock: 29.7.0 + transitivePeerDependencies: + - supports-color + + '@jest/reporters@29.7.0': + dependencies: + '@bcoe/v8-coverage': 0.2.3 + '@jest/console': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@jridgewell/trace-mapping': 0.3.25 + '@types/node': 22.14.1 + chalk: 4.1.2 + collect-v8-coverage: 1.0.2 + exit: 0.1.2 + glob: 7.2.3 + graceful-fs: 4.2.11 + istanbul-lib-coverage: 3.2.2 + istanbul-lib-instrument: 6.0.3 + istanbul-lib-report: 3.0.1 + istanbul-lib-source-maps: 4.0.1 + istanbul-reports: 3.1.7 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + jest-worker: 29.7.0 + slash: 3.0.0 + string-length: 4.0.2 + strip-ansi: 6.0.1 + v8-to-istanbul: 9.3.0 + transitivePeerDependencies: + - supports-color + + '@jest/schemas@29.6.3': + dependencies: + '@sinclair/typebox': 0.27.8 + + '@jest/source-map@29.6.3': + dependencies: + '@jridgewell/trace-mapping': 0.3.25 + callsites: 3.1.0 + graceful-fs: 4.2.11 + + '@jest/test-result@29.7.0': + dependencies: + '@jest/console': 29.7.0 + '@jest/types': 29.6.3 + '@types/istanbul-lib-coverage': 2.0.6 + collect-v8-coverage: 1.0.2 + + '@jest/test-sequencer@29.7.0': + dependencies: + '@jest/test-result': 29.7.0 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + slash: 3.0.0 + + '@jest/transform@29.7.0': + dependencies: + '@babel/core': 7.26.10 + '@jest/types': 29.6.3 + '@jridgewell/trace-mapping': 0.3.25 + babel-plugin-istanbul: 6.1.1 + chalk: 4.1.2 + convert-source-map: 2.0.0 + fast-json-stable-stringify: 2.1.0 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + jest-regex-util: 29.6.3 + jest-util: 29.7.0 + micromatch: 4.0.8 + pirates: 4.0.7 + slash: 3.0.0 + write-file-atomic: 4.0.2 + transitivePeerDependencies: + - supports-color + + 
'@jest/types@29.6.3': + dependencies: + '@jest/schemas': 29.6.3 + '@types/istanbul-lib-coverage': 2.0.6 + '@types/istanbul-reports': 3.0.4 + '@types/node': 22.14.1 + '@types/yargs': 17.0.33 + chalk: 4.1.2 + + '@jridgewell/gen-mapping@0.3.8': + dependencies: + '@jridgewell/set-array': 1.2.1 + '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/trace-mapping': 0.3.25 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/set-array@1.2.1': {} + + '@jridgewell/sourcemap-codec@1.5.0': {} + + '@jridgewell/trace-mapping@0.3.25': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@jridgewell/trace-mapping@0.3.9': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@modelcontextprotocol/sdk@1.9.0': + dependencies: + content-type: 1.0.5 + cors: 2.8.5 + cross-spawn: 7.0.6 + eventsource: 3.0.6 + express: 5.1.0 + express-rate-limit: 7.5.0(express@5.1.0) + pkce-challenge: 5.0.0 + raw-body: 3.0.0 + zod: 3.24.3 + zod-to-json-schema: 3.24.5(zod@3.24.3) + transitivePeerDependencies: + - supports-color + + '@sinclair/typebox@0.27.8': {} + + '@sinonjs/commons@3.0.1': + dependencies: + type-detect: 4.0.8 + + '@sinonjs/fake-timers@10.3.0': + dependencies: + '@sinonjs/commons': 3.0.1 + + '@tsconfig/node10@1.0.11': {} + + '@tsconfig/node12@1.0.11': {} + + '@tsconfig/node14@1.0.3': {} + + '@tsconfig/node16@1.0.4': {} + + '@types/babel__core@7.20.5': + dependencies: + '@babel/parser': 7.27.0 + '@babel/types': 7.27.0 + '@types/babel__generator': 7.27.0 + '@types/babel__template': 7.4.4 + '@types/babel__traverse': 7.20.7 + + '@types/babel__generator@7.27.0': + dependencies: + '@babel/types': 7.27.0 + + '@types/babel__template@7.4.4': + dependencies: + '@babel/parser': 7.27.0 + '@babel/types': 7.27.0 + + '@types/babel__traverse@7.20.7': + dependencies: + '@babel/types': 7.27.0 + + '@types/graceful-fs@4.1.9': + dependencies: + '@types/node': 22.14.1 + + '@types/istanbul-lib-coverage@2.0.6': {} + + 
'@types/istanbul-lib-report@3.0.3': + dependencies: + '@types/istanbul-lib-coverage': 2.0.6 + + '@types/istanbul-reports@3.0.4': + dependencies: + '@types/istanbul-lib-report': 3.0.3 + + '@types/jest@29.5.14': + dependencies: + expect: 29.7.0 + pretty-format: 29.7.0 + + '@types/node@22.14.1': + dependencies: + undici-types: 6.21.0 + + '@types/stack-utils@2.0.3': {} + + '@types/yargs-parser@21.0.3': {} + + '@types/yargs@17.0.33': + dependencies: + '@types/yargs-parser': 21.0.3 + + accepts@2.0.0: + dependencies: + mime-types: 3.0.1 + negotiator: 1.0.0 + + acorn-walk@8.3.4: + dependencies: + acorn: 8.14.1 + + acorn@8.14.1: {} + + ansi-escapes@4.3.2: + dependencies: + type-fest: 0.21.3 + + ansi-regex@5.0.1: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + ansi-styles@5.2.0: {} + + anymatch@3.1.3: + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + arg@4.1.3: {} + + argparse@1.0.10: + dependencies: + sprintf-js: 1.0.3 + + async@3.2.6: {} + + babel-jest@29.7.0(@babel/core@7.26.10): + dependencies: + '@babel/core': 7.26.10 + '@jest/transform': 29.7.0 + '@types/babel__core': 7.20.5 + babel-plugin-istanbul: 6.1.1 + babel-preset-jest: 29.6.3(@babel/core@7.26.10) + chalk: 4.1.2 + graceful-fs: 4.2.11 + slash: 3.0.0 + transitivePeerDependencies: + - supports-color + + babel-plugin-istanbul@6.1.1: + dependencies: + '@babel/helper-plugin-utils': 7.26.5 + '@istanbuljs/load-nyc-config': 1.1.0 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-instrument: 5.2.1 + test-exclude: 6.0.0 + transitivePeerDependencies: + - supports-color + + babel-plugin-jest-hoist@29.6.3: + dependencies: + '@babel/template': 7.27.0 + '@babel/types': 7.27.0 + '@types/babel__core': 7.20.5 + '@types/babel__traverse': 7.20.7 + + babel-preset-current-node-syntax@1.1.0(@babel/core@7.26.10): + dependencies: + '@babel/core': 7.26.10 + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.26.10) + '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.26.10) + 
'@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.26.10) + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.26.10) + '@babel/plugin-syntax-import-attributes': 7.26.0(@babel/core@7.26.10) + '@babel/plugin-syntax-import-meta': 7.10.4(@babel/core@7.26.10) + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.26.10) + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.26.10) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.26.10) + '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.26.10) + + babel-preset-jest@29.6.3(@babel/core@7.26.10): + dependencies: + '@babel/core': 7.26.10 + babel-plugin-jest-hoist: 29.6.3 + babel-preset-current-node-syntax: 1.1.0(@babel/core@7.26.10) + + balanced-match@1.0.2: {} + + body-parser@2.2.0: + dependencies: + bytes: 3.1.2 + content-type: 1.0.5 + debug: 4.4.0 + http-errors: 2.0.0 + iconv-lite: 0.6.3 + on-finished: 2.4.1 + qs: 6.14.0 + raw-body: 3.0.0 + type-is: 2.0.1 + transitivePeerDependencies: + - supports-color + + brace-expansion@1.1.11: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.1: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + browserslist@4.24.4: + dependencies: + caniuse-lite: 1.0.30001714 + electron-to-chromium: 1.5.137 + node-releases: 2.0.19 + update-browserslist-db: 1.1.3(browserslist@4.24.4) + + bs-logger@0.2.6: + dependencies: + fast-json-stable-stringify: 2.1.0 + + bser@2.1.1: + dependencies: + node-int64: 0.4.0 + + buffer-from@1.1.2: {} + + bytes@3.1.2: {} + + 
call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + call-bound@1.0.4: + dependencies: + call-bind-apply-helpers: 1.0.2 + get-intrinsic: 1.3.0 + + callsites@3.1.0: {} + + camelcase@5.3.1: {} + + camelcase@6.3.0: {} + + caniuse-lite@1.0.30001714: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + char-regex@1.0.2: {} + + ci-info@3.9.0: {} + + cjs-module-lexer@1.4.3: {} + + cliui@8.0.1: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + + co@4.6.0: {} + + collect-v8-coverage@1.0.2: {} + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + concat-map@0.0.1: {} + + content-disposition@1.0.0: + dependencies: + safe-buffer: 5.2.1 + + content-type@1.0.5: {} + + convert-source-map@2.0.0: {} + + cookie-signature@1.2.2: {} + + cookie@0.7.2: {} + + cors@2.8.5: + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + + create-jest@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)): + dependencies: + '@jest/types': 29.6.3 + chalk: 4.1.2 + exit: 0.1.2 + graceful-fs: 4.2.11 + jest-config: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + jest-util: 29.7.0 + prompts: 2.4.2 + transitivePeerDependencies: + - '@types/node' + - babel-plugin-macros + - supports-color + - ts-node + + create-require@1.1.1: {} + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + debug@4.4.0: + dependencies: + ms: 2.1.3 + + dedent@1.5.3: {} + + deepmerge@4.3.1: {} + + depd@2.0.0: {} + + detect-newline@3.1.0: {} + + diff-sequences@29.6.3: {} + + diff@4.0.2: {} + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + ee-first@1.1.1: {} + + ejs@3.1.10: + dependencies: + jake: 10.9.2 + + electron-to-chromium@1.5.137: {} + + emittery@0.13.1: {} + + emoji-regex@8.0.0: {} + + encodeurl@2.0.0: {} + + error-ex@1.3.2: 
+ dependencies: + is-arrayish: 0.2.1 + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + escalade@3.2.0: {} + + escape-html@1.0.3: {} + + escape-string-regexp@2.0.0: {} + + esprima@4.0.1: {} + + etag@1.8.1: {} + + eventsource-parser@3.0.1: {} + + eventsource@3.0.6: + dependencies: + eventsource-parser: 3.0.1 + + execa@5.1.1: + dependencies: + cross-spawn: 7.0.6 + get-stream: 6.0.1 + human-signals: 2.1.0 + is-stream: 2.0.1 + merge-stream: 2.0.0 + npm-run-path: 4.0.1 + onetime: 5.1.2 + signal-exit: 3.0.7 + strip-final-newline: 2.0.0 + + exit@0.1.2: {} + + expect@29.7.0: + dependencies: + '@jest/expect-utils': 29.7.0 + jest-get-type: 29.6.3 + jest-matcher-utils: 29.7.0 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + + express-rate-limit@7.5.0(express@5.1.0): + dependencies: + express: 5.1.0 + + express@5.1.0: + dependencies: + accepts: 2.0.0 + body-parser: 2.2.0 + content-disposition: 1.0.0 + content-type: 1.0.5 + cookie: 0.7.2 + cookie-signature: 1.2.2 + debug: 4.4.0 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + finalhandler: 2.1.0 + fresh: 2.0.0 + http-errors: 2.0.0 + merge-descriptors: 2.0.0 + mime-types: 3.0.1 + on-finished: 2.4.1 + once: 1.4.0 + parseurl: 1.3.3 + proxy-addr: 2.0.7 + qs: 6.14.0 + range-parser: 1.2.1 + router: 2.2.0 + send: 1.2.0 + serve-static: 2.2.0 + statuses: 2.0.1 + type-is: 2.0.1 + vary: 1.1.2 + transitivePeerDependencies: + - supports-color + + fast-json-stable-stringify@2.1.0: {} + + fb-watchman@2.0.2: + dependencies: + bser: 2.1.1 + + filelist@1.0.4: + dependencies: + minimatch: 5.1.6 + + fill-range@7.1.1: + dependencies: + to-regex-range: 5.0.1 + + finalhandler@2.1.0: + dependencies: + debug: 4.4.0 + encodeurl: 2.0.0 + escape-html: 1.0.3 + on-finished: 2.4.1 + parseurl: 1.3.3 + statuses: 2.0.1 + transitivePeerDependencies: + - supports-color + + find-up@4.1.0: + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + + forwarded@0.2.0: {} + + fresh@2.0.0: 
{} + + fs.realpath@1.0.0: {} + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + gensync@1.0.0-beta.2: {} + + get-caller-file@2.0.5: {} + + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-package-type@0.1.0: {} + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 + + get-stream@6.0.1: {} + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + globals@11.12.0: {} + + gopd@1.2.0: {} + + graceful-fs@4.2.11: {} + + has-flag@4.0.0: {} + + has-symbols@1.1.0: {} + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + html-escaper@2.0.2: {} + + http-errors@2.0.0: + dependencies: + depd: 2.0.0 + inherits: 2.0.4 + setprototypeof: 1.2.0 + statuses: 2.0.1 + toidentifier: 1.0.1 + + human-signals@2.1.0: {} + + iconv-lite@0.6.3: + dependencies: + safer-buffer: 2.1.2 + + import-local@3.2.0: + dependencies: + pkg-dir: 4.2.0 + resolve-cwd: 3.0.0 + + imurmurhash@0.1.4: {} + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + ipaddr.js@1.9.1: {} + + is-arrayish@0.2.1: {} + + is-core-module@2.16.1: + dependencies: + hasown: 2.0.2 + + is-fullwidth-code-point@3.0.0: {} + + is-generator-fn@2.1.0: {} + + is-number@7.0.0: {} + + is-promise@4.0.0: {} + + is-stream@2.0.1: {} + + isexe@2.0.0: {} + + istanbul-lib-coverage@3.2.2: {} + + istanbul-lib-instrument@5.2.1: + dependencies: + '@babel/core': 7.26.10 + '@babel/parser': 7.27.0 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-coverage: 3.2.2 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + istanbul-lib-instrument@6.0.3: + dependencies: + '@babel/core': 7.26.10 + '@babel/parser': 7.27.0 + '@istanbuljs/schema': 0.1.3 + 
istanbul-lib-coverage: 3.2.2 + semver: 7.7.1 + transitivePeerDependencies: + - supports-color + + istanbul-lib-report@3.0.1: + dependencies: + istanbul-lib-coverage: 3.2.2 + make-dir: 4.0.0 + supports-color: 7.2.0 + + istanbul-lib-source-maps@4.0.1: + dependencies: + debug: 4.4.0 + istanbul-lib-coverage: 3.2.2 + source-map: 0.6.1 + transitivePeerDependencies: + - supports-color + + istanbul-reports@3.1.7: + dependencies: + html-escaper: 2.0.2 + istanbul-lib-report: 3.0.1 + + jake@10.9.2: + dependencies: + async: 3.2.6 + chalk: 4.1.2 + filelist: 1.0.4 + minimatch: 3.1.2 + + jest-changed-files@29.7.0: + dependencies: + execa: 5.1.1 + jest-util: 29.7.0 + p-limit: 3.1.0 + + jest-circus@29.7.0: + dependencies: + '@jest/environment': 29.7.0 + '@jest/expect': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + co: 4.6.0 + dedent: 1.5.3 + is-generator-fn: 2.1.0 + jest-each: 29.7.0 + jest-matcher-utils: 29.7.0 + jest-message-util: 29.7.0 + jest-runtime: 29.7.0 + jest-snapshot: 29.7.0 + jest-util: 29.7.0 + p-limit: 3.1.0 + pretty-format: 29.7.0 + pure-rand: 6.1.0 + slash: 3.0.0 + stack-utils: 2.0.6 + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + jest-cli@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)): + dependencies: + '@jest/core': 29.7.0(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + '@jest/test-result': 29.7.0 + '@jest/types': 29.6.3 + chalk: 4.1.2 + create-jest: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + exit: 0.1.2 + import-local: 3.2.0 + jest-config: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + jest-util: 29.7.0 + jest-validate: 29.7.0 + yargs: 17.7.2 + transitivePeerDependencies: + - '@types/node' + - babel-plugin-macros + - supports-color + - ts-node + + jest-config@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)): + 
dependencies: + '@babel/core': 7.26.10 + '@jest/test-sequencer': 29.7.0 + '@jest/types': 29.6.3 + babel-jest: 29.7.0(@babel/core@7.26.10) + chalk: 4.1.2 + ci-info: 3.9.0 + deepmerge: 4.3.1 + glob: 7.2.3 + graceful-fs: 4.2.11 + jest-circus: 29.7.0 + jest-environment-node: 29.7.0 + jest-get-type: 29.6.3 + jest-regex-util: 29.6.3 + jest-resolve: 29.7.0 + jest-runner: 29.7.0 + jest-util: 29.7.0 + jest-validate: 29.7.0 + micromatch: 4.0.8 + parse-json: 5.2.0 + pretty-format: 29.7.0 + slash: 3.0.0 + strip-json-comments: 3.1.1 + optionalDependencies: + '@types/node': 22.14.1 + ts-node: 10.9.2(@types/node@22.14.1)(typescript@5.8.3) + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + jest-diff@29.7.0: + dependencies: + chalk: 4.1.2 + diff-sequences: 29.6.3 + jest-get-type: 29.6.3 + pretty-format: 29.7.0 + + jest-docblock@29.7.0: + dependencies: + detect-newline: 3.1.0 + + jest-each@29.7.0: + dependencies: + '@jest/types': 29.6.3 + chalk: 4.1.2 + jest-get-type: 29.6.3 + jest-util: 29.7.0 + pretty-format: 29.7.0 + + jest-environment-node@29.7.0: + dependencies: + '@jest/environment': 29.7.0 + '@jest/fake-timers': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + jest-mock: 29.7.0 + jest-util: 29.7.0 + + jest-get-type@29.6.3: {} + + jest-haste-map@29.7.0: + dependencies: + '@jest/types': 29.6.3 + '@types/graceful-fs': 4.1.9 + '@types/node': 22.14.1 + anymatch: 3.1.3 + fb-watchman: 2.0.2 + graceful-fs: 4.2.11 + jest-regex-util: 29.6.3 + jest-util: 29.7.0 + jest-worker: 29.7.0 + micromatch: 4.0.8 + walker: 1.0.8 + optionalDependencies: + fsevents: 2.3.3 + + jest-leak-detector@29.7.0: + dependencies: + jest-get-type: 29.6.3 + pretty-format: 29.7.0 + + jest-matcher-utils@29.7.0: + dependencies: + chalk: 4.1.2 + jest-diff: 29.7.0 + jest-get-type: 29.6.3 + pretty-format: 29.7.0 + + jest-message-util@29.7.0: + dependencies: + '@babel/code-frame': 7.26.2 + '@jest/types': 29.6.3 + '@types/stack-utils': 2.0.3 + chalk: 4.1.2 + graceful-fs: 4.2.11 + 
micromatch: 4.0.8 + pretty-format: 29.7.0 + slash: 3.0.0 + stack-utils: 2.0.6 + + jest-mock@29.7.0: + dependencies: + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + jest-util: 29.7.0 + + jest-pnp-resolver@1.2.3(jest-resolve@29.7.0): + optionalDependencies: + jest-resolve: 29.7.0 + + jest-regex-util@29.6.3: {} + + jest-resolve-dependencies@29.7.0: + dependencies: + jest-regex-util: 29.6.3 + jest-snapshot: 29.7.0 + transitivePeerDependencies: + - supports-color + + jest-resolve@29.7.0: + dependencies: + chalk: 4.1.2 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + jest-pnp-resolver: 1.2.3(jest-resolve@29.7.0) + jest-util: 29.7.0 + jest-validate: 29.7.0 + resolve: 1.22.10 + resolve.exports: 2.0.3 + slash: 3.0.0 + + jest-runner@29.7.0: + dependencies: + '@jest/console': 29.7.0 + '@jest/environment': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + emittery: 0.13.1 + graceful-fs: 4.2.11 + jest-docblock: 29.7.0 + jest-environment-node: 29.7.0 + jest-haste-map: 29.7.0 + jest-leak-detector: 29.7.0 + jest-message-util: 29.7.0 + jest-resolve: 29.7.0 + jest-runtime: 29.7.0 + jest-util: 29.7.0 + jest-watcher: 29.7.0 + jest-worker: 29.7.0 + p-limit: 3.1.0 + source-map-support: 0.5.13 + transitivePeerDependencies: + - supports-color + + jest-runtime@29.7.0: + dependencies: + '@jest/environment': 29.7.0 + '@jest/fake-timers': 29.7.0 + '@jest/globals': 29.7.0 + '@jest/source-map': 29.6.3 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + cjs-module-lexer: 1.4.3 + collect-v8-coverage: 1.0.2 + glob: 7.2.3 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + jest-message-util: 29.7.0 + jest-mock: 29.7.0 + jest-regex-util: 29.6.3 + jest-resolve: 29.7.0 + jest-snapshot: 29.7.0 + jest-util: 29.7.0 + slash: 3.0.0 + strip-bom: 4.0.0 + transitivePeerDependencies: + - supports-color + + jest-snapshot@29.7.0: + dependencies: + '@babel/core': 
7.26.10 + '@babel/generator': 7.27.0 + '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.26.10) + '@babel/plugin-syntax-typescript': 7.25.9(@babel/core@7.26.10) + '@babel/types': 7.27.0 + '@jest/expect-utils': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + babel-preset-current-node-syntax: 1.1.0(@babel/core@7.26.10) + chalk: 4.1.2 + expect: 29.7.0 + graceful-fs: 4.2.11 + jest-diff: 29.7.0 + jest-get-type: 29.6.3 + jest-matcher-utils: 29.7.0 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + natural-compare: 1.4.0 + pretty-format: 29.7.0 + semver: 7.7.1 + transitivePeerDependencies: + - supports-color + + jest-util@29.7.0: + dependencies: + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + ci-info: 3.9.0 + graceful-fs: 4.2.11 + picomatch: 2.3.1 + + jest-validate@29.7.0: + dependencies: + '@jest/types': 29.6.3 + camelcase: 6.3.0 + chalk: 4.1.2 + jest-get-type: 29.6.3 + leven: 3.1.0 + pretty-format: 29.7.0 + + jest-watcher@29.7.0: + dependencies: + '@jest/test-result': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + emittery: 0.13.1 + jest-util: 29.7.0 + string-length: 4.0.2 + + jest-worker@29.7.0: + dependencies: + '@types/node': 22.14.1 + jest-util: 29.7.0 + merge-stream: 2.0.0 + supports-color: 8.1.1 + + jest@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)): + dependencies: + '@jest/core': 29.7.0(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + '@jest/types': 29.6.3 + import-local: 3.2.0 + jest-cli: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + transitivePeerDependencies: + - '@types/node' + - babel-plugin-macros + - supports-color + - ts-node + + js-tokens@4.0.0: {} + + js-yaml@3.14.1: + dependencies: + argparse: 1.0.10 + esprima: 4.0.1 + + jsesc@3.1.0: {} + + json-parse-even-better-errors@2.3.1: {} + + json5@2.2.3: {} + + kleur@3.0.3: {} + + leven@3.1.0: {} + + lines-and-columns@1.2.4: {} + + locate-path@5.0.0: 
+ dependencies: + p-locate: 4.1.0 + + lodash.memoize@4.1.2: {} + + lru-cache@5.1.1: + dependencies: + yallist: 3.1.1 + + make-dir@4.0.0: + dependencies: + semver: 7.7.1 + + make-error@1.3.6: {} + + makeerror@1.0.12: + dependencies: + tmpl: 1.0.5 + + math-intrinsics@1.1.0: {} + + media-typer@1.1.0: {} + + merge-descriptors@2.0.0: {} + + merge-stream@2.0.0: {} + + micromatch@4.0.8: + dependencies: + braces: 3.0.3 + picomatch: 2.3.1 + + mime-db@1.54.0: {} + + mime-types@3.0.1: + dependencies: + mime-db: 1.54.0 + + mimic-fn@2.1.0: {} + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.11 + + minimatch@5.1.6: + dependencies: + brace-expansion: 2.0.1 + + ms@2.1.3: {} + + natural-compare@1.4.0: {} + + negotiator@1.0.0: {} + + node-int64@0.4.0: {} + + node-releases@2.0.19: {} + + normalize-path@3.0.0: {} + + npm-run-path@4.0.1: + dependencies: + path-key: 3.1.1 + + object-assign@4.1.1: {} + + object-inspect@1.13.4: {} + + on-finished@2.4.1: + dependencies: + ee-first: 1.1.1 + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + onetime@5.1.2: + dependencies: + mimic-fn: 2.1.0 + + p-limit@2.3.0: + dependencies: + p-try: 2.2.0 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@4.1.0: + dependencies: + p-limit: 2.3.0 + + p-try@2.2.0: {} + + parse-json@5.2.0: + dependencies: + '@babel/code-frame': 7.26.2 + error-ex: 1.3.2 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + + parseurl@1.3.3: {} + + path-exists@4.0.0: {} + + path-is-absolute@1.0.1: {} + + path-key@3.1.1: {} + + path-parse@1.0.7: {} + + path-to-regexp@8.2.0: {} + + picocolors@1.1.1: {} + + picomatch@2.3.1: {} + + pirates@4.0.7: {} + + pkce-challenge@5.0.0: {} + + pkg-dir@4.2.0: + dependencies: + find-up: 4.1.0 + + pretty-format@29.7.0: + dependencies: + '@jest/schemas': 29.6.3 + ansi-styles: 5.2.0 + react-is: 18.3.1 + + prompts@2.4.2: + dependencies: + kleur: 3.0.3 + sisteransi: 1.0.5 + + proxy-addr@2.0.7: + dependencies: + forwarded: 0.2.0 + ipaddr.js: 1.9.1 + + 
pure-rand@6.1.0: {} + + qs@6.14.0: + dependencies: + side-channel: 1.1.0 + + range-parser@1.2.1: {} + + raw-body@3.0.0: + dependencies: + bytes: 3.1.2 + http-errors: 2.0.0 + iconv-lite: 0.6.3 + unpipe: 1.0.0 + + react-is@18.3.1: {} + + require-directory@2.1.1: {} + + resolve-cwd@3.0.0: + dependencies: + resolve-from: 5.0.0 + + resolve-from@5.0.0: {} + + resolve.exports@2.0.3: {} + + resolve@1.22.10: + dependencies: + is-core-module: 2.16.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + router@2.2.0: + dependencies: + debug: 4.4.0 + depd: 2.0.0 + is-promise: 4.0.0 + parseurl: 1.3.3 + path-to-regexp: 8.2.0 + transitivePeerDependencies: + - supports-color + + safe-buffer@5.2.1: {} + + safer-buffer@2.1.2: {} + + semver@6.3.1: {} + + semver@7.7.1: {} + + send@1.2.0: + dependencies: + debug: 4.4.0 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + fresh: 2.0.0 + http-errors: 2.0.0 + mime-types: 3.0.1 + ms: 2.1.3 + on-finished: 2.4.1 + range-parser: 1.2.1 + statuses: 2.0.1 + transitivePeerDependencies: + - supports-color + + serve-static@2.2.0: + dependencies: + encodeurl: 2.0.0 + escape-html: 1.0.3 + parseurl: 1.3.3 + send: 1.2.0 + transitivePeerDependencies: + - supports-color + + setprototypeof@1.2.0: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + side-channel-list@1.0.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + + side-channel-map@1.0.1: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + + side-channel-weakmap@1.0.2: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + side-channel-map: 1.0.1 + + side-channel@1.1.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + side-channel-list: 1.0.0 + side-channel-map: 1.0.1 + side-channel-weakmap: 1.0.2 + + signal-exit@3.0.7: {} + + sisteransi@1.0.5: {} + + slash@3.0.0: {} + + source-map-support@0.5.13: + 
dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + + source-map@0.6.1: {} + + sprintf-js@1.0.3: {} + + stack-utils@2.0.6: + dependencies: + escape-string-regexp: 2.0.0 + + statuses@2.0.1: {} + + string-length@4.0.2: + dependencies: + char-regex: 1.0.2 + strip-ansi: 6.0.1 + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-bom@4.0.0: {} + + strip-final-newline@2.0.0: {} + + strip-json-comments@3.1.1: {} + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + supports-color@8.1.1: + dependencies: + has-flag: 4.0.0 + + supports-preserve-symlinks-flag@1.0.0: {} + + test-exclude@6.0.0: + dependencies: + '@istanbuljs/schema': 0.1.3 + glob: 7.2.3 + minimatch: 3.1.2 + + tmpl@1.0.5: {} + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + toidentifier@1.0.1: {} + + ts-jest@29.3.2(@babel/core@7.26.10)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.10))(jest@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)))(typescript@5.8.3): + dependencies: + bs-logger: 0.2.6 + ejs: 3.1.10 + fast-json-stable-stringify: 2.1.0 + jest: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + jest-util: 29.7.0 + json5: 2.2.3 + lodash.memoize: 4.1.2 + make-error: 1.3.6 + semver: 7.7.1 + type-fest: 4.40.0 + typescript: 5.8.3 + yargs-parser: 21.1.1 + optionalDependencies: + '@babel/core': 7.26.10 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + babel-jest: 29.7.0(@babel/core@7.26.10) + + ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3): + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.11 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.4 + '@types/node': 22.14.1 + acorn: 8.14.1 + acorn-walk: 8.3.4 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + 
typescript: 5.8.3 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + + type-detect@4.0.8: {} + + type-fest@0.21.3: {} + + type-fest@4.40.0: {} + + type-is@2.0.1: + dependencies: + content-type: 1.0.5 + media-typer: 1.1.0 + mime-types: 3.0.1 + + typescript@5.8.3: {} + + undici-types@6.21.0: {} + + unpipe@1.0.0: {} + + update-browserslist-db@1.1.3(browserslist@4.24.4): + dependencies: + browserslist: 4.24.4 + escalade: 3.2.0 + picocolors: 1.1.1 + + v8-compile-cache-lib@3.0.1: {} + + v8-to-istanbul@9.3.0: + dependencies: + '@jridgewell/trace-mapping': 0.3.25 + '@types/istanbul-lib-coverage': 2.0.6 + convert-source-map: 2.0.0 + + vary@1.1.2: {} + + walker@1.0.8: + dependencies: + makeerror: 1.0.12 + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + wrap-ansi@7.0.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + + wrappy@1.0.2: {} + + write-file-atomic@4.0.2: + dependencies: + imurmurhash: 0.1.4 + signal-exit: 3.0.7 + + y18n@5.0.8: {} + + yallist@3.1.1: {} + + yargs-parser@21.1.1: {} + + yargs@17.7.2: + dependencies: + cliui: 8.0.1 + escalade: 3.2.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + + yn@3.1.1: {} + + yocto-queue@0.1.0: {} + + zod-to-json-schema@3.24.5(zod@3.24.3): + dependencies: + zod: 3.24.3 + + zod@3.24.3: {} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/tsconfig.json b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/tsconfig.json new file mode 100644 index 0000000..6ede851 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/mcp/tsconfig.json @@ -0,0 +1,9 @@ +{ + "compilerOptions": { + "moduleResolution": "node", + "target": "esnext", + "module": "esnext", + "lib": ["esnext"], + "esModuleInterop": true + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/mocking.t b/CloudronPackages/APISIX/apisix-source/t/plugin/mocking.t new file mode 100644 index 0000000..d541591 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/mocking.t @@ -0,0 +1,506 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route(return response example:"hello world") +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "delay": 1, + "content_type": "text/plain", + "response_status": 200, + "response_example": "hello world" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit route(return response example:"hello world") +--- request +GET /hello +--- response_body chomp +hello world + + + +=== TEST 3: set route(return response schema: string case) +--- config + location /t { + content_by_lua_block 
{ + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "delay": 1, + "content_type": "text/plain", + "response_status": 200, + "response_schema": { + "type": "object", + "properties": { + "field1":{ + "type":"string", + "example":"hello" + } + } + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit route(return response schema: string case) +--- request +GET /hello +--- response_body chomp +{"field1":"hello"} + + + +=== TEST 5: set route(return response schema: integer case) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "delay": 1, + "content_type": "text/plain", + "response_status": 200, + "response_schema": { + "type": "object", + "properties": { + "field1":{ + "type":"integer", + "example":4 + } + } + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit route(return response schema: integer case) +--- request +GET /hello +--- response_body chomp +{"field1":4} + + + +=== TEST 7: set route(return response schema: number case) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "delay": 1, + "content_type": "text/plain", + "response_status": 200, + "response_schema": { + "type": "object", + "properties": { + "field1":{ + "type":"number", + "example":5.5 + } + } + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit route(return response schema: number 
case) +--- request +GET /hello +--- response_body chomp +{"field1":5.5} + + + +=== TEST 9: set route(return response schema: boolean case) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "delay": 1, + "content_type": "text/plain", + "response_status": 200, + "response_schema": { + "type": "object", + "properties": { + "field1":{ + "type":"boolean", + "example":true + } + } + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: hit route(return response schema: boolean case) +--- request +GET /hello +--- response_body chomp +{"field1":true} + + + +=== TEST 11: set route(return response schema: object case) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "delay": 1, + "content_type": "text/plain", + "response_status": 200, + "response_schema": { + "type": "object", + "properties": { + "field1":{ + "type":"object" + } + } + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: hit route(return response schema: object case) +--- request +GET /hello +--- response_body chomp +{"field1":{}} + + + +=== TEST 13: set route(return response header: application/json) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "delay": 1, + "content_type": "application/json", + "response_status": 200, + "response_example": "{\"field1\":{}}" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + 
ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: hit route(return response header: application/json) +--- request +GET /hello +--- response_headers +Content-Type: application/json + + + +=== TEST 15: set route(return response example:"remote_addr:127.0.0.1") +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "delay": 1, + "content_type": "text/plain", + "response_status": 200, + "response_example": "remote_addr:$remote_addr" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: hit route(return response example:"remote_addr:127.0.0.1") +--- request +GET /hello +--- response_body chomp +remote_addr:127.0.0.1 + + + +=== TEST 17: set route(return response example:"empty_var:") +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "delay": 1, + "content_type": "text/plain", + "response_status": 200, + "response_example": "empty_var:$foo" + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: hit route(return response example:"empty_var:") +--- request +GET /hello +--- response_body chomp +empty_var: + + + +=== TEST 19: set route (return headers) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "response_example": "hello world", + "response_headers": { + "X-Apisix": "is, cool", + "X-Really": "yes" + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } 
+--- response_body +passed + + + +=== TEST 20: hit route +--- request +GET /hello +--- response_headers +X-Apisix: is, cool +X-Really: yes + + + +=== TEST 21: set route (return headers support built-in variables) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "response_example": "hello world", + "response_headers": { + "X-Route-Id": "$route_id" + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: hit route +--- request +GET /hello +--- response_headers +X-Route-Id: 1 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/multi-auth.t b/CloudronPackages/APISIX/apisix-source/t/plugin/multi-auth.t new file mode 100644 index 0000000..2bb3bab --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/multi-auth.t @@ -0,0 +1,613 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); +run_tests; + +__DATA__ + +=== TEST 1: add consumer with basic-auth and key-auth plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + }, + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: enable multi auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "multi-auth": { + "auth_plugins": [ + { + "basic-auth": {} + }, + { + "key-auth": { + "query": "apikey", + "hide_credentials": true, + "header": "apikey" + } + }, + { + "jwt-auth": { + "cookie": "jwt", + "query": "jwt", + "hide_credentials": true, + "header": "authorization" + } + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: verify, missing authorization +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} + + + +=== TEST 4: verify basic-auth +--- request +GET /hello +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_body +hello world +--- error_log +find consumer foo + + + +=== TEST 5: verify key-auth +--- request +GET /hello +--- more_headers +apikey: auth-one +--- response_body +hello world + + + +=== TEST 6: verify, invalid basic credentials +--- request +GET /hello +--- more_headers +Authorization: Basic YmFyOmJhcgo= 
+--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} + + + +=== TEST 7: verify, invalid api key +--- request +GET /hello +--- more_headers +apikey: auth-two +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} + + + +=== TEST 8: enable multi auth plugin with invalid plugin conf in first auth_plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "multi-auth": { + "auth_plugins": [ + { + "basic-auth": { + "hide_credentials": "false" + } + }, + { + "key-auth": {} + }, + { + "jwt-auth": {} + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin multi-auth err: plugin basic-auth check schema failed: property \"hide_credentials\" validation failed: wrong type: expected boolean, got string"} + + + +=== TEST 9: enable multi auth plugin with invalid plugin conf in second auth_plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "multi-auth": { + "auth_plugins": [ + { + "key-auth": {} + }, + { + "basic-auth": "blah" + }, + { + "jwt-auth": {} + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin multi-auth err: plugin basic-auth check schema failed: wrong type: expected object, got string"} + + + +=== TEST 10: enable multi auth 
plugin with invalid plugin conf in third auth_plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "multi-auth": { + "auth_plugins": [ + { + "key-auth": {} + }, + { + "basic-auth": {} + }, + { + "jwt-auth": { + "header": 123 + } + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin multi-auth err: plugin jwt-auth check schema failed: property \"header\" validation failed: wrong type: expected string, got number"} + + + +=== TEST 11: enable multi auth plugin with default plugin conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "multi-auth": { + "auth_plugins": [ + { + "basic-auth": {} + }, + { + "key-auth": {} + }, + { + "jwt-auth": {} + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: verify, missing authorization +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} + + + +=== TEST 13: verify basic-auth +--- request +GET /hello +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_body +hello world +--- error_log +find consumer foo + + + +=== TEST 14: verify key-auth +--- request +GET /hello +--- more_headers +apikey: auth-one +--- response_body +hello world + + + +=== TEST 15: verify, invalid basic credentials +--- request +GET /hello +--- 
more_headers +Authorization: Basic YmFyOmJhcgo= +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} + + + +=== TEST 16: verify, invalid api key +--- request +GET /hello +--- more_headers +apikey: auth-two +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} + + + +=== TEST 17: enable multi auth plugin using admin api, without any auth_plugins configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "multi-auth": { } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/\{"error_msg":"failed to check the configuration of plugin multi-auth err: property \\"auth_plugins\\" is required"\}/ + + + +=== TEST 18: enable multi auth plugin using admin api, with auth_plugins configuration but with one authorization plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "multi-auth": { + "auth_plugins": [ + { + "basic-auth": {} + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body_like eval +qr/\{"error_msg":"failed to check the configuration of plugin multi-auth err: property \\"auth_plugins\\" validation failed: expect array to have at least 2 items"\}/ + + + +=== TEST 19: add consumer with username and jwt-auth plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: sign / verify jwt-auth +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local sign = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsIm5iZiI6MTcyNzI3NDk4M30.N6ebc4U5ms976pwKZ_iQ88w_uJKqUVNtTYZ_nXhRpWo" + local code, _, res = t('/hello?jwt=' .. sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- request +GET /t +--- response_body +hello world + + + +=== TEST 21: verify multi-auth with plugin config will cause the conf_version change +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, err = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "desc": "Multiple Authentication", + "plugins": { + "multi-auth": { + "auth_plugins": [ + { + "basic-auth": {} + }, + { + "key-auth": { + "query": "apikey", + "hide_credentials": true, + "header": "apikey" + } + }, + { + "jwt-auth": { + "cookie": "jwt", + "query": "jwt", + "hide_credentials": true, + "header": "authorization" + } + } + ] + } + } + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + + local code, err = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugin_config_id": 1 + }]] + ) + if code > 300 then + ngx.log(ngx.ERR, err) + return + end + ngx.sleep(0.1) + + local sign = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsIm5iZiI6MTcyNzI3NDk4M30.N6ebc4U5ms976pwKZ_iQ88w_uJKqUVNtTYZ_nXhRpWo" + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- request +GET /t +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/multi-auth2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/multi-auth2.t new file mode 100644 index 0000000..bc15a36 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/multi-auth2.t @@ -0,0 +1,368 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); +run_tests; + +__DATA__ + +=== TEST 1: add consumer with basic-auth and key-auth plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "foo", + "plugins": { + "basic-auth": { + "username": "foo", + "password": "bar" + }, + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: enable multi auth plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "multi-auth": { + "auth_plugins": [ + { + "basic-auth": {} + }, + { + "key-auth": {} + }, + { + "jwt-auth": {} + }, + { + "hmac-auth": {} + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: invalid key-auth apikey +--- request +GET /hello +--- more_headers +apikey: 123 +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} +--- error_log +basic-auth failed to authenticate the request, code: 401. error: Missing authorization in request +key-auth failed to authenticate the request, code: 401. error: Invalid API key in request +jwt-auth failed to authenticate the request, code: 401. error: Missing JWT token in request +hmac-auth failed to authenticate the request, code: 401. 
error: client request can't be validated: missing Authorization header + + + +=== TEST 4: valid key-auth apikey +--- request +GET /hello +--- more_headers +apikey: auth-one +--- error_code: 200 +--- response_body +hello world +--- no_error_log +failed to authenticate the request + + + +=== TEST 5: invalid basic-auth credentials +--- request +GET /hello +--- more_headers +Authorization: Basic YmFyOmJhcgo= +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} +--- error_log +basic-auth failed to authenticate the request, code: 401. error: failed to find user: invalid user +key-auth failed to authenticate the request, code: 401. error: Missing API key in request +jwt-auth failed to authenticate the request, code: 401. error: JWT token invalid: invalid jwt string +hmac-auth failed to authenticate the request, code: 401. error: client request can't be validated: Authorization header does not start with 'Signature' + + + +=== TEST 6: valid basic-auth creds +--- request +GET /hello +--- more_headers +Authorization: Basic Zm9vOmJhcg== +--- response_body +hello world +--- no_error_log +failed to authenticate the request + + + +=== TEST 7: missing hmac auth authorization header +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} +--- error_log +hmac-auth failed to authenticate the request, code: 401. error: client request can't be validated: missing Authorization header + + + +=== TEST 8: hmac auth missing algorithm +--- request +GET /hello +--- more_headers +Authorization: Signature keyId="my-access-key",headers="@request-target date" ,signature="asdf" +Date: Thu, 24 Sep 2020 06:39:52 GMT +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} +--- error_log +hmac-auth failed to authenticate the request, code: 401. 
error: client request can't be validated: algorithm missing + + + +=== TEST 9: test with invalid jwt-auth token +--- request +GET /hello +--- more_headers +Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTU2Mzg3MDUwMX0.pPNVvh-TQsdDzorRwa-uuiLYiEBODscp9wv0cwD6c68 +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} +--- error_log +jwt-auth failed to authenticate the request, code: 401. error: Invalid user key in JWT token + + + +=== TEST 10: create public API route (jwt-auth sign) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/jwt/sign" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: add consumer with username and jwt-auth plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: test with expired jwt token +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} +--- error_log +jwt-auth failed to authenticate the request, code: 401. 
error: failed to verify jwt: 'exp' claim expired at Tue, 23 Jul 2019 08:28:21 GMT +--- more_headers +Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTU2Mzg3MDUwMX0.pPNVvh-TQsdDzorRwa-uuiLYiEBODscp9wv0cwD6c68 + + + +=== TEST 13: test with jwt token containing wrong signature +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Authorization Failed"} +--- error_log +jwt-auth failed to authenticate the request, code: 401. error: failed to verify jwt: signature mismatch: fNtFJnNnJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- more_headers +Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNnJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs + + + +=== TEST 14: verify jwt-auth +--- request +GET /hello +--- more_headers +Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs +--- response_body +hello world +--- no_error_log +failed to authenticate the request + + + +=== TEST 15: enable multi auth plugin with non-existent anonymous consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "multi-auth": { + "auth_plugins": [ + { + "basic-auth": { + "anonymous_consumer": "not-found-anonymous" + } + }, + { + "key-auth": { + "anonymous_consumer": "not-found-anonymous" + } + }, + { + "jwt-auth": { + "anonymous_consumer": "not-found-anonymous" + } + }, + { + "hmac-auth": { + "anonymous_consumer": "anonymous" + } + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: invalid basic-auth credentials +--- request +GET /hello +--- 
error_code: 401 +--- response_body +{"message":"Authorization Failed"} +--- error_log +basic-auth failed to authenticate the request, code: 401. error: failed to get anonymous consumer not-found-anonymous +key-auth failed to authenticate the request, code: 401. error: failed to get anonymous consumer not-found-anonymous +jwt-auth failed to authenticate the request, code: 401. error: failed to get anonymous consumer not-found-anonymous +hmac-auth failed to authenticate the request, code: 401. error: failed to get anonymous consumer anonymous diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/node-status.t b/CloudronPackages/APISIX/apisix-source/t/plugin/node-status.t new file mode 100644 index 0000000..903948a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/node-status.t @@ -0,0 +1,138 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + my $extra_yaml_config = <<_EOC_; +plugins: + - public-api + - node-status +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/status" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: sanity +--- config +location /t { + content_by_lua_block { + ngx.sleep(0.5) + local t = require("lib.test_admin").test + local code, body, body_org = t('/apisix/status', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + end + ngx.say(body_org) + } +} +--- response_body eval +qr/"accepted":/ + + + +=== TEST 3: test for unsupported method +--- request +PATCH /apisix/status +--- error_code: 404 + + + +=== TEST 4: test for use default uuid as apisix_uid +--- config +location /t { + content_by_lua_block { + ngx.sleep(0.5) + local t = require("lib.test_admin").test + local code, body, body_org = t('/apisix/status', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + end + local json_decode = require("cjson").decode + local body_json = json_decode(body_org) + ngx.say(body_json.id) + } +} +--- response_body_like eval +qr/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ + + + +=== TEST 5: test for allow user to specify a meaningful id as apisix_uid +--- yaml_config +apisix: + id: "user-set-apisix-instance-id-A" +#END +--- config +location /t { + content_by_lua_block { + ngx.sleep(0.5) + local t = 
require("lib.test_admin").test + local code, body, body_org = t('/apisix/status', ngx.HTTP_GET) + + if code >= 300 then + ngx.status = code + end + ngx.say(body_org) + } +} +--- response_body eval +qr/"id":"user-set-apisix-instance-id-A"/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ocsp-stapling.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ocsp-stapling.t new file mode 100644 index 0000000..1d32c02 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ocsp-stapling.t @@ -0,0 +1,676 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +my $openssl_bin = $ENV{OPENSSL_BIN}; +if (! -x $openssl_bin) { + $ENV{OPENSSL_BIN} = '/usr/local/openresty/openssl3/bin/openssl'; + if (! 
-x $ENV{OPENSSL_BIN}) { + plan(skip_all => "openssl3 not installed"); + } +} + +add_block_preprocessor(sub { + my ($block) = @_; + + # setup default conf.yaml + my $extra_yaml_config = $block->extra_yaml_config // <<_EOC_; +plugins: + - ocsp-stapling +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: disable ocsp-stapling plugin +--- extra_yaml_config +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + ocsp_stapling = {} + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + ngx.status = code + ngx.print(body) + } +} +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: additional properties forbidden, found ocsp_stapling"} + + + +=== TEST 2: check schema when enabled ocsp-stapling plugin +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local json = require("toolkit.json") + + for _, conf in ipairs({ + {}, + {enabled = true}, + {skip_verify = true}, + {cache_ttl = 6000}, + {enabled = true, skip_verify = true, cache_ttl = 6000}, + }) do + local ok, err = core.schema.check(core.schema.ssl.properties.ocsp_stapling, conf) + if not ok then + ngx.say(err) + return + end + ngx.say(json.encode(conf)) + end + } +} +--- response_body +{"cache_ttl":3600,"enabled":false,"skip_verify":false} +{"cache_ttl":3600,"enabled":true,"skip_verify":false} +{"cache_ttl":3600,"enabled":false,"skip_verify":true} +{"cache_ttl":6000,"enabled":false,"skip_verify":false} +{"cache_ttl":6000,"enabled":true,"skip_verify":true} + + + +=== TEST 3: ssl config without "ocsp-stapling" field 
when enabled ocsp-stapling plugin +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 4: hit, handshake ok:1 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername test.com -status 2>&1 | cat +--- response_body eval +qr/CONNECTED/ +--- error_log +no 'ocsp_stapling' field found, no need to run ocsp-stapling plugin + + + +=== TEST 5: hit, no ocsp response send:2 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername test.com -status 2>&1 | cat +--- response_body eval +qr/OCSP response: no response sent/ +--- error_log +no 'ocsp_stapling' field found, no need to run ocsp-stapling plugin + + + +=== TEST 6: client hello without status request extension required when enabled ocsp-stapling plugin +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/ocsp/rsa_good.crt") + local ssl_key = t.read_file("t/certs/ocsp/rsa_good.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "ocsp.test.com", + ocsp_stapling = { + enabled = true + } + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 7: hit, handshake ok and no ocsp response send +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 
-servername ocsp.test.com 2>&1 | cat +--- response_body eval +qr/CONNECTED/ +--- error_log +no status request required, no need to send ocsp response + + + +=== TEST 8: cert without ocsp supported when enabled ocsp-stapling plugin +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + ocsp_stapling = { + enabled = true + } + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 9: hit, handshake ok:1 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername test.com -status 2>&1 | cat +--- response_body eval +qr/CONNECTED/ +--- error_log +no ocsp response send: failed to get ocsp url: cert not contains authority_information_access extension + + + +=== TEST 10: hit, no ocsp response send due to get ocsp responder url failed:2 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername test.com -status 2>&1 | cat +--- response_body eval +qr/OCSP response: no response sent/ +--- error_log +no ocsp response send: failed to get ocsp url: cert not contains authority_information_access extension + + + +=== TEST 11: run ocsp responder, will exit when test finished +--- config +location /t { + content_by_lua_block { + local shell = require("resty.shell") + local cmd = [[ openssl ocsp -index t/certs/ocsp/index.txt -port 11451 -rsigner t/certs/ocsp/signer.crt -rkey t/certs/ocsp/signer.key -CA t/certs/apisix.crt -nrequest 16 2>&1 1>/dev/null & ]] + local ok, stdout, stderr, reason, status = shell.run(cmd, nil, 1000, 8096) + if not ok then + ngx.log(ngx.WARN, "failed to execute the script 
with status: " .. status .. ", reason: " .. reason .. ", stderr: " .. stderr) + return + end + ngx.print(stderr) + } +} + + + +=== TEST 12: cert with ocsp supported when enabled ocsp-stapling plugin +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/ocsp/rsa_good.crt") + local ssl_key = t.read_file("t/certs/ocsp/rsa_good.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "ocsp.test.com", + ocsp_stapling = { + enabled = true + } + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 13: hit, handshake ok:1 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp.test.com 2>&1 | cat +--- max_size: 16096 +--- response_body eval +qr/CONNECTED/ + + + +=== TEST 14: hit, get ocsp response and status is good:2 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp.test.com 2>&1 | cat +--- max_size: 16096 +--- response_body eval +qr/Cert Status: good/ + + + +=== TEST 15: muilt cert with ocsp supported when enabled ocsp-stapling plugin +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local rsa_cert = t.read_file("t/certs/ocsp/rsa_good.crt") + local rsa_key = t.read_file("t/certs/ocsp/rsa_good.key") + + local ecc_cert = t.read_file("t/certs/ocsp/ecc_good.crt") + local ecc_key = t.read_file("t/certs/ocsp/ecc_good.key") + + local data = { + cert = rsa_cert, + key = rsa_key, + certs = { ecc_cert }, + keys = { ecc_key }, + sni = "ocsp.test.com", + ocsp_stapling = { + enabled = true + } + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + 
"value": { + "sni": "ocsp.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 16: hit ecc cert, handshake ok:1 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername ocsp.test.com -status -tls1_2 -cipher ECDHE-ECDSA-AES128-GCM-SHA256 2>&1 | cat +--- max_size: 16096 +--- response_body eval +qr/CONNECTED/ + + + +=== TEST 17: hit ecc cert, get cert signature type:2 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername ocsp.test.com -status -tls1_2 -cipher ECDHE-ECDSA-AES128-GCM-SHA256 2>&1 | cat +--- max_size: 16096 +--- response_body eval +qr/Peer signature type: ECDSA/ + + + +=== TEST 18: hit ecc cert, get ocsp response and status is good:3 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername ocsp.test.com -status -tls1_2 -cipher ECDHE-ECDSA-AES128-GCM-SHA256 2>&1 | cat +--- max_size: 16096 +--- response_body eval +qr/Cert Status: good/ + + + +=== TEST 19: hit rsa cert, handshake ok:1 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername ocsp.test.com -status -tls1_2 -cipher ECDHE-RSA-AES128-GCM-SHA256 2>&1 | cat +--- max_size: 16096 +--- response_body eval +qr/CONNECTED/ + + + +=== TEST 20: hit rsa cert, get cert signature type:2 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername ocsp.test.com -status -tls1_2 -cipher ECDHE-RSA-AES128-GCM-SHA256 2>&1 | cat +--- max_size: 16096 +--- response_body eval +qr/Peer signature type: RSA/ + + + +=== TEST 21: hit rsa cert, get ocsp response and status is good:3 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -connect localhost:1994 -servername ocsp.test.com -status -tls1_2 -cipher ECDHE-RSA-AES128-GCM-SHA256 2>&1 | cat +--- max_size: 16096 +--- response_body eval +qr/Cert Status: good/ + + + +=== TEST 22: cert with ocsp supported and revoked when enabled ocsp-stapling plugin +--- config +location 
/t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/ocsp/rsa_revoked.crt") + local ssl_key = t.read_file("t/certs/ocsp/rsa_revoked.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "ocsp-revoked.test.com", + ocsp_stapling = { + enabled = true + } + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 23: hit revoked rsa cert, handshake ok:1 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp-revoked.test.com 2>&1 | cat +--- response_body eval +qr/CONNECTED/ +--- error_log +no ocsp response send: failed to validate ocsp response: certificate status "revoked" in the OCSP response + + + +=== TEST 24: hit revoked rsa cert, no ocsp response send:2 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp-revoked.test.com 2>&1 | cat +--- response_body eval +qr/OCSP response: no response sent/ +--- error_log +no ocsp response send: failed to validate ocsp response: certificate status "revoked" in the OCSP response + + + +=== TEST 25: cert with ocsp supported and revoked when enabled ocsp-stapling plugin, and skip verify +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/ocsp/rsa_revoked.crt") + local ssl_key = t.read_file("t/certs/ocsp/rsa_revoked.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "ocsp-revoked.test.com", + ocsp_stapling = { + enabled = true, + skip_verify = true, + } + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + 
end + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 26: hit revoked rsa cert, handshake ok:1 +--- max_size: 16096 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp-revoked.test.com 2>&1 | cat +--- response_body eval +qr/CONNECTED/ + + + +=== TEST 27: hit revoked rsa cert, get ocsp response and status is revoked:2 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp-revoked.test.com 2>&1 | cat +--- max_size: 16096 +--- response_body eval +qr/Cert Status: revoked/ + + + +=== TEST 28: cert with ocsp supported and unknown status when enabled ocsp-stapling plugin +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/ocsp/rsa_unknown.crt") + local ssl_key = t.read_file("t/certs/ocsp/rsa_unknown.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "ocsp-unknown.test.com", + ocsp_stapling = { + enabled = true + } + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 29: hit unknown rsa cert, handshake ok:1 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp-unknown.test.com 2>&1 | cat +--- response_body eval +qr/CONNECTED/ +--- error_log +no ocsp response send: failed to validate ocsp response: certificate status "unknown" in the OCSP response + + + +=== TEST 30: hit unknown rsa cert, no ocsp response send:2 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp-unknown.test.com 2>&1 | cat +--- response_body eval +qr/OCSP response: no response sent/ +--- error_log +no ocsp response send: failed to validate ocsp response: certificate status "unknown" in the OCSP 
response + + + +=== TEST 31: cert with ocsp supported and unknown status when enabled ocsp-stapling plugin, and skip verify +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/ocsp/rsa_unknown.crt") + local ssl_key = t.read_file("t/certs/ocsp/rsa_unknown.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "ocsp-unknown.test.com", + ocsp_stapling = { + enabled = true, + skip_verify = true, + } + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 32: hit unknown rsa cert, handshake ok:1 +--- max_size: 16096 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp-unknown.test.com 2>&1 | cat +--- response_body eval +qr/CONNECTED/ + + + +=== TEST 33: hit unknown rsa cert, get ocsp response and status is unknown:2 +--- max_size: 16096 +--- exec +echo -n "Q" | $OPENSSL_BIN s_client -status -connect localhost:1994 -servername ocsp-unknown.test.com 2>&1 | cat +--- response_body eval +qr/Cert Status: unknown/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/opa.t b/CloudronPackages/APISIX/apisix-source/t/plugin/opa.t new file mode 100644 index 0000000..9d731ae --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/opa.t @@ -0,0 +1,225 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + {host = "http://127.0.0.1:8181", policy = "example/allow"}, + {host = "http://127.0.0.1:8181"}, + {host = 3233, policy = "example/allow"}, + } + local plugin = require("apisix.plugins.opa") + + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +done +property "policy" is required +property "host" validation failed: wrong type: expected string, got number + + + +=== TEST 2: setup route with plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "example" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/test"] + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: hit route (with correct request) +--- request +GET /hello?test=1234&user=none +--- more_headers +test-header: only-for-test +--- response_body +hello world + + + +=== TEST 4: hit route (with wrong header request) +--- request +GET 
/hello?test=1234&user=none +--- more_headers +test-header: not-for-test +--- error_code: 403 + + + +=== TEST 5: hit route (with wrong query request) +--- request +GET /hello?test=abcd&user=none +--- more_headers +test-header: only-for-test +--- error_code: 403 + + + +=== TEST 6: hit route (with wrong method request) +--- request +POST /hello?test=1234&user=none +--- more_headers +test-header: only-for-test +--- error_code: 403 + + + +=== TEST 7: hit route (with wrong path request) +--- request +GET /test?test=1234&user=none +--- more_headers +test-header: only-for-test +--- error_code: 403 + + + +=== TEST 8: hit route (response status code and header) +--- request +GET /test?test=abcd&user=alice +--- more_headers +test-header: only-for-test +--- error_code: 302 +--- response_headers +Location: http://example.com/auth + + + +=== TEST 9: hit route (response multiple header reason) +--- request +GET /test?test=abcd&user=bob +--- more_headers +test-header: only-for-test +--- error_code: 403 +--- response_headers +test: abcd +abcd: test + + + +=== TEST 10: hit route (response string reason) +--- request +GET /test?test=abcd&user=carla +--- more_headers +test-header: only-for-test +--- error_code: 403 +--- response +Give you a string reason + + + +=== TEST 11: hit route (response json reason) +--- request +GET /test?test=abcd&user=dylon +--- more_headers +test-header: only-for-test +--- error_code: 403 +--- response +{"code":40001,"desc":"Give you a object reason"} + + + +=== TEST 12: setup route with plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "example", + "send_headers_upstream": ["user"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/echo"] + }]] + ) + + if code >= 300 then + ngx.status = code + end + 
ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route +--- request +GET /echo?test=1234&user=none +--- response_headers +user: none diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/opa2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/opa2.t new file mode 100644 index 0000000..2dd0877 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/opa2.t @@ -0,0 +1,314 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: setup all-in-one test +--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/upstreams/u1", + data = [[{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }]], + }, + { + url = "/apisix/admin/consumers", + data = [[{ + "username": "test", + "plugins": { + "key-auth": { + "_meta": { + "disable": false + }, + "key": "test-key" + } + } + }]], + }, + { + url = "/apisix/admin/services/s1", + data = [[{ + "name": "s1", + "plugins": { + "key-auth": { + "_meta": { + "disable": false + } + } + } + }]], + }, + { + url = "/apisix/admin/routes/1", + data = [[{ + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "echo", + "with_route": true, + "with_consumer": true, + "with_service": true + } + }, + "upstream_id": "u1", + "service_id": "s1", + "uri": "/hello" + }]], + }, + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + ngx.say(code..body) + end + } + } +--- response_body eval +"201passed\n" x 4 + + + +=== TEST 2: hit route (test route data) +--- request +GET /hello +--- more_headers +test-header: only-for-test +apikey: test-key +--- error_code: 403 +--- response_body eval +qr/\"route\":/ and qr/\"id\":\"r1\"/ and qr/\"plugins\":\{\"opa\"/ and +qr/\"with_route\":true/ + + + +=== TEST 3: hit route (test consumer data) +--- request +GET /hello +--- more_headers +test-header: only-for-test +apikey: test-key +--- error_code: 403 +--- response_body eval +qr/\"consumer\":/ and qr/\"username\":\"test\"/ and qr/\"key\":\"test-key\"/ + + + +=== TEST 4: hit route (test service data) +--- request +GET /hello +--- more_headers +test-header: only-for-test 
+apikey: test-key +--- error_code: 403 +--- response_body eval +qr/\"service\":/ and qr/\"id\":\"s1\"/ and qr/\"query\":\"apikey\"/ and +qr/\"header\":\"apikey\"/ + + + +=== TEST 5: setup route without service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "echo", + "with_route": true, + "with_consumer": true, + "with_service": true + } + }, + "upstream_id": "u1", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit route (test without service and consumer) +--- request +GET /hello +--- more_headers +test-header: only-for-test +apikey: test-key +--- error_code: 403 +--- response_body_unlike eval +qr/\"service\"/ and qr/\"consumer\"/ + + + +=== TEST 7: setup route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "example" + } + }, + "upstream_id": "u1", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit route (with JSON empty array) +--- request +GET /hello?user=elisa +--- error_code: 403 +--- response_body chomp +{"info":[]} + + + +=== TEST 9: create route: `with_route = true` and opa validation passes when route name == "valid" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "valid", + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "with_route", + "with_route": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": 
"roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: hit route +--- request +GET /hello +--- error_code: 200 + + + +=== TEST 11: create route: `with_route = true` and opa validation fails when route name != "valid" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "not_valid", + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "with_route", + "with_route": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: hit route +--- request +GET /hello +--- error_code: 403 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openfunction.t b/CloudronPackages/APISIX/apisix-source/t/plugin/openfunction.t new file mode 100644 index 0000000..1b364e3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openfunction.t @@ -0,0 +1,331 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openfunction") + local ok, err = plugin.check_schema({function_uri = "http://127.0.0.1:30585/default/test-body"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: missing `function_uri` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openfunction") + local ok, err = plugin.check_schema({timeout = 60000}) + if not ok then + ngx.say(err) + end + } + } +--- response_body +property "function_uri" is required + + + +=== TEST 3: wrong type for `function_uri` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openfunction") + local ok, err = plugin.check_schema({function_uri = 30858}) + if not ok then + ngx.say(err) + end + } + } +--- response_body +property "function_uri" validation failed: wrong type: expected string, got number + + + +=== TEST 4: setup route with plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30584/function-sample" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit route (with GET request) +--- request +GET /hello +--- response_body +Hello, function-sample! 
+ + + +=== TEST 6: reset route with test-body function +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30585/default/test-body" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit route with POST method +--- request +POST /hello +test +--- response_body +Hello, test! + + + +=== TEST 8: reset route with test-header function with service_token +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30583/", + "authorization": { + "service_token": "test:test" + } + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit route with POST request with service_token +--- request +POST /hello +--- response_body chomp +[Basic dGVzdDp0ZXN0] + + + +=== TEST 10: reset route with test-header function without service_token +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30583/" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit route with user-specific Authorization header +--- request +POST /hello +--- more_headers +authorization: 
user-token-xxx +--- response_body chomp +[user-token-xxx] + + + +=== TEST 12: reset route to non-existent function_uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30584/default/non-existent" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route (with non-existent function_uri) +--- request +POST /hello +test +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 404 +--- response_body_like eval +qr/not found/ + + + +=== TEST 14: reset route with test-uri function and path forwarding +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openfunction": { + "function_uri": "http://127.0.0.1:30584" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: hit route with GET method +--- request +GET /hello/openfunction +--- response_body +Hello, openfunction! diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect.t b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect.t new file mode 100644 index 0000000..54fdc80 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect.t @@ -0,0 +1,1572 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Sanity check with minimal valid configuration. +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openid-connect") + local ok, err = plugin.check_schema({client_id = "a", client_secret = "b", discovery = "c"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: Missing `client_id`. +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openid-connect") + local ok, err = plugin.check_schema({client_secret = "b", discovery = "c"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "client_id" is required +done + + + +=== TEST 3: Wrong type for `client_id`. 
+--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openid-connect") + local ok, err = plugin.check_schema({client_id = 123, client_secret = "b", discovery = "c"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "client_id" validation failed: wrong type: expected string, got number +done + + + +=== TEST 4: Set up new route with plugin matching URI `/hello`. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "client_rsa_private_key": "89ae4c8edadf1cd1c9f034335f136f87ad84b625c8f1", + "discovery": "http://127.0.0.1:1980/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: verify encrypted field +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + + -- get plugin conf from etcd, client_rsa_private_key is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["openid-connect"].client_rsa_private_key) + + } + } +--- response_body +qO8TJbXcxCUnkkaTs3PxWDk5a54lv7FmngKQaxuXV4cL+7Kp1R4D8NS4w88so4e+ + + + +=== TEST 6: Access route w/o bearer token. Should redirect to authentication endpoint of ID provider. 
+--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'scope=apisix') ~= -1 and + string.find(location, 'client_id=kbyuFDidLLm280LIwVFiazOqjO3ty8KH') ~= -1 and + string.find(location, 'response_type=code') ~= -1 and + string.find(location, 'redirect_uri=https://iresty.com') ~= -1 then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 302 + + + +=== TEST 7: Modify route to match catch-all URI `/*` and point plugin to local Keycloak instance. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "realm": "University", + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "redirect_uri": "http://127.0.0.1:]] .. ngx.var.server_port .. 
[[/authenticated", + "ssl_verify": false, + "timeout": 10, + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect", + "set_access_token_header": true, + "access_token_in_authorization_header": false, + "set_id_token_header": true, + "set_userinfo_header": true, + "set_refresh_token_header": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: Access route w/o bearer token and go through the full OIDC Relying Party authentication process. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local login_keycloak = require("lib.keycloak").login_keycloak + local concatenate_cookies = require("lib.keycloak").concatenate_cookies + + local httpc = http.new() + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/uri" + local res, err = login_keycloak(uri, "teacher@gmail.com", "123456") + if err then + ngx.status = 500 + ngx.say(err) + return + end + + local cookie_str = concatenate_cookies(res.headers['Set-Cookie']) + -- Make the final call back to the original URI. + local redirect_uri = "http://127.0.0.1:" .. ngx.var.server_port .. res.headers['Location'] + res, err = httpc:request_uri(redirect_uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + -- No response, must be an error. + ngx.status = 500 + ngx.say(err) + return + elseif res.status ~= 200 then + -- Not a valid response. + -- Use 500 to indicate error. 
+ ngx.status = 500 + ngx.say("Invoking the original URI didn't return the expected result.") + return + end + + ngx.status = res.status + ngx.say(res.body) + } + } +--- response_body_like +uri: /uri +cookie: .* +host: 127.0.0.1:1984 +user-agent: .* +x-access-token: ey.* +x-id-token: ey.* +x-real-ip: 127.0.0.1 +x-refresh-token: ey.* +x-userinfo: ey.* + + + +=== TEST 9: Re-configure plugin with respect to headers that get sent to upstream. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "realm": "University", + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "redirect_uri": "http://127.0.0.1:]] .. ngx.var.server_port .. [[/authenticated", + "ssl_verify": false, + "timeout": 10, + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect", + "set_access_token_header": true, + "access_token_in_authorization_header": true, + "set_id_token_header": false, + "set_userinfo_header": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: Access route w/o bearer token and go through the full OIDC Relying Party authentication process. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local login_keycloak = require("lib.keycloak").login_keycloak + local concatenate_cookies = require("lib.keycloak").concatenate_cookies + + local httpc = http.new() + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/uri" + local res, err = login_keycloak(uri, "teacher@gmail.com", "123456") + if err then + ngx.status = 500 + ngx.say(err) + return + end + + local cookie_str = concatenate_cookies(res.headers['Set-Cookie']) + -- Make the final call back to the original URI. + local redirect_uri = "http://127.0.0.1:" .. ngx.var.server_port .. res.headers['Location'] + res, err = httpc:request_uri(redirect_uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + -- No response, must be an error. + ngx.status = 500 + ngx.say(err) + return + elseif res.status ~= 200 then + -- Not a valid response. + -- Use 500 to indicate error. + ngx.status = 500 + ngx.say("Invoking the original URI didn't return the expected result.") + return + end + + ngx.status = res.status + ngx.say(res.body) + } + } +--- response_body_like +uri: /uri +authorization: Bearer ey.* +cookie: .* +host: 127.0.0.1:1984 +user-agent: .* +x-real-ip: 127.0.0.1 + + + +=== TEST 11: Update plugin with `bearer_only=true`. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: Access route w/o bearer token. Should return 401 (Unauthorized). 
+--- timeout: 10s +--- request +GET /hello +--- error_code: 401 +--- response_headers_like +WWW-Authenticate: Bearer realm="apisix" +--- error_log +OIDC introspection failed: No bearer token found in request. + + + +=== TEST 13: Access route with invalid Authorization header value. Should return 400 (Bad Request). +--- timeout: 10s +--- request +GET /hello +--- more_headers +Authorization: foo +--- error_code: 400 +--- error_log +OIDC introspection failed: Invalid Authorization header format. + + + +=== TEST 14: Update plugin with ID provider public key, so tokens can be validated locally. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw86xcJwNxL2MkWnjIGiw\n]] .. + [[94QY78Sq89dLqMdV/Ku2GIX9lYkbS0VDGtmxDGJLBOYW4cKTX+pigJyzglLgE+nD\n]] .. + [[z3VJf2oCqSV74gTyEdi7sw9e1rCyR6dR8VA7LEpIHwmhnDhhjXy1IYSKRdiVHLS5\n]] .. + [[sYmaAGckpUo3MLqUrgydGj5tFzvK/R/ELuZBdlZM+XuWxYry05r860E3uL+VdVCO\n]] .. + [[oU4RJQknlJnTRd7ht8KKcZb6uM14C057i26zX/xnOJpaVflA4EyEo99hKQAdr8Sh\n]] .. + [[G70MOLYvGCZxl1o8S3q4X67MxcPlfJaXnbog2AOOGRaFar88XiLFWTbXMCLuz7xD\n]] .. + [[zQIDAQAB\n]] .. 
+ [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256", + "claim_validator": { + "issuer": { + "valid_issuers": ["Mysoft corp"] + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: Access route with valid token. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = [[Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCBjb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.Vq_sBN7nH67vMDbiJE01EP4hvJYE_5ju6izjkOX8pF5OS4g2RWKWpL6h6-b0tTkCzG4JD5BEl13LWW-Gxxw0i9vEK0FLg_kC_kZLYB8WuQ6B9B9YwzmZ3OLbgnYzt_VD7D-7psEbwapJl5hbFsIjDgOAEx-UCmjUcl2frZxZavG2LUiEGs9Ri7KqOZmTLgNDMWfeWh1t1LyD0_b-eTInbasVtKQxMlb5kR0Ln_Qg5092L-irJ7dqaZma7HItCnzXJROdqJEsMIBAYRwDGa_w5kIACeMOdU85QKtMHzOenYFkm6zh_s59ndziTctKMz196Y8AL08xuTi6d1gEWpM92A]] + } + }) + ngx.status = res.status + if res.status == 200 then + ngx.say(true) + end + } + } +--- response_body +true + + + +=== TEST 16: Update route URI to '/uri' where upstream endpoint returns request headers in response body. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw86xcJwNxL2MkWnjIGiw\n]] .. + [[94QY78Sq89dLqMdV/Ku2GIX9lYkbS0VDGtmxDGJLBOYW4cKTX+pigJyzglLgE+nD\n]] .. + [[z3VJf2oCqSV74gTyEdi7sw9e1rCyR6dR8VA7LEpIHwmhnDhhjXy1IYSKRdiVHLS5\n]] .. + [[sYmaAGckpUo3MLqUrgydGj5tFzvK/R/ELuZBdlZM+XuWxYry05r860E3uL+VdVCO\n]] .. + [[oU4RJQknlJnTRd7ht8KKcZb6uM14C057i26zX/xnOJpaVflA4EyEo99hKQAdr8Sh\n]] .. + [[G70MOLYvGCZxl1o8S3q4X67MxcPlfJaXnbog2AOOGRaFar88XiLFWTbXMCLuz7xD\n]] .. + [[zQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256", + "claim_validator": { + "issuer": { + "valid_issuers": ["Mysoft corp"] + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/uri" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: Access route with valid token in `Authorization` header. Upstream should additionally get the token in the `X-Access-Token` header. 
+--- request +GET /uri HTTP/1.1 +--- more_headers +Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCBjb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.Vq_sBN7nH67vMDbiJE01EP4hvJYE_5ju6izjkOX8pF5OS4g2RWKWpL6h6-b0tTkCzG4JD5BEl13LWW-Gxxw0i9vEK0FLg_kC_kZLYB8WuQ6B9B9YwzmZ3OLbgnYzt_VD7D-7psEbwapJl5hbFsIjDgOAEx-UCmjUcl2frZxZavG2LUiEGs9Ri7KqOZmTLgNDMWfeWh1t1LyD0_b-eTInbasVtKQxMlb5kR0Ln_Qg5092L-irJ7dqaZma7HItCnzXJROdqJEsMIBAYRwDGa_w5kIACeMOdU85QKtMHzOenYFkm6zh_s59ndziTctKMz196Y8AL08xuTi6d1gEWpM92A +--- response_body_like +uri: /uri +authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCBjb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.Vq_sBN7nH67vMDbiJE01EP4hvJYE_5ju6izjkOX8pF5OS4g2RWKWpL6h6-b0tTkCzG4JD5BEl13LWW-Gxxw0i9vEK0FLg_kC_kZLYB8WuQ6B9B9YwzmZ3OLbgnYzt_VD7D-7psEbwapJl5hbFsIjDgOAEx-UCmjUcl2frZxZavG2LUiEGs9Ri7KqOZmTLgNDMWfeWh1t1LyD0_b-eTInbasVtKQxMlb5kR0Ln_Qg5092L-irJ7dqaZma7HItCnzXJROdqJEsMIBAYRwDGa_w5kIACeMOdU85QKtMHzOenYFkm6zh_s59ndziTctKMz196Y8AL08xuTi6d1gEWpM92A +host: localhost +x-access-token: eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCBjb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.Vq_sBN7nH67vMDbiJE01EP4hvJYE_5ju6izjkOX8pF5OS4g2RWKWpL6h6-b0tTkCzG4JD5BEl13LWW-Gxxw0i9vEK0FLg_kC_kZLYB8WuQ6B9B9YwzmZ3OLbgnYzt_VD7D-7psEbwapJl5hbFsIjDgOAEx-UCmjUcl2frZxZavG2LUiEGs9Ri7KqOZmTLgNDMWfeWh1t1LyD0_b-eTInbasVtKQxMlb5kR0Ln_Qg5092L-irJ7dqaZma7HItCnzXJROdqJEsMIBAYRwDGa_w5kIACeMOdU85QKtMHzOenYFkm6zh_s59ndziTctKMz196Y8AL08xuTi6d1gEWpM92A +x-real-ip: 127.0.0.1 +x-userinfo: ey.* +--- error_code: 200 + + + +=== TEST 18: Update plugin to only use `Authorization` header. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw86xcJwNxL2MkWnjIGiw\n]] .. + [[94QY78Sq89dLqMdV/Ku2GIX9lYkbS0VDGtmxDGJLBOYW4cKTX+pigJyzglLgE+nD\n]] .. + [[z3VJf2oCqSV74gTyEdi7sw9e1rCyR6dR8VA7LEpIHwmhnDhhjXy1IYSKRdiVHLS5\n]] .. + [[sYmaAGckpUo3MLqUrgydGj5tFzvK/R/ELuZBdlZM+XuWxYry05r860E3uL+VdVCO\n]] .. + [[oU4RJQknlJnTRd7ht8KKcZb6uM14C057i26zX/xnOJpaVflA4EyEo99hKQAdr8Sh\n]] .. + [[G70MOLYvGCZxl1o8S3q4X67MxcPlfJaXnbog2AOOGRaFar88XiLFWTbXMCLuz7xD\n]] .. + [[zQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256", + "set_access_token_header": true, + "access_token_in_authorization_header": true, + "set_id_token_header": false, + "set_userinfo_header": false, + "claim_validator": { + "issuer": { + "valid_issuers": ["Mysoft corp"] + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/uri" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: Access route with valid token in `Authorization` header. Upstream should not get the additional `X-Access-Token` header. 
+--- request +GET /uri HTTP/1.1 +--- more_headers +Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCBjb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.Vq_sBN7nH67vMDbiJE01EP4hvJYE_5ju6izjkOX8pF5OS4g2RWKWpL6h6-b0tTkCzG4JD5BEl13LWW-Gxxw0i9vEK0FLg_kC_kZLYB8WuQ6B9B9YwzmZ3OLbgnYzt_VD7D-7psEbwapJl5hbFsIjDgOAEx-UCmjUcl2frZxZavG2LUiEGs9Ri7KqOZmTLgNDMWfeWh1t1LyD0_b-eTInbasVtKQxMlb5kR0Ln_Qg5092L-irJ7dqaZma7HItCnzXJROdqJEsMIBAYRwDGa_w5kIACeMOdU85QKtMHzOenYFkm6zh_s59ndziTctKMz196Y8AL08xuTi6d1gEWpM92A +--- response_body +uri: /uri +authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCBjb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.Vq_sBN7nH67vMDbiJE01EP4hvJYE_5ju6izjkOX8pF5OS4g2RWKWpL6h6-b0tTkCzG4JD5BEl13LWW-Gxxw0i9vEK0FLg_kC_kZLYB8WuQ6B9B9YwzmZ3OLbgnYzt_VD7D-7psEbwapJl5hbFsIjDgOAEx-UCmjUcl2frZxZavG2LUiEGs9Ri7KqOZmTLgNDMWfeWh1t1LyD0_b-eTInbasVtKQxMlb5kR0Ln_Qg5092L-irJ7dqaZma7HItCnzXJROdqJEsMIBAYRwDGa_w5kIACeMOdU85QKtMHzOenYFkm6zh_s59ndziTctKMz196Y8AL08xuTi6d1gEWpM92A +host: localhost +x-real-ip: 127.0.0.1 +--- error_code: 200 + + + +=== TEST 20: Switch route URI back to `/hello`. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAw86xcJwNxL2MkWnjIGiw\n]] .. 
+ [[94QY78Sq89dLqMdV/Ku2GIX9lYkbS0VDGtmxDGJLBOYW4cKTX+pigJyzglLgE+nD\n]] .. + [[z3VJf2oCqSV74gTyEdi7sw9e1rCyR6dR8VA7LEpIHwmhnDhhjXy1IYSKRdiVHLS5\n]] .. + [[sYmaAGckpUo3MLqUrgydGj5tFzvK/R/ELuZBdlZM+XuWxYry05r860E3uL+VdVCO\n]] .. + [[oU4RJQknlJnTRd7ht8KKcZb6uM14C057i26zX/xnOJpaVflA4EyEo99hKQAdr8Sh\n]] .. + [[G70MOLYvGCZxl1o8S3q4X67MxcPlfJaXnbog2AOOGRaFar88XiLFWTbXMCLuz7xD\n]] .. + [[zQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256", + "claim_validator": { + "issuer": { + "valid_issuers": ["Mysoft corp"] + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: Access route with invalid token. Should return 401. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9" .. + ".eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk" .. + "4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCB" .. + "jb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.u1ISx7JbuK_GFRIUqIMP175FqX" .. + "RyF9V7y86480Q4N3jNxs3ePbc51TFtIHDrKttstU4Tub28PYVSlr-HXfjo7", + } + }) + ngx.status = res.status + if res.status == 200 then + ngx.say(true) + end + } + } +--- error_code: 401 +--- error_log +jwt signature verification failed + + + +=== TEST 22: Update route with Keycloak introspection endpoint and public key removed. Should now invoke introspection endpoint to validate tokens. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "realm": "University", + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: Obtain valid token and access route with it. +--- config + location /t { + content_by_lua_block { + -- Obtain valid access token from Keycloak using known username and password. + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. + if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. + if res.status == 200 then + -- Get access token from JSON response body. + local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. Should work. 
+ uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"] + } + }) + + if res.status == 200 then + -- Route accessed successfully. + ngx.say(true) + else + -- Couldn't access route. + ngx.say(false) + end + else + -- Response from Keycloak not ok. + ngx.say(false) + end + } + } +--- response_body +true +--- grep_error_log eval +qr/token validate successfully by \w+/ +--- grep_error_log_out +token validate successfully by introspection + + + +=== TEST 24: Access route with an invalid token. +--- config + location /t { + content_by_lua_block { + -- Access route using a fake access token. + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. "fake access token", + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + } + } +--- response_body +false +--- error_log +OIDC introspection failed: invalid token + + + +=== TEST 25: Check defaults. 
+--- config + location /t { + content_by_lua_block { + local json = require("t.toolkit.json") + local plugin = require("apisix.plugins.openid-connect") + local s = { + client_id = "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + client_secret = "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + discovery = "http://127.0.0.1:1980/.well-known/openid-configuration", + } + local ok, err = plugin.check_schema(s) + if not ok then + ngx.say(err) + end + + -- ensure session secret generated when bearer_only = false + -- then remove it from table, because it's a random value that I cannot verify it by response body + assert(s.session and s.session.secret, "no session secret generated") + s.session = nil + + ngx.say(json.encode(s)) + } + } +--- response_body +{"accept_none_alg":false,"accept_unsupported_alg":true,"access_token_expires_leeway":0,"access_token_in_authorization_header":false,"bearer_only":false,"client_id":"kbyuFDidLLm280LIwVFiazOqjO3ty8KH","client_jwt_assertion_expires_in":60,"client_secret":"60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa","discovery":"http://127.0.0.1:1980/.well-known/openid-configuration","force_reauthorize":false,"iat_slack":120,"introspection_endpoint_auth_method":"client_secret_basic","introspection_interval":0,"jwk_expires_in":86400,"jwt_verification_cache_ignore":false,"logout_path":"/logout","realm":"apisix","renew_access_token_on_expiry":true,"revoke_tokens_on_logout":false,"scope":"openid","set_access_token_header":true,"set_id_token_header":true,"set_refresh_token_header":false,"set_userinfo_header":true,"ssl_verify":false,"timeout":3,"token_endpoint_auth_method":"client_secret_basic","unauth_action":"auth","use_nonce":false,"use_pkce":false} + + + +=== TEST 26: Update plugin with ID provider jwks endpoint for token verification. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "use_jwks": true, + "realm": "University", + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 27: Obtain valid token and access route with it. +--- config + location /t { + content_by_lua_block { + -- Obtain valid access token from Keycloak using known username and password. + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. + if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. + if res.status == 200 then + -- Get access token from JSON response body. + local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. 
Should work. + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"] + } + }) + + if res.status == 200 then + -- Route accessed successfully. + ngx.say(true) + else + -- Couldn't access route. + ngx.say(false) + end + else + -- Response from Keycloak not ok. + ngx.say(false) + end + } + } +--- response_body +true +--- grep_error_log eval +qr/token validate successfully by \w+/ +--- grep_error_log_out +token validate successfully by jwks + + + +=== TEST 28: Access route with an invalid token. +--- config + location /t { + content_by_lua_block { + -- Access route using a fake access token. + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. "fake access token", + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + } + } +--- response_body +false +--- error_log +OIDC introspection failed: invalid jwt: invalid jwt string + + + +=== TEST 29: Modify route to match catch-all URI `/*` and add post_logout_redirect_uri option. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "realm": "University", + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "redirect_uri": "http://127.0.0.1:]] .. ngx.var.server_port .. 
[[/authenticated", + "ssl_verify": false, + "timeout": 10, + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect", + "set_access_token_header": true, + "access_token_in_authorization_header": false, + "set_id_token_header": true, + "set_userinfo_header": true, + "post_logout_redirect_uri": "http://127.0.0.1:]] .. ngx.var.server_port .. [[/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 30: Access route w/o bearer token and request logout to redirect to post_logout_redirect_uri. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local login_keycloak = require("lib.keycloak").login_keycloak + local concatenate_cookies = require("lib.keycloak").concatenate_cookies + + local httpc = http.new() + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/uri" + local res, err = login_keycloak(uri, "teacher@gmail.com", "123456") + if err then + ngx.status = 500 + ngx.say(err) + return + end + + local cookie_str = concatenate_cookies(res.headers['Set-Cookie']) + + -- Request the logout uri with the log-in cookie + local logout_uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/logout" + res, err = httpc:request_uri(logout_uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + if not res then + -- No response, must be an error + -- Use 500 to indicate error + ngx.status = 500 + ngx.say(err) + return + elseif res.status ~= 302 then + ngx.status = 500 + ngx.say("Request the logout URI didn't return the expected status.") + return + end + + -- Request the location, it's a URL of keycloak and contains the post_logout_redirect_uri + -- Like: + -- http://127.0.0.1:8080/realms/University/protocol/openid-connect/logout?post_logout_redirect=http://127.0.0.1:1984/hello + local location = res.headers["Location"] + res, err = httpc:request_uri(location, { + method = "GET" + }) + if not res then + ngx.status = 500 + ngx.say(err) + return + elseif res.status ~= 302 then + ngx.status = 500 + ngx.say("Request the keycloak didn't return the expected status.") + return + end + + ngx.status = 200 + ngx.say(res.headers["Location"]) + } + } +--- response_body_like +http://127.0.0.1:.*/hello + + + +=== TEST 31: Switch route URI back to `/hello` and enable pkce. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "http://127.0.0.1:1980/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "use_pkce": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 32: Access route w/o bearer token. 
Should redirect to authentication endpoint of ID provider with code_challenge parameters. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'scope=apisix') ~= -1 and + string.find(location, 'client_id=kbyuFDidLLm280LIwVFiazOqjO3ty8KH') ~= -1 and + string.find(location, 'response_type=code') ~= -1 and + string.find(location, 'redirect_uri=https://iresty.com') ~= -1 and + string.match(location, '.*code_challenge=.*') and + string.match(location, '.*code_challenge_method=S256.*') then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 302 + + + +=== TEST 33: set use_jwks and set_userinfo_header to validate "x-userinfo" in request header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "realm": "University", + "bearer_only": true, + "access_token_in_authorization_header": true, + "set_userinfo_header": true, + "use_jwks": true, + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + 
ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 34: Access route to validate "x-userinfo" in request header +--- config + location /t { + content_by_lua_block { + -- Obtain valid access token from Keycloak using known username and password. + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. + if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. + if res.status == 200 then + -- Get access token from JSON response body. + local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. Should work. + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/uri" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"] + } + }) + + if not res then + -- No response, must be an error. + ngx.status = 500 + ngx.say(err) + return + elseif res.status ~= 200 then + -- Not a valid response. + -- Use 500 to indicate error. + ngx.status = 500 + ngx.say("Invoking the original URI didn't return the expected result.") + return + end + + ngx.status = res.status + ngx.say(res.body) + + else + -- Response from Keycloak not ok. 
+ ngx.say(false) + end + } + } +--- response_body_like +x-userinfo: ey.* + + + +=== TEST 35: Set up new route with plugin matching URI `/*` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "post_logout_redirect_uri": "https://iresty.com", + "scope": "openid profile" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 36: Check whether auth0 can redirect normally using post_logout_redirect_uri configuration +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/logout" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://iresty.com') ~= -1 and + string.find(location, 'post_logout_redirect_uri=https://iresty.com') ~= -1 then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 302 + + + +=== TEST 37: Set up new route with plugin matching URI `/*` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "942299072001-vhduu1uljmdhhbbp7g22m3qsmo246a75.apps.googleusercontent.com", + "client_secret": "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", + "discovery": "https://accounts.google.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "post_logout_redirect_uri": "https://iresty.com", + "scope": "openid profile" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 38: Check whether google can redirect normally using post_logout_redirect_uri configuration +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/logout"
+            local res, err = httpc:request_uri(uri, {method = "GET"})
+            ngx.status = res.status
+            local location = res.headers['Location']
+            if location and string.find(location, 'https://iresty.com') ~= -1 and
+                string.find(location, 'post_logout_redirect_uri=https://iresty.com') ~= -1 then
+                ngx.say(true)
+            end
+        }
+    }
+--- timeout: 10s
+--- response_body
+true
+--- error_code: 302
+
+
+
+=== TEST 39: Update plugin config to use_jwks and bearer_only false
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                 ngx.HTTP_PUT,
+                [[{
+                    "plugins": {
+                        "openid-connect": {
+                            "client_id": "course_management",
+                            "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5",
+                            "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration",
+                            "redirect_uri": "http://localhost:3000",
+                            "ssl_verify": false,
+                            "timeout": 10,
+                            "bearer_only": false,
+                            "use_jwks": true,
+                            "realm": "University",
+                            "introspection_endpoint_auth_method": "client_secret_post",
+                            "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect"
+                        }
+                    },
+                    "upstream": {
+                        "nodes": {
+                            "127.0.0.1:1980": 1
+                        },
+                        "type": "roundrobin"
+                    },
+                    "uri": "/hello"
+                }]]
+                )
+
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 40: Test that jwt with bearer_only false still allows a valid Authorization header
+--- config
+    location /t {
+        content_by_lua_block {
+            -- Obtain valid access token from Keycloak using known username and password.
+ local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. + if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. + if res.status == 200 then + -- Get access token from JSON response body. + local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. Should work. + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"] + } + }) + + if res.status == 200 then + -- Route accessed successfully. + ngx.say(true) + else + -- Couldn't access route. + ngx.say(false) + end + else + -- Response from Keycloak not ok. 
+ ngx.say(false) + end + } + } +--- response_body +true +--- grep_error_log eval +qr/token validate successfully by \w+/ +--- grep_error_log_out +token validate successfully by jwks diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect/configuration.json b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect/configuration.json new file mode 100644 index 0000000..0788a9b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect/configuration.json @@ -0,0 +1,75 @@ +{ + "issuer": "https://samples.auth0.com/", + "authorization_endpoint": "https://samples.auth0.com/authorize", + "token_endpoint": "https://samples.auth0.com/oauth/token", + "device_authorization_endpoint": "https://samples.auth0.com/oauth/device/code", + "userinfo_endpoint": "https://samples.auth0.com/userinfo", + "mfa_challenge_endpoint": "https://samples.auth0.com/mfa/challenge", + "jwks_uri": "https://samples.auth0.com/.well-known/jwks.json", + "registration_endpoint": "https://samples.auth0.com/oidc/register", + "revocation_endpoint": "https://samples.auth0.com/oauth/revoke", + "scopes_supported": [ + "openid", + "profile", + "offline_access", + "name", + "given_name", + "family_name", + "nickname", + "email", + "email_verified", + "picture", + "created_at", + "identities", + "phone", + "address" + ], + "response_types_supported": [ + "code", + "token", + "id_token", + "code token", + "code id_token", + "token id_token", + "code token id_token" + ], + "code_challenge_methods_supported": [ + "S256", + "plain" + ], + "response_modes_supported": [ + "query", + "fragment", + "form_post" + ], + "subject_types_supported": [ + "public" + ], + "id_token_signing_alg_values_supported": [ + "HS256", + "RS256" + ], + "token_endpoint_auth_methods_supported": [ + "client_secret_basic", + "client_secret_post" + ], + "claims_supported": [ + "aud", + "auth_time", + "created_at", + "email", + "email_verified", + "exp", + "family_name", + "given_name", + "iat", + 
"identities", + "iss", + "name", + "nickname", + "phone_number", + "picture", + "sub" + ], + "request_uri_parameter_supported": false +} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect2.t new file mode 100644 index 0000000..bbb775b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect2.t @@ -0,0 +1,403 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + { + name = "sanity (bearer_only = true)", + data = {client_id = "a", client_secret = "b", discovery = "c", bearer_only = true}, + cb = function(ok, err, case) + assert(ok and not case.session, "not expect session was generated") + end, + }, + { + name = "sanity (bearer_only = false)", + data = {client_id = "a", client_secret = "b", discovery = "c", bearer_only = false}, + cb = function(ok, err, case) + assert(ok and case.session and case.session.secret, "no session secret generated") + end, + }, + { + name = "sanity (bearer_only = false, user-set secret, less than 16 characters)", + data = {client_id = "a", client_secret = "b", discovery = "c", bearer_only = false, session = {secret = "test"}}, + cb = function(ok, err, case) + assert(not ok and err == "property \"session\" validation failed: property \"secret\" validation failed: string too short, expected at least 16, got 4", "too short key passes validation") + end, + }, + { + name = "sanity (bearer_only = false, user-set secret, more than 16 characters)", + data = {client_id = "a", client_secret = "b", discovery = "c", bearer_only = false, session = {secret = "test_secret_more_than_16"}}, + cb = function(ok, err, case) + assert(ok and case.session and case.session.secret and case.session.secret == "test_secret_more_than_16", "user-set secret is incorrect") + end, + }, + } + + local plugin = require("apisix.plugins.openid-connect") + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case.data) + case.cb(ok, err, 
case.data) + end + } + } + + + +=== TEST 2: data encryption for client_secret +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "http://127.0.0.1:1980/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["openid-connect"].client_secret) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["openid-connect"].client_secret) + } + } +--- response_body +60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa +xMlerg8pE2lPSDlQdPi+MsAwBnzqpyLRar3lUhP2Tdc2oXnWmit92p8cannhDYkBPc6P/Hlx0wSA0T2wle9QyHaW2oqw3bXDQSWWk8Vqq0o= + + + +=== TEST 3: Set up route with plugin matching URI `/hello` with unauth_action = "auth". 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "http://127.0.0.1:1980/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "unauth_action": "auth", + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: Access route w/o bearer token. Should redirect to authentication endpoint of ID provider. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'scope=apisix') ~= -1 and + string.find(location, 'client_id=kbyuFDidLLm280LIwVFiazOqjO3ty8KH') ~= -1 and + string.find(location, 'response_type=code') ~= -1 and + string.find(location, 'redirect_uri=https://iresty.com') ~= -1 then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 302 + + + +=== TEST 5: Set up route with plugin matching URI `/hello` with unauth_action = "deny". 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "http://127.0.0.1:1980/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "unauth_action": "deny", + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: Access route w/o bearer token. Should return unauthorized. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + ngx.say(true) + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 401 + + + +=== TEST 7: Set up route with plugin matching URI `/hello` with unauth_action = "pass". 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "http://127.0.0.1:1980/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "unauth_action": "pass", + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: Access route w/o bearer token. Should return ok. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + if res.status == 200 then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true + + + +=== TEST 9: Set up route with plugin matching URI `/hello` with redirect_uri use default value. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "http://127.0.0.1:1980/.well-known/openid-configuration", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "unauth_action": "auth", + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: The value of redirect_uri should be appended to `.apisix/redirect` in the original request. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local redirect_uri = uri .. "/.apisix/redirect" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'scope=apisix') ~= -1 and + string.find(location, 'client_id=kbyuFDidLLm280LIwVFiazOqjO3ty8KH') ~= -1 and + string.find(location, 'response_type=code') ~= -1 and + string.find(location, 'redirect_uri=' .. 
redirect_uri) ~= -1 then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 302 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect3.t new file mode 100644 index 0000000..84eb7c2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect3.t @@ -0,0 +1,111 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Set up new route access the auth server via http proxy +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "proxy_opts": { + "http_proxy": "http://127.0.0.1:8080", + "http_proxy_authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQK" + }, + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + } + } +--- response_body +passed + + + +=== TEST 2: Access route w/o bearer token. Should redirect to authentication endpoint of ID provider. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'scope=apisix') ~= -1 and + string.find(location, 'client_id=kbyuFDidLLm280LIwVFiazOqjO3ty8KH') ~= -1 and + string.find(location, 'response_type=code') ~= -1 and + string.find(location, 'redirect_uri=https://iresty.com') ~= -1 then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 302 +--- error_log +use http proxy diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect4.t b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect4.t new file mode 100644 index 0000000..9df55be --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect4.t @@ -0,0 +1,311 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Set up new route access the auth server with header test +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "authorization_params":{ + "test":"abc" + }, + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "proxy_opts": { + "http_proxy": "http://127.0.0.1:8080", + "http_proxy_authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQK" + }, + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + } + } +--- response_body +passed + + + +=== TEST 2: Check the uri of the authorization endpoint for passed headers +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'test=abc') ~= -1 then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 302 +--- error_log +use http proxy + + + +=== TEST 3: Set an unsupported scope in the required scopes field +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + require("apisix.plugins.openid-connect") + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "realm": "University", + "required_scopes": ["unsupported"], + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: Access route +--- config + location /t { + content_by_lua_block { + -- Obtain valid access token from Keycloak using known username and password. 
+ local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. + if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. + if res.status == 200 then + -- Get access token from JSON response body. + local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. Should work. + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"] + } + }) + if res.status == 200 then + -- Route accessed successfully. + ngx.say(true) + else + -- Couldn't access route. + ngx.say(false) + end + else + -- Response from Keycloak not ok. 
+ ngx.say(false) + end + } + } +--- error_log +required scopes not present + + + +=== TEST 5: Set a supported scope in the required scopes field +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + require("apisix.plugins.openid-connect") + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "realm": "University", + "required_scopes": ["profile"], + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: Access route +--- config + location /t { + content_by_lua_block { + -- Obtain valid access token from Keycloak using known username and password. + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. + if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. 
+ if res.status == 200 then + -- Get access token from JSON response body. + local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. Should work. + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"] + } + }) + if res.status == 200 then + -- Route accessed successfully. + ngx.say(true) + else + -- Couldn't access route. + ngx.say(false) + end + else + -- Response from Keycloak not ok. + ngx.say(false) + end + } + } +--- response_body +true diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect5.t b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect5.t new file mode 100644 index 0000000..53d16ed --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect5.t @@ -0,0 +1,233 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Call to route with locking session storage should not block subsequent requests with same session +--- config + set $session_storage redis; + set $session_redis_prefix sessions; + set $session_redis_database 0; + set $session_redis_connect_timeout 1000; # (in milliseconds) + set $session_redis_send_timeout 1000; # (in milliseconds) + set $session_redis_read_timeout 1000; # (in milliseconds) + set $session_redis_host 127.0.0.1; + set $session_redis_port 6379; + set $session_redis_ssl off; + set $session_redis_ssl_verify off; + set $session_redis_uselocking on; + set $session_redis_spinlockwait 150; # (in milliseconds) + set $session_redis_maxlockwait 30; # (in seconds) + + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local login_keycloak = require("lib.keycloak").login_keycloak + local concatenate_cookies = require("lib.keycloak").concatenate_cookies + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "realm": "University", + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "redirect_uri": "http://127.0.0.1:]] .. ngx.var.server_port .. 
[[/authenticated", + "ssl_verify": false, + "bearer_only" : false, + "timeout": 10, + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect", + "set_access_token_header": true, + "access_token_in_authorization_header": false, + "set_id_token_header": true, + "set_userinfo_header": true, + "set_refresh_token_header": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + + local res, err = login_keycloak(uri, "teacher@gmail.com", "123456") + if err then + ngx.status = 500 + ngx.say(err) + return + end + + local cookie_str = concatenate_cookies(res.headers['Set-Cookie']) + local redirect_uri = "http://127.0.0.1:" .. ngx.var.server_port .. res.headers['Location'] + + -- Make the final call to protected route + local function firstRequest() + local httpc = http.new() + httpc:request_uri(redirect_uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + end + + ngx.thread.spawn(firstRequest) + + -- Make second call to protected route which should not timeout due to blocked session + local httpc = http.new() + httpc:set_timeout(2000) + + res, err = httpc:request_uri(redirect_uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if err then + ngx.say("request error: ", err) + return + end + + ngx.say(res.body) + } + } +--- response_body_like +hello world + + + +=== TEST 2: Call to route with locking session storage, no authentication and unauth_action 'deny' should not block subsequent requests on same session +--- config + set $session_storage redis; + set $session_redis_uselocking on; + + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local login_keycloak = require("lib.keycloak").login_keycloak + local 
concatenate_cookies = require("lib.keycloak").concatenate_cookies + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "realm": "University", + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "redirect_uri": "http://127.0.0.1:]] .. ngx.var.server_port .. [[/authenticated", + "ssl_verify": false, + "unauth_action": "deny" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + + -- Make the final call to protected route WITHOUT cookie + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + + -- Extract cookie which is not authenticated + local cookie_str = concatenate_cookies(res.headers['Set-Cookie']) + + -- Make the call to protected route with cookie + local function firstRequest() + local httpc = http.new() + + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + + if not res then + ngx.log(ngx.ERR, "request failed with err: ", err) + return + end + return res + end + + local thread = ngx.thread.spawn(firstRequest) + ok, res = ngx.thread.wait(thread) + + if not ok then + ngx.log(ngx.ERR, "First request did not complete: ", res) + return + end + + if res.status ~= 401 then + ngx.log(ngx.ERR, "Expected status 401 received: ", res.status) + return + end + + -- Make second call to protected route and same cookie which should not timeout due to a blocked session + local httpc = http.new() + httpc:set_timeout(2000) + + res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Cookie"] = cookie_str + } + }) + ngx.status = res.status + } + } +--- error_code: 401 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect6.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect6.t new file mode 100644 index 0000000..2406c13 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect6.t @@ -0,0 +1,365 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +# no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Check configuration of cookie +--- config + location /t { + content_by_lua_block { + local test_cases = { + { + client_id = "course_management", + client_secret = "tbsmDOpsHwdgIqYl2NltGRTKzjIzvEmT", + discovery = "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + session = { + secret = "6S8IO+Pydgb33LIor8T9ClER0T/sglFAjClFeAF3RsY=", + cookie = { + lifetime = 86400 + } + } + }, + } + local plugin = require("apisix.plugins.openid-connect") + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +done + + + +=== TEST 2: Set up new route access the auth server +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "realm": "University", + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "redirect_uri": "http://127.0.0.1:]] .. ngx.var.server_port .. 
[[/authenticated", + "ssl_verify": false, + "bearer_only" : false, + "timeout": 10, + "introspection_endpoint_auth_method": "client_secret_post", + "required_scopes": ["profile"], + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect", + "set_access_token_header": true, + "access_token_in_authorization_header": false, + "set_id_token_header": true, + "set_userinfo_header": true, + "set_refresh_token_header": true, + "session": { + "secret": "jwcE5v3pM9VhqLxmxFOH9uZaLo8u7KQK", + "cookie": { + "lifetime": 86400 + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: Call to route to get session +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local login_keycloak = require("lib.keycloak").login_keycloak + local concatenate_cookies = require("lib.keycloak").concatenate_cookies + + local current_time = os.time() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + + local res, err = login_keycloak(uri, "teacher@gmail.com", "123456") + if err then + ngx.status = 500 + ngx.say(err) + return + end + + local cookie_str = concatenate_cookies(res.headers['Set-Cookie']) + local parts = {} + for part in string.gmatch(cookie_str, "[^|]+") do + table.insert(parts, part) + end + local target_number = tonumber(parts[2], 10) - 86400 + -- ngx.say(target_number, current_time) + if target_number >= current_time then + ngx.say("passed") + end + } + } +--- response_body +passed + + + +=== TEST 4: Update route with Keycloak introspection endpoint and introspection addon headers. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "realm": "University", + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect", + "introspection_addon_headers": ["X-Addon-Header-A", "X-Addon-Header-B"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: Obtain valid token and access route with it, introspection work as expected when configured extras headers. +--- config + location /t { + content_by_lua_block { + -- Obtain valid access token from Keycloak using known username and password. + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. + if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. + if res.status == 200 then + -- Get access token from JSON response body. 
+ local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. Should work. + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"], + ["X-Addon-Header-A"] = "Value-A", + ["X-Addon-Header-B"] = "Value-b" + } + }) + + if res.status == 200 then + -- Route accessed successfully. + ngx.say(true) + else + -- Couldn't access route. + ngx.say(false) + end + else + -- Response from Keycloak not ok. + ngx.say(false) + end + } + } +--- response_body +true +--- error_log +token validate successfully by introspection + + + +=== TEST 6: Access route with an invalid token, should fail. +--- config + location /t { + content_by_lua_block { + -- Access route using a fake access token. + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
"fake access token", + ["X-Addon-Header-A"] = "Value-A", + ["X-Addon-Header-B"] = "Value-b" + } + }) + + if res.status == 200 then + ngx.say(true) + else + ngx.say(false) + end + } + } +--- response_body +false +--- error_log +OIDC introspection failed: invalid token + + + +=== TEST 7: Update route with fake Keycloak introspection endpoint and introspection addon headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "course_management", + "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "realm": "University", + "introspection_endpoint_auth_method": "client_secret_post", + "introspection_endpoint": "http://127.0.0.1:1980/log_request", + "introspection_addon_headers": ["X-Addon-Header-A", "X-Addon-Header-B"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: Check http headers from fake introspection endpoint. +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. 
"fake access token", + ["X-Addon-Header-A"] = "Value-A", + ["X-Addon-Header-B"] = "Value-b" + } + }) + ngx.status = res.status + } + } +--- error_code: 401 +--- error_log +OIDC introspection failed: JSON decoding failed +--- grep_error_log eval +qr/x-addon-header-.{10}/ +--- grep_error_log_out +x-addon-header-a: Value-A +x-addon-header-b: Value-b diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect7.t b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect7.t new file mode 100644 index 0000000..b1999f1 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect7.t @@ -0,0 +1,473 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +# no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Create route (jwt local, audience required) +It reuses Keycloak's TLS private key to export the public key. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "apisix", + "client_secret": "secret", + "discovery": "http://127.0.0.1:8080/realms/basic/.well-known/openid-configuration", + "bearer_only": true, + "claim_validator": { + "audience": { + "required": true + } + }, + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvxeMCu3jE1QChgzCwlxP\n]] .. + [[mOkRHQORlOvwGpCX9zRCkMAq7a6jvlQTyM+OOfnnX9xBF4YxRRj3VOqdBJBdEjC2\n]] .. + [[jLFQUECdqnD+hZaCGIsk91grP4G7XaFqud7nAH1rniMh1rKLy3NFYTl5tK4U2IPP\n]] .. + [[JzIye8ur2JHyzE+qpcAEp/U6M4I2rdPX1gE2ze8gYuIr1VbCg6Nkt45DslZ2GDI8\n]] .. + [[2TtwkpMlEjJfmbEnrLHkigPXNs6IHyiFPN95462gPG5TBX3YpxDCP/cnHhMeeyFI\n]] .. + [[56WNYlhy0iLYmRfiyhKXi76fYKa/PIIUfOSErrKgKsHJp7HQKo48O4Gz5tQyL1IF\n]] .. + [[QQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: Access route with a valid token (with audience) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local res, err = httpc:request_uri("http://127.0.0.1:8080/realms/basic/protocol/openid-connect/token", { + method = "POST", + body = "client_id=apisix&client_secret=secret&grant_type=password&username=jack&password=jack", + headers = { ["Content-Type"] = "application/x-www-form-urlencoded" } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + local access_token = require("toolkit.json").decode(res.body).access_token + local res, err = httpc:request_uri("http://127.0.0.1:1980/hello", { + method = "GET", + headers = { Authorization = 
"Bearer " .. access_token } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + ngx.status = res.status + } + } + + + +=== TEST 3: Update route (jwt local, audience required, custom claim) +Use a custom non-existent claim to simulate the case where the standard field "aud" is not included. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "apisix", + "client_secret": "secret", + "discovery": "http://127.0.0.1:8080/realms/basic/.well-known/openid-configuration", + "bearer_only": true, + "claim_validator": { + "audience": { + "claim": "custom_claim", + "required": true + } + }, + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvxeMCu3jE1QChgzCwlxP\n]] .. + [[mOkRHQORlOvwGpCX9zRCkMAq7a6jvlQTyM+OOfnnX9xBF4YxRRj3VOqdBJBdEjC2\n]] .. + [[jLFQUECdqnD+hZaCGIsk91grP4G7XaFqud7nAH1rniMh1rKLy3NFYTl5tK4U2IPP\n]] .. + [[JzIye8ur2JHyzE+qpcAEp/U6M4I2rdPX1gE2ze8gYuIr1VbCg6Nkt45DslZ2GDI8\n]] .. + [[2TtwkpMlEjJfmbEnrLHkigPXNs6IHyiFPN95462gPG5TBX3YpxDCP/cnHhMeeyFI\n]] .. + [[56WNYlhy0iLYmRfiyhKXi76fYKa/PIIUfOSErrKgKsHJp7HQKo48O4Gz5tQyL1IF\n]] .. + [[QQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: Access route with an invalid token (without audience) +Use a custom non-existent claim to simulate the case where the standard field "aud" is not included. +Note the assertion in the error log, where it is shown that the custom claim field name did take effect. 
+--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local res, err = httpc:request_uri("http://127.0.0.1:8080/realms/basic/protocol/openid-connect/token", { + method = "POST", + body = "client_id=apisix&client_secret=secret&grant_type=password&username=jack&password=jack", + headers = { ["Content-Type"] = "application/x-www-form-urlencoded" } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + local access_token = require("toolkit.json").decode(res.body).access_token + res, err = httpc:request_uri("http://127.0.0.1:"..ngx.var.server_port.."/hello", { + method = "GET", + headers = { Authorization = "Bearer " .. access_token } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + ngx.status = res.status + ngx.say(res.body) + } + } +--- error_code: 403 +--- response_body +{"error":"required audience claim not present"} +--- error_log +OIDC introspection failed: required audience (custom_claim) not present + + + +=== TEST 5: Update route (jwt local, audience required, custom claim) +Use "iss" to fake "aud". +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "apisix", + "client_secret": "secret", + "discovery": "http://127.0.0.1:8080/realms/basic/.well-known/openid-configuration", + "bearer_only": true, + "claim_validator": { + "audience": { + "claim": "iss", + "required": true + } + }, + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvxeMCu3jE1QChgzCwlxP\n]] .. + [[mOkRHQORlOvwGpCX9zRCkMAq7a6jvlQTyM+OOfnnX9xBF4YxRRj3VOqdBJBdEjC2\n]] .. + [[jLFQUECdqnD+hZaCGIsk91grP4G7XaFqud7nAH1rniMh1rKLy3NFYTl5tK4U2IPP\n]] .. + [[JzIye8ur2JHyzE+qpcAEp/U6M4I2rdPX1gE2ze8gYuIr1VbCg6Nkt45DslZ2GDI8\n]] .. + [[2TtwkpMlEjJfmbEnrLHkigPXNs6IHyiFPN95462gPG5TBX3YpxDCP/cnHhMeeyFI\n]] .. 
+ [[56WNYlhy0iLYmRfiyhKXi76fYKa/PIIUfOSErrKgKsHJp7HQKo48O4Gz5tQyL1IF\n]] .. + [[QQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: Access route with an valid token (with custom audience claim) +Use "iss" to fake "aud". +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local res, err = httpc:request_uri("http://127.0.0.1:8080/realms/basic/protocol/openid-connect/token", { + method = "POST", + body = "client_id=apisix&client_secret=secret&grant_type=password&username=jack&password=jack", + headers = { ["Content-Type"] = "application/x-www-form-urlencoded" } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + local access_token = require("toolkit.json").decode(res.body).access_token + res, err = httpc:request_uri("http://127.0.0.1:"..ngx.var.server_port.."/hello", { + method = "GET", + headers = { Authorization = "Bearer " .. access_token } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + ngx.status = res.status + ngx.say(res.body) + } + } + + + +=== TEST 7: Update route (jwt local, audience required, match client_id) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "apisix", + "client_secret": "secret", + "discovery": "http://127.0.0.1:8080/realms/basic/.well-known/openid-configuration", + "bearer_only": true, + "claim_validator": { + "audience": { + "required": true, + "match_with_client_id": true + } + }, + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. 
+ [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvxeMCu3jE1QChgzCwlxP\n]] .. + [[mOkRHQORlOvwGpCX9zRCkMAq7a6jvlQTyM+OOfnnX9xBF4YxRRj3VOqdBJBdEjC2\n]] .. + [[jLFQUECdqnD+hZaCGIsk91grP4G7XaFqud7nAH1rniMh1rKLy3NFYTl5tK4U2IPP\n]] .. + [[JzIye8ur2JHyzE+qpcAEp/U6M4I2rdPX1gE2ze8gYuIr1VbCg6Nkt45DslZ2GDI8\n]] .. + [[2TtwkpMlEjJfmbEnrLHkigPXNs6IHyiFPN95462gPG5TBX3YpxDCP/cnHhMeeyFI\n]] .. + [[56WNYlhy0iLYmRfiyhKXi76fYKa/PIIUfOSErrKgKsHJp7HQKo48O4Gz5tQyL1IF\n]] .. + [[QQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: Access route with an valid token (with client id as audience) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local res, err = httpc:request_uri("http://127.0.0.1:8080/realms/basic/protocol/openid-connect/token", { + method = "POST", + body = "client_id=apisix&client_secret=secret&grant_type=password&username=jack&password=jack", + headers = { ["Content-Type"] = "application/x-www-form-urlencoded" } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + local access_token = require("toolkit.json").decode(res.body).access_token + res, err = httpc:request_uri("http://127.0.0.1:"..ngx.var.server_port.."/hello", { + method = "GET", + headers = { Authorization = "Bearer " .. access_token } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + ngx.status = res.status + ngx.say(res.body) + } + } + + + +=== TEST 9: Update route (jwt local, audience required, match client_id) +Use the apisix-no-aud client. According to Keycloak's default implementation, when unconfigured, +only the account is listed as an audience, not the client id. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "apisix-no-aud", + "client_secret": "secret", + "discovery": "http://127.0.0.1:8080/realms/basic/.well-known/openid-configuration", + "bearer_only": true, + "claim_validator": { + "audience": { + "required": true, + "match_with_client_id": true + } + }, + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvxeMCu3jE1QChgzCwlxP\n]] .. + [[mOkRHQORlOvwGpCX9zRCkMAq7a6jvlQTyM+OOfnnX9xBF4YxRRj3VOqdBJBdEjC2\n]] .. + [[jLFQUECdqnD+hZaCGIsk91grP4G7XaFqud7nAH1rniMh1rKLy3NFYTl5tK4U2IPP\n]] .. + [[JzIye8ur2JHyzE+qpcAEp/U6M4I2rdPX1gE2ze8gYuIr1VbCg6Nkt45DslZ2GDI8\n]] .. + [[2TtwkpMlEjJfmbEnrLHkigPXNs6IHyiFPN95462gPG5TBX3YpxDCP/cnHhMeeyFI\n]] .. + [[56WNYlhy0iLYmRfiyhKXi76fYKa/PIIUfOSErrKgKsHJp7HQKo48O4Gz5tQyL1IF\n]] .. + [[QQIDAQAB\n]] .. + [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: Access route with an invalid token (without client id as audience) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local res, err = httpc:request_uri("http://127.0.0.1:8080/realms/basic/protocol/openid-connect/token", { + method = "POST", + body = "client_id=apisix-no-aud&client_secret=secret&grant_type=password&username=jack&password=jack", + headers = { ["Content-Type"] = "application/x-www-form-urlencoded" } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + local access_token = require("toolkit.json").decode(res.body).access_token + res, err = 
httpc:request_uri("http://127.0.0.1:"..ngx.var.server_port.."/hello", { + method = "GET", + headers = { Authorization = "Bearer " .. access_token } + }) + if not res then + ngx.say("FAILED: ", err) + return + end + ngx.status = res.status + ngx.say(res.body) + } + } +--- error_code: 403 +--- response_body +{"error":"mismatched audience"} +--- error_log +OIDC introspection failed: audience does not match the client id diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect8.t b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect8.t new file mode 100644 index 0000000..6cd5eed --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openid-connect8.t @@ -0,0 +1,444 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +# no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Set up new route with wrong valid_issuers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "dummy", + "client_secret": "dummy", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "ssl_verify": true, + "timeout": 10, + "bearer_only": true, + "use_jwks": true, + "claim_validator": { + "issuer": { + "valid_issuers": 123 + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"failed to check the configuration of plugin openid-connect err: property \\"claim_validator\\" validation failed.*"\}/ + + + +=== TEST 2: Set up new route with valid valid_issuers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "dummy", + "client_secret": "dummy", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "ssl_verify": true, + "timeout": 10, + "bearer_only": true, + "use_jwks": true, + "claim_validator": { + "issuer": { + "valid_issuers": ["https://securetoken.google.com/test-firebase-project"] + } + } + } + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: Update plugin with ID provider jwks endpoint for token verification with invalid issuer. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "not required", + "client_secret": "not required", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "use_jwks": true, + "realm": "University", + "claim_validator": { + "issuer": { + "valid_issuers": ["https://securetoken.google.com/test-firebase-project"] + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: verification fails because issuer not in valid_issuer +--- config + location /t { + content_by_lua_block { + -- Obtain valid access token from Keycloak using known username and password. + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. 
+ if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. + if res.status == 200 then + -- Get access token from JSON response body. + local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. Should work. + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"] + } + }) + + if res.status == 200 then + -- Route accessed successfully. + ngx.say(true) + else + -- Couldn't access route. + ngx.say(false) + end + else + -- Response from Keycloak not ok. + ngx.say(false) + end + } + } +--- response_body +false +--- error_log +OIDC introspection failed: jwt signature verification failed: Claim 'iss' ('http://127.0.0.1:8080/realms/University') returned failure + + + +=== TEST 5: Update plugin with ID provider jwks endpoint for token verification with valid issuer. +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "dummy", + "client_secret": "dummy", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "use_jwks": true, + "realm": "University", + "claim_validator": { + "issuer": { + "valid_issuers": ["http://127.0.0.1:8080/realms/University"] + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: Obtain valid token and access route with it. 
+--- config + location /t { + content_by_lua_block { + -- Obtain valid access token from Keycloak using known username and password. + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. + if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. + if res.status == 200 then + -- Get access token from JSON response body. + local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. Should work. + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"] + } + }) + + if res.status == 200 then + -- Route accessed successfully. + ngx.say(true) + else + -- Couldn't access route. + ngx.say(false) + end + else + -- Response from Keycloak not ok. + ngx.say(false) + end + } + } +--- response_body +true +--- grep_error_log eval +qr/token validate successfully by \w+/ +--- grep_error_log_out +token validate successfully by jwks + + + +=== TEST 7: Update plugin with ID provider jwks endpoint for token verification with valid issuer in discovery endpoint. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "dummy", + "client_secret": "dummy", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", + "redirect_uri": "http://localhost:3000", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "use_jwks": true, + "realm": "University" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: Obtain valid token and access route with it. Use valid_issuer from discovery endpoint. +--- config + location /t { + content_by_lua_block { + -- Obtain valid access token from Keycloak using known username and password. + local json_decode = require("toolkit.json").decode + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" + local res, err = httpc:request_uri(uri, { + method = "POST", + body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", + headers = { + ["Content-Type"] = "application/x-www-form-urlencoded" + } + }) + + -- Check response from keycloak and fail quickly if there's no response. + if not res then + ngx.say(err) + return + end + + -- Check if response code was ok. + if res.status == 200 then + -- Get access token from JSON response body. + local body = json_decode(res.body) + local accessToken = body["access_token"] + + -- Access route using access token. Should work. + uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer " .. body["access_token"] + } + }) + + if res.status == 200 then + -- Route accessed successfully. + ngx.say(true) + else + -- Couldn't access route. + ngx.say(false) + end + else + -- Response from Keycloak not ok. + ngx.say(false) + end + } + } +--- response_body +true +--- grep_error_log eval +qr/token validate successfully by \w+/ +--- grep_error_log_out +token validate successfully by jwks +--- error_log +valid_issuers not provided explicitly, using issuer from discovery doc: http://127.0.0.1:8080/realms/University diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry.t b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry.t new file mode 100644 index 0000000..daf91e3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry.t @@ -0,0 +1,436 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - opentelemetry +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!defined $block->response_body) { + $block->set_value("response_body", "passed\n"); + } + $block; +}); +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("debug"); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/opentelemetry', + ngx.HTTP_PUT, + [[{ + "batch_span_processor": { + "max_export_batch_size": 1, + "inactive_timeout": 0.5 + }, + "collector": { + "address": "127.0.0.1:4318", + "request_timeout": 3, + "request_headers": { + "foo": "bar" + } + }, + "trace_id_source": "x-request-id" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 2: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 3: trigger opentelemetry +--- request +GET /opentracing +--- wait: 2 +--- response_body +opentracing + + + +=== TEST 4: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*opentelemetry-lua.*/ + + + +=== TEST 5: use trace_id_ratio sampler, fraction = 1.0 +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opentelemetry": { + "sampler": { + "name": "trace_id_ratio", + "options": { + "fraction": 1.0 + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 6: trigger opentelemetry +--- request +GET /opentracing +--- wait: 2 +--- response_body +opentracing + + + +=== TEST 7: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*opentelemetry-lua.*/ + + + +=== TEST 8: use parent_base sampler, root sampler = trace_id_ratio with default fraction = 0 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opentelemetry": { + "sampler": { + "name": "parent_base", + "options": { + "root": { + "name": "trace_id_ratio" + } + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 9: trigger opentelemetry, trace_flag = 1 +--- request +GET /opentracing +--- more_headers +traceparent: 00-00000000000000000000000000000001-0000000000000001-01 +--- wait: 2 +--- response_body +opentracing + + + +=== TEST 10: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*"traceId":"00000000000000000000000000000001",.*/ + + + +=== TEST 11: use parent_base sampler, root sampler = trace_id_ratio with fraction = 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + 
"opentelemetry": { + "sampler": { + "name": "parent_base", + "options": { + "root": { + "name": "trace_id_ratio", + "options": { + "fraction": 1.0 + } + } + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 12: trigger opentelemetry, trace_flag = 1 +--- request +GET /opentracing +--- more_headers +traceparent: 00-00000000000000000000000000000001-0000000000000001-01 +--- wait: 2 +--- response_body +opentracing + + + +=== TEST 13: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*"traceId":"00000000000000000000000000000001",.*/ + + + +=== TEST 14: set additional_attributes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "name": "service_name", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "route_name", + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + }, + "additional_attributes": [ + "http_user_agent", + "arg_foo", + "cookie_token", + "remote_addr" + ] + } + }, + "uri": "/opentracing", + "service_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 15: trigger opentelemetry +--- request +GET /opentracing?foo=bar&a=b +--- more_headers +X-Request-Id: 01010101010101010101010101010101 +User-Agent: test_nginx +Cookie: token=auth_token; +--- wait: 2 +--- response_body +opentracing + + + +=== TEST 16: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval 
+qr/.*\/opentracing\?foo=bar.*/ + + + +=== TEST 17: create route for /specific_status +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "route_name", + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + } + } + }, + "uri": "/specific_status", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + +=== TEST 18: test response empty body +--- request +HEAD /specific_status +--- response_body +--- wait: 2 + + + +=== TEST 19: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*\/specific_status.*/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry2.t new file mode 100644 index 0000000..6129f44 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry2.t @@ -0,0 +1,136 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - example-plugin + - key-auth + - opentelemetry +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/opentelemetry', + ngx.HTTP_PUT, + [[{ + "batch_span_processor": { + "max_export_batch_size": 1, + "inactive_timeout": 0.5 + }, + "trace_id_source": "x-request-id" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + + + +=== TEST 2: trace request rejected by auth +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": {}, + "example-plugin": {"i": 1}, + "opentelemetry": { + "sampler": { + "name": "always_on" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: trigger opentelemetry +--- request +GET /hello +--- error_code: 401 +--- wait: 2 + + + +=== TEST 4: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*\/hello.*/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry3.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry3.t new file mode 100644 index 0000000..ff7ea14 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry3.t @@ -0,0 +1,203 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - http-logger + - opentelemetry +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $upstream_server_config = $block->upstream_server_config // <<_EOC_; + set \$opentelemetry_context_traceparent ""; + set \$opentelemetry_trace_id ""; + set \$opentelemetry_span_id ""; + access_log logs/error.log opentelemetry_log; +_EOC_ + + $block->set_value("upstream_server_config", $upstream_server_config); + + my $http_config = $block->http_config // <<_EOC_; + log_format opentelemetry_log '{"time": "\$time_iso8601","opentelemetry_context_traceparent": "\$opentelemetry_context_traceparent","opentelemetry_trace_id": "\$opentelemetry_trace_id","opentelemetry_span_id": "\$opentelemetry_span_id","remote_addr": "\$remote_addr","uri": "\$uri"}'; +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!$block->extra_init_by_lua) { + my $extra_init_by_lua = <<_EOC_; +-- mock exporter http client +local client = require("opentelemetry.trace.exporter.http_client") +client.do_request = function() + ngx.log(ngx.INFO, "opentelemetry export span") + return "ok" +end +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + } + + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/http-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "opentelemetry_context_traceparent": "$opentelemetry_context_traceparent", + "opentelemetry_trace_id": "$opentelemetry_trace_id", + "opentelemetry_span_id": "$opentelemetry_span_id" + } + }]] + ) + if code >= 300 then + 
ngx.status = code + return body + end + + local code, body = t('/apisix/admin/plugin_metadata/opentelemetry', + ngx.HTTP_PUT, + [[{ + "batch_span_processor": { + "max_export_batch_size": 1, + "inactive_timeout": 0.5 + }, + "set_ngx_var": true + }]] + ) + if code >= 300 then + ngx.status = code + return body + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1980/log", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "concat_method": "new_line" + }, + "opentelemetry": { + "sampler": { + "name": "always_on" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >=300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: trigger opentelemetry with open set variables +--- request +GET /hello +--- response_body +hello world +--- wait: 1 +--- grep_error_log eval +qr/opentelemetry export span/ +--- grep_error_log_out +opentelemetry export span +--- error_log eval +qr/request log: \{.*"opentelemetry_context_traceparent":"00-\w{32}-\w{16}-01".*\}/ + + + +=== TEST 3: trigger opentelemetry with disable set variables +--- extra_yaml_config +plugins: + - http-logger + - opentelemetry +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/plugin_metadata/opentelemetry', + ngx.HTTP_PUT, + [[{ + "set_ngx_var": false + }]] + ) + if code >= 300 then + ngx.status = code + return body + end + } + } +--- request +GET /t + + + +=== TEST 4: trigger opentelemetry with open set variables +--- request +GET /hello +--- response_body +hello world +--- wait: 1 +--- error_log eval +qr/request log: \{.*"opentelemetry_context_traceparent":"".*\}/ diff --git 
a/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry4-bugfix-pb-state.t b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry4-bugfix-pb-state.t new file mode 100644 index 0000000..bc8405d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry4-bugfix-pb-state.t @@ -0,0 +1,195 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - opentelemetry +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + $block; +}); +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("debug"); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/opentelemetry', + ngx.HTTP_PUT, + [[{ + "batch_span_processor": { + "max_export_batch_size": 1, + "inactive_timeout": 0.5 + }, + "trace_id_source": "x-request-id", + "collector": { + "address": "127.0.0.1:4318", + "request_timeout": 3, + "request_headers": { + "foo": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set additional_attributes with match +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "route_name", + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + }, + "additional_header_prefix_attributes": [ + "x-my-header-*" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: opentelemetry expands headers +--- extra_init_by_lua + local otlp = require("opentelemetry.trace.exporter.otlp") + local orig_export_spans = otlp.export_spans + otlp.export_spans = function(self, spans) + if (#spans ~= 1) then + ngx.log(ngx.ERR, "unexpected spans length: ", #spans) + return + end + + local attributes_names = {} + local attributes = {} + local span = 
spans[1] + for _, attribute in ipairs(span.attributes) do + table.insert(attributes_names, attribute.key) + attributes[attribute.key] = attribute.value.string_value or "" + ::skip:: + end + table.sort(attributes_names) + for _, attribute in ipairs(attributes_names) do + ngx.log(ngx.INFO, "attribute " .. attribute .. ": \"" .. attributes[attribute] .. "\"") + end + + ngx.log(ngx.INFO, "opentelemetry export span") + return orig_export_spans(self, spans) + end +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + local http = require "resty.http" + local httpc = http.new() + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local headers = { + ["x-my-header-name"] = "william", + ["x-my-header-nick"] = "bill", + } + local res, err = httpc:request_uri(uri1, {method = "GET", headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.status = res.status + } + } +--- request +GET /t +--- wait: 1 +--- error_code: 200 +--- no_error_log +type 'opentelemetry.proto.trace.v1.TracesData' does not exists +--- grep_error_log eval +qr/attribute (apisix|x-my).+?:.[^,]*/ +--- grep_error_log_out +attribute apisix.route_id: "1" +attribute apisix.route_name: "route_name" +attribute x-my-header-name: "william" +attribute x-my-header-nick: "bill" diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry5.t b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry5.t new file mode 100644 index 0000000..4d147dc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/opentelemetry5.t @@ -0,0 +1,209 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - opentelemetry + - proxy-rewrite +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + if (!defined $block->response_body) { + $block->set_value("response_body", "passed\n"); + } + $block; +}); +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("debug"); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/opentelemetry', + ngx.HTTP_PUT, + [[{ + "batch_span_processor": { + "max_export_batch_size": 1, + "inactive_timeout": 0.5 + }, + "trace_id_source": "x-request-id", + "resource": { + "service.name": "APISIX" + }, + "collector": { + "address": "127.0.0.1:4318", + "request_timeout": 3, + "request_headers": { + "foo": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "route-name", + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + } + }, + "proxy-rewrite": {"uri": "/opentracing"} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/articles/*/comments" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: trigger opentelemetry +--- request +GET /articles/12345/comments?foo=bar +--- more_headers +User-Agent: test-client +--- wait: 2 +--- response_body +opentracing + + + +=== TEST 4: (resource) check service.name +--- exec +tail -n 1 
ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/\{"key":"service.name","value":\{"stringValue":"APISIX"\}\}/ + + + +=== TEST 5: (span) check name +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/"name":"GET \/articles\/\*\/comments"/ + + + +=== TEST 6: (span) check http.status_code +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/\{"key":"http.status_code","value":\{"intValue":"200"\}\}/ + + + +=== TEST 7: (span) check http.method +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/\{"key":"http.method","value":\{"stringValue":"GET"\}\}/ + + + +=== TEST 8: (span) check http.host +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/\{"key":"net.host.name","value":\{"stringValue":"localhost"\}\}/ + + + +=== TEST 9: (span) check http.user_agent +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/\{"key":"http.user_agent","value":\{"stringValue":"test-client"\}\}/ + + + +=== TEST 10: (span) check http.target +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/\{"key":"http.target","value":\{"stringValue":"\/articles\/12345\/comments\?foo=bar"\}\}/ + + + +=== TEST 11: (span) check http.route +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/\{"key":"http.route","value":\{"stringValue":"\/articles\/\*\/comments"\}\}/ + + + +=== TEST 12: (span) check apisix.route_id +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/\{"key":"apisix.route_id","value":\{"stringValue":"1"\}\}/ + + + +=== TEST 13: (span) check apisix.route_name +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/\{"key":"apisix.route_name","value":\{"stringValue":"route-name"\}\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/openwhisk.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/openwhisk.t new file mode 100644 index 0000000..dfd4d83 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/openwhisk.t @@ -0,0 +1,468 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity check with minimal valid configuration. 
+--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openwhisk") + local ok, err = plugin.check_schema({api_host = "http://127.0.0.1:3233", service_token = "test:test", namespace = "test", action = "test"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: missing `api_host` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openwhisk") + local ok, err = plugin.check_schema({service_token = "test:test", namespace = "test", action = "test"}) + if not ok then + ngx.say(err) + end + } + } +--- response_body +property "api_host" is required + + + +=== TEST 3: wrong type for `api_host` +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openwhisk") + local ok, err = plugin.check_schema({api_host = 3233, service_token = "test:test", namespace = "test", action = "test"}) + if not ok then + ngx.say(err) + end + } + } +--- response_body +property "api_host" validation failed: wrong type: expected string, got number + + + +=== TEST 4: setup route with plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openwhisk": { + "api_host": "http://127.0.0.1:3233", + "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", + "namespace": "guest", + "action": "test-params" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: verify encrypted field +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + + -- get plugin conf from etcd, service_token is encrypted + 
local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["openwhisk"].service_token) + + } + } +--- response_body +pe14btxogtzJ4qPM/W2qj0AQeUK/O5oegLkKJLkkSEsKUIjP+bgyO+qsTXuLrY/h/esLKrRulD2TOtf+Zt/Us+hxZ/svsMwXZqZ9T9/2wWyi8SKALLfTUZDiV69mxCwD2zNBze1jslMlPtdA9JFIOQ== + + + +=== TEST 6: hit route (with GET request) +--- request +GET /hello +--- response_body chomp +{"hello":"test"} + + + +=== TEST 7: hit route (with POST method and non-json format request body) +--- request +POST /hello +test=test +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 400 +--- response_body_like eval +qr/"error":"The request content was malformed/ + + + +=== TEST 8: setup route with plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openwhisk": { + "api_host": "http://127.0.0.1:3233", + "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", + "namespace": "guest", + "action": "test-params" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit route (with POST and correct request body) +--- request +POST /hello +{"name": "world"} +--- more_headers +Content-Type: application/json +--- response_body chomp +{"hello":"world"} + + + +=== TEST 10: reset route to non-existent action +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openwhisk": { + "api_host": "http://127.0.0.1:3233", + "service_token": 
"23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", + "namespace": "guest", + "action": "non-existent" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit route (with non-existent action) +--- request +POST /hello +{"name": "world"} +--- more_headers +Content-Type: application/json +--- error_code: 404 +--- response_body_like eval +qr/"error":"The requested resource does not exist."/ + + + +=== TEST 12: reset route to wrong api_host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openwhisk": { + "api_host": "http://127.0.0.1:1979", + "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", + "namespace": "guest", + "action": "non-existent" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route (with wrong api_host) +--- request +POST /hello +{"name": "world"} +--- more_headers +Content-Type: application/json +--- error_code: 503 +--- error_log +failed to process openwhisk action, err: + + + +=== TEST 14: reset route to packaged action +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openwhisk": { + "api_host": "http://127.0.0.1:3233", + "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", + "namespace": "guest", + "package": "pkg", + "action": "testpkg" + } + }, + "upstream": { + 
"nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: hit route (with packaged action) +--- request +GET /hello +--- response_body chomp +{"hello":"world"} + + + +=== TEST 16: reset route to status code action +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openwhisk": { + "api_host": "http://127.0.0.1:3233", + "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", + "namespace": "guest", + "action": "test-statuscode" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: hit route (with packaged action) +--- request +GET /hello +--- error_code: 407 + + + +=== TEST 18: reset route to headers action +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openwhisk": { + "api_host": "http://127.0.0.1:3233", + "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", + "namespace": "guest", + "action": "test-headers" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: hit route (with headers action) +--- request +GET /hello +--- response_headers +test: header + + + +=== TEST 20: reset route to body action +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openwhisk": { + "api_host": "http://127.0.0.1:3233", + "service_token": "23bc46b1-71f6-4ed5-8c54-816aa4f8c502:123zO3xZCLrMN6v2BKK1dXYFpXlPkccOFqm12CdAsMgRU4VrNZ9lyGVCGuMDGIwP", + "namespace": "guest", + "action": "test-body" + } + }, + "upstream": { + "nodes": {}, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: hit route (with body action) +--- request +GET /hello +--- response_body +{"test":"body"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/plugin.t b/CloudronPackages/APISIX/apisix-source/t/plugin/plugin.t new file mode 100644 index 0000000..53c87b0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/plugin.t @@ -0,0 +1,767 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("no_error_log", "[error]"); + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +no_long_string(); +no_root_location(); +log_level("info"); +run_tests; + +__DATA__ + +=== TEST 1: ensure all plugins have exposed their name +--- config + location /t { + content_by_lua_block { + local lfs = require("lfs") + for file_name in lfs.dir(ngx.config.prefix() .. "/../../apisix/plugins/") do + if string.match(file_name, ".lua$") then + local expected = file_name:sub(1, #file_name - 4) + local plugin = require("apisix.plugins." .. expected) + if plugin.name ~= expected then + ngx.say("expected ", expected, " got ", plugin.name) + return + end + end + end + ngx.say('ok') + } + } +--- response_body +ok + + + +=== TEST 2: define route for /* +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "jwt-auth": { + "key": "user-key", + "secret": "my-secret-key" + } + } + }]]) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: sign and verify +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, _, res = t('/hello?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs', + ngx.HTTP_GET + ) + + ngx.status = code + ngx.print(res) + } + } +--- response_body +hello world + + + +=== TEST 4: delete /* and 
define route for /apisix/plugin/blah +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/routes/1', "DELETE") + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/apisix/plugin/blah" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit +--- request +GET /apisix/plugin/blah +--- error_code: 401 +--- response_body +{"message":"Missing JWT token in request"} + + + +=== TEST 6: ensure all plugins have unique priority +--- config + location /t { + content_by_lua_block { + local lfs = require("lfs") + local pri_name = {} + for file_name in lfs.dir(ngx.config.prefix() .. "/../../apisix/plugins/") do + if string.match(file_name, ".lua$") then + local name = file_name:sub(1, #file_name - 4) + local plugin = require("apisix.plugins." .. 
name) + if pri_name[plugin.priority] then + ngx.say(name, " has same priority with ", pri_name[plugin.priority]) + return + end + pri_name[plugin.priority] = plugin.name + end + end + ngx.say('ok') + } + } +--- response_body +ok + + + +=== TEST 7: plugin with custom error message +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": { + "_meta": { + "error_response": { + "message":"Missing credential in request" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: verify, missing token +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing credential in request"} + + + +=== TEST 9: validate custom error message configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for _, case in ipairs({ + {input = true}, + {input = { + error_response = true + }}, + {input = { + error_response = "OK" + }}, + }) do + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + { + plugins = { + ["jwt-auth"] = { + _meta = case.input + } + } + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + end + } + } +--- response_body +{"error_msg":"failed to check the configuration of plugin jwt-auth err: property \"_meta\" validation failed: wrong type: expected object, got boolean"} +{"error_msg":"failed to check the configuration of plugin jwt-auth err: property \"_meta\" validation failed: property \"error_response\" validation failed: value should match only one schema, but matches none"} +passed + + + +=== TEST 10: invalid _meta filter vars schema with wrong type +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + { + plugins = { + ["jwt-auth"] = { + _meta = { + filter = "arg_k == v" + } + } + } + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +{"error_msg":"failed to check the configuration of plugin jwt-auth err: property \"_meta\" validation failed: property \"filter\" validation failed: wrong type: expected array, got string"} + + + +=== TEST 11: invalid _meta filter schema with wrong expr +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + for _, filter in ipairs({ + {"arg_name", "==", "json"}, + { + {"arg_name", "*=", "json"} + } + }) do + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + { + plugins = { + ["jwt-auth"] = { + _meta = { + filter = filter + } + } + } + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + end + } + } +--- response_body +{"error_msg":"failed to validate the 'vars' expression: rule should be wrapped inside brackets"} +{"error_msg":"failed to validate the 'vars' expression: invalid operator '*='"} + + + +=== TEST 12: proxy-rewrite plugin run with _meta filter vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v2"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v2" + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/hello" + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 13: hit route: run proxy-rewrite plugin +--- request +GET /hello?version=v2 +--- response_headers +x-api-version: v2 + + + +=== TEST 14: hit route: not run proxy-rewrite 
plugin +--- request +GET /hello?version=v1 +--- response_body +hello world + + + +=== TEST 15: different route,same plugin, different filter (for expr_lrucache) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v3"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v3" + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/hello1" + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 16: hit route: run proxy-rewrite plugin +--- request +GET /hello1?version=v3 +--- response_headers +x-api-version: v3 + + + +=== TEST 17: same plugin, same id between routes and global_rules, different filter (for expr_lrucache) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/2', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v4"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v4" + } + } + } + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 18: hit route: run global proxy-rewrite plugin +--- request +GET /hello1?version=v4 +--- response_headers +x-api-version: v4 + + + +=== TEST 19: different global_rules with the same plugin will not use the same meta.filter cache +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/3', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"arg_version", "==", "v5"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "v5" + } + } + 
} + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 20: hit global_rules which has the same plugin with different meta.filter +--- pipelined_requests eval +["GET /hello1?version=v4", "GET /hello1?version=v5"] +--- response_headers eval +["x-api-version: v4", "x-api-version: v5"] + + + +=== TEST 21: use _meta.filter in response-rewrite plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "_meta": { + "filter": [ + ["upstream_status", "~=", 200] + ] + }, + "headers": { + "set": { + "test-header": "error" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: upstream_status = 502, enable response-rewrite plugin +--- request +GET /specific_status +--- more_headers +x-test-upstream-status: 502 +--- response_headers +test-header: error +--- error_code: 502 + + + +=== TEST 23: upstream_status = 200, disable response-rewrite plugin +--- request +GET /hello +--- response_headers +!test-header + + + +=== TEST 24: use _meta.filter in response-rewrite plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "headers": { + "foo-age": "$arg_age" + } + }, + "response-rewrite": { + "_meta": { + "filter": [ + ["http_foo_age", "==", "18"] + ] + }, + "status_code": 403 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: 
proxy-rewrite plugin will set $http_foo_age, response-rewrite plugin return 403 +--- request +GET /hello?age=18 +--- error_code: 403 + + + +=== TEST 26: response-rewrite plugin disable, return 200 +--- request +GET /hello + + + +=== TEST 27: use response var in meta.filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "_meta": { + "filter": [ + ["upstream_status", "==", "200"] + ] + }, + "uri": "/echo", + "headers": { + "x-version": "v1" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 28: hit route: disable proxy-rewrite plugin +--- request +GET /hello +--- response_headers +!x-version + + + +=== TEST 29: use APISIX's built-in variables in meta.filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + { + plugins = { + ["proxy-rewrite"] = { + _meta = { + filter = { + {"post_arg_key", "==", "abc"} + } + }, + uri = "/echo", + headers = { + ["X-Api-Version"] = "ga" + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + uri = "/hello" + } + ) + if code >= 300 then + ngx.print(body) + else + ngx.say(body) + end + } + } +--- response_body +passed + + + +=== TEST 30: hit route: proxy-rewrite enable with post_arg_xx in meta.filter +--- request +POST /hello +key=abc +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- response_headers +x-api-version: ga diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus-metric-expire.t b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus-metric-expire.t new file mode 100644 index 0000000..caad85e --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus-metric-expire.t @@ -0,0 +1,132 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route with prometheus ttl +--- yaml_config +plugin_attr: + prometheus: + default_buckets: + - 15 + - 55 + - 105 + - 205 + - 505 + metrics: + http_status: + expire: 1 + http_latency: + expire: 1 + bandwidth: + expire: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/routes/metrics', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": 
"roundrobin" + }, + "uri": "/hello1" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + local code, body = t('/hello1', + ngx.HTTP_GET, + "", + nil, + nil + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(2) + local code, pass, body = t('/apisix/prometheus/metrics', + ngx.HTTP_GET, + "", + nil, + nil + ) + + local metrics_to_check = {"apisix_bandwidth", "http_latency", "http_status",} + + -- verify that above mentioned metrics are not in the metrics response + for _, v in pairs(metrics_to_check) do + local match, err = ngx.re.match(body, "\\b" .. v .. "\\b", "m") + if match then + ngx.status = 500 + ngx.say("error found " .. v .. " in metrics") + return + end + end + + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus.t b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus.t new file mode 100644 index 0000000..9254de5 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus.t @@ -0,0 +1,632 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.prometheus") + local ok, err = plugin.check_schema({}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: setup public API route and test route +--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/routes/1", + data = [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]], + }, + { + url = "/apisix/admin/routes/metrics", + data = [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + }, + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + ngx.say(code..body) + end + } + } +--- response_body eval +"201passed\n" x 2 + + + +=== TEST 3: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body_like +apisix_etcd_reachable 1 + + + +=== TEST 4: request from client (all hit) +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 5: request from client (part hit) +--- pipelined_requests eval +["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"] +--- error_code eval +[404, 200, 404, 200, 200] + + + +=== TEST 6: fetch the prometheus metric data +--- 
request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="1",service="",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 7: test for unsupported method +--- request +PATCH /apisix/prometheus/metrics +--- error_code: 404 + + + +=== TEST 8: set route without id in post body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "prometheus": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: pipeline of client request +--- pipelined_requests eval +["GET /hello", "GET /not_found", "GET /hello", "GET /hello"] +--- error_code eval +[200, 404, 200, 200] + + + +=== TEST 10: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="1",service="",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 11: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_latency_count\{type="request",route="1",service="",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 12: create service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: use service 1 in route 2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "service_id": 1, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: pipeline of client request +--- pipelined_requests eval +["GET /hello1", "GET /not_found", "GET /hello1", "GET /hello1"] +--- error_code eval +[200, 404, 200, 200] + + + +=== TEST 15: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="2",service="1",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 16: delete route 2 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: set it in route with plugin `fault-injection` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {}, + "fault-injection": { + "abort": { + "http_status": 200, + "body": "Fault Injection!" 
+ } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: pipeline of client request +--- pipelined_requests eval +["GET /hello", "GET /not_found", "GET /hello", "GET /hello"] +--- error_code eval +[200, 404, 200, 200] + + + +=== TEST 19: set it in global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/routes/3', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello3" + }]] + ) + ngx.say(body) + } + } +--- response_body +passed +passed + + + +=== TEST 20: request from client +--- pipelined_requests eval +["GET /hello3", "GET /hello3"] +--- error_code eval +[404, 404] + + + +=== TEST 21: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_status\{code="404",route="3",matched_uri="\/hello3",matched_host="",service="",consumer="",node="127.0.0.1"\} 2/ + + + +=== TEST 22: fetch the prometheus metric data with apisix latency +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/.*apisix_http_latency_bucket\{type="apisix".*/ + + + +=== TEST 23: add service 3 to distinguish other services +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/3', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 24: add a route 4 to 
redirect /mysleep?seconds=1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/4', + ngx.HTTP_PUT, + [[{ + "service_id": 3, + "uri": "/mysleep" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: request from client to /mysleep?seconds=1 ( all hit) +--- pipelined_requests eval +["GET /mysleep?seconds=1", "GET /mysleep?seconds=1", "GET /mysleep?seconds=1"] +--- error_code eval +[200, 200, 200] + + + +=== TEST 26: fetch the prometheus metric data with apisix latency (latency < 1s) +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_latency_bucket\{type="apisix".*service=\"3\".*le=\"500.*/ + + + +=== TEST 27: delete route 4 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/4', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 28: delete service 3 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/3', + ngx.HTTP_DELETE + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 29: fetch the prometheus metric data with `modify_indexes consumers` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="consumers"\} \d+/ + + + +=== TEST 30: fetch the prometheus metric data with `modify_indexes global_rules` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="global_rules"\} \d+/ + + + +=== TEST 31: fetch the prometheus metric data with `modify_indexes max_modify_index` +--- request +GET /apisix/prometheus/metrics +--- response_body_like 
eval +qr/apisix_etcd_modify_indexes\{key="max_modify_index"\} \d+/ + + + +=== TEST 32: fetch the prometheus metric data with `modify_indexes protos` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="protos"\} \d+/ + + + +=== TEST 33: fetch the prometheus metric data with `modify_indexes routes` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="routes"\} \d+/ + + + +=== TEST 34: fetch the prometheus metric data with `modify_indexes services` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="services"\} \d+/ + + + +=== TEST 35: fetch the prometheus metric data with `modify_indexes ssls` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="ssls"\} \d+/ + + + +=== TEST 36: fetch the prometheus metric data with `modify_indexes stream_routes` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="stream_routes"\} \d+/ + + + +=== TEST 37: fetch the prometheus metric data with `modify_indexes upstreams` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="upstreams"\} \d+/ + + + +=== TEST 38: fetch the prometheus metric data with `modify_indexes prev_index` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="prev_index"\} \d+/ + + + +=== TEST 39: fetch the prometheus metric data with `modify_indexes x_etcd_index` +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_etcd_modify_indexes\{key="x_etcd_index"\} \d+/ + + + +=== TEST 40: fetch the prometheus metric data -- hostname +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_node_info\{hostname=".*"\} 1/ + + + +=== TEST 41: don't try to provide etcd metrics 
when you don't use it +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +--- apisix_yaml +routes: + - + uri: /apisix/prometheus/metrics + plugins: + public-api: {} + - + uri: /hello + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /apisix/prometheus/metrics +--- response_body_like eval +qr/apisix_/ +--- response_body_unlike eval +qr/etcd/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus2.t new file mode 100644 index 0000000..40b2cdc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus2.t @@ -0,0 +1,923 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/routes/metrics", + data = [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + }, + { + url = "/apisix/admin/routes/metrics-custom-uri", + data = [[{ + "plugins": { + "public-api": {} + }, + "uri": "/a" + }]] + }, + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + ngx.say(code..body) + end + } + } +--- response_body eval +"201passed\n" x 2 + + + +=== TEST 2: set route with key-auth enabled for consumer metrics +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {}, + "key-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: pipeline of client request without api-key +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[401, 401, 401, 401] + + + +=== TEST 4: fetch the prometheus metric data: consumer is empty +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="1",service="",consumer="",node=""\} \d+/ + + + +=== 
TEST 5: set consumer for metrics data collection +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: pipeline of client request with successfully authorized +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- more_headers +apikey: auth-one +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 7: fetch the prometheus metric data: consumer is jack +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_status\{code="200",route="1",matched_uri="\/hello",matched_host="",service="",consumer="jack",node="127.0.0.1"\} \d+/ + + + +=== TEST 8: set route(id: 9) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/9', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "hosts": ["foo.com", "bar.com"], + "uris": ["/foo*", "/bar*"] + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: set it in global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: 404 Route Not Found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 11: fetch the prometheus 
metric data: 404 Route Not Found +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_status\{code="404",route="",matched_uri="",matched_host="",service="",consumer="",node=""\} \d+/ + + + +=== TEST 12: hit routes(uri = "/foo*", host = "foo.com") +--- request +GET /foo1 +--- more_headers +Host: foo.com +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ + + + +=== TEST 13: fetch the prometheus metric data: hit routes(uri = "/foo*", host = "foo.com") +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_status\{code="404",route="9",matched_uri="\/foo\*",matched_host="foo.com",service="",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 14: hit routes(uri = "/bar*", host = "bar.com") +--- request +GET /bar1 +--- more_headers +Host: bar.com +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ + + + +=== TEST 15: fetch the prometheus metric data: hit routes(uri = "/bar*", host = "bar.com") +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_status\{code="404",route="9",matched_uri="\/bar\*",matched_host="bar.com",service="",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 16: customize export uri, not found +--- yaml_config +plugin_attr: + prometheus: + export_uri: /a +--- request +GET /apisix/prometheus/metrics +--- error_code: 404 + + + +=== TEST 17: customize export uri, found +--- yaml_config +plugin_attr: + prometheus: + export_uri: /a +--- request +GET /a +--- error_code: 200 + + + +=== TEST 18: customize export uri, missing plugin, use default +--- yaml_config +plugin_attr: + x: + y: z +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 + + + +=== TEST 19: customize export uri, missing attr, use default +--- yaml_config +plugin_attr: + prometheus: + y: z +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 + + + +=== TEST 20: set sys plugins +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/9', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "prometheus": {}, + "syslog": { + "host": "127.0.0.1", + "include_req_body": false, + "max_retry_count": 1, + "tls": false, + "retry_delay": 1, + "batch_max_size": 1000, + "buffer_duration": 60, + "port": 1000, + "name": "sys-logger", + "flush_limit": 4096, + "sock_type": "tcp", + "timeout": 3, + "drop_limit": 1048576, + "pool_size": 5 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/batch-process-metrics" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: hit batch-process-metrics +--- request +GET /batch-process-metrics +--- error_code: 404 + + + +=== TEST 22: check sys logger metrics +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 +--- response_body_like eval +qr/apisix_batch_process_entries\{name="sys-logger",route_id="9",server_addr="127.0.0.1"\} \d+/ + + + +=== TEST 23: set zipkin plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/9', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "prometheus": {}, + "zipkin": { + "endpoint": "http://127.0.0.1:9447", + "service_name": "APISIX", + "sample_ratio": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/batch-process-metrics" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 24: hit batch-process-metrics +--- request +GET /batch-process-metrics +--- error_code: 404 + + + +=== TEST 25: check zipkin log metrics +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 +--- response_body_like eval 
+qr/apisix_batch_process_entries\{name="zipkin_report",route_id="9",server_addr="127.0.0.1"\} \d+/ + + + +=== TEST 26: set http plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/9', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "prometheus": {}, + "http-logger": { + "inactive_timeout": 5, + "include_req_body": false, + "timeout": 3, + "name": "http-logger", + "retry_delay": 1, + "buffer_duration": 60, + "uri": "http://127.0.0.1:19080/report", + "concat_method": "json", + "batch_max_size": 1000, + "max_retry_count": 0 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/batch-process-metrics" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 27: hit batch-process-metrics +--- request +GET /batch-process-metrics +--- error_code: 404 + + + +=== TEST 28: check http log metrics +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 +--- response_body_like eval +qr/apisix_batch_process_entries\{name="http-logger",route_id="9",server_addr="127.0.0.1"\} \d+/ + + + +=== TEST 29: set tcp-logger plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/10', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "prometheus": {}, + "tcp-logger": { + "host": "127.0.0.1", + "include_req_body": false, + "timeout": 1000, + "name": "tcp-logger", + "retry_delay": 1, + "buffer_duration": 60, + "port": 1000, + "batch_max_size": 1000, + "inactive_timeout": 60, + "tls": false, + "max_retry_count": 0 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/batch-process-metrics-10" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + 
+=== TEST 30: trigger metrics batch-process-metrics +--- request +GET /batch-process-metrics-10 +--- error_code: 404 + + + +=== TEST 31: check tcp log metrics +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 +--- response_body_like eval +qr/apisix_batch_process_entries\{name="tcp-logger",route_id="10",server_addr="127.0.0.1"\} \d+/ + + + +=== TEST 32: set udp-logger plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/10', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "prometheus": {}, + "udp-logger": { + "host": "127.0.0.1", + "port": 1000, + "include_req_body": false, + "timeout": 3, + "batch_max_size": 1000, + "name": "udp-logger", + "inactive_timeout": 5, + "buffer_duration": 60 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/batch-process-metrics-10" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 33: trigger metrics batch-process-metrics +--- request +GET /batch-process-metrics-10 +--- error_code: 404 + + + +=== TEST 34: check udp log metrics +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 +--- response_body_like eval +qr/apisix_batch_process_entries\{name="udp-logger",route_id="10",server_addr="127.0.0.1"\} \d+/ + + + +=== TEST 35: set sls-logger plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/10', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "prometheus": {}, + "sls-logger": { + "host": "127.0.0.1", + "batch_max_size": 1000, + "name": "sls-logger", + "inactive_timeout": 5, + "logstore": "your_logstore", + "buffer_duration": 60, + "port": 10009, + "max_retry_count": 0, + "retry_delay": 1, + "access_key_id": "your_access_id", + "access_key_secret": "your_key_secret", + 
"timeout": 5000, + "project": "your_project" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/batch-process-metrics-10" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 36: trigger metrics batch-process-metrics +--- request +GET /batch-process-metrics-10 +--- error_code: 404 + + + +=== TEST 37: check sls-logger metrics +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 +--- response_body_like eval +qr/apisix_batch_process_entries\{name="sls-logger",route_id="10",server_addr="127.0.0.1"\} \d+/ + + + +=== TEST 38: create service and route both with name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "name": "service_name", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "route_name", + "service_id": 1, + "plugins": { + "prometheus": { + "prefer_name": true + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed +passed + + + +=== TEST 39: pipeline of client request +--- request +GET /hello +--- error_code: 200 + + + +=== TEST 40: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="route_name",service="service_name",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 41: set route name but remove service name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": 
"roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 42: pipeline of client request +--- request +GET /hello +--- error_code: 200 + + + +=== TEST 43: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="route_name",service="1",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 44: set service name but remove route name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "name": "service_name", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": 1, + "plugins": { + "prometheus": { + "prefer_name": true + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed +passed + + + +=== TEST 45: pipeline of client request +--- request +GET /hello +--- error_code: 200 + + + +=== TEST 46: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="1",service="service_name",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 47: remove both name, but still set prefer_name to true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": 1, + "plugins": { + "prometheus": { + "prefer_name": true + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 48: pipeline of client request +--- request +GET /hello +--- 
error_code: 200 + + + +=== TEST 49: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="1",service="service_name",consumer="",node="127.0.0.1"\} \d+/ + + + +=== TEST 50: fetch the prometheus shared dict data +--- http_config +lua_shared_dict test-shared-dict 10m; +--- request +GET /apisix/prometheus/metrics +--- response_body_like +.*apisix_shared_dict_capacity_bytes\{name="test-shared-dict"\} 10485760(?:.|\n)* +apisix_shared_dict_free_space_bytes\{name="test-shared-dict"\} \d+.* diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus3.t new file mode 100644 index 0000000..b80986a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus3.t @@ -0,0 +1,262 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: setup public API route and test route +--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/plugin_configs/1", + data = [[{ + "plugins": { + "prometheus":{} + } + }]] + }, + { + url = "/apisix/admin/routes/1", + data = [[{ + "plugin_config_id": 1, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + }, + { + url = "/apisix/admin/routes/metrics", + data = [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + }, + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + ngx.say(code..body) + end + } + } +--- response_body eval +"201passed\n" x 3 + + + +=== TEST 2: hit +--- pipelined_requests eval +["GET /hello", "GET /apisix/prometheus/metrics"] +--- error_code eval +[200, 200] + + + +=== TEST 3: apisix_batch_process_entries, mess with global rules +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/batch-process-metrics-aa" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "tcp": { + "host": "127.0.0.1", + "port": 1999 + }, + 
"max_retry_count": 1000, + "level": "NOTICE" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:1979" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: check metrics +--- yaml_config +plugins: + - public-api + - error-log-logger + - prometheus + - http-logger +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/batch-process-metrics-aa" + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + + ngx.sleep(2) + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/apisix/prometheus/metrics" + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + ngx.say(res.body) + } + } +--- response_body_like eval +qr/apisix_batch_process_entries\{name="http logger",route_id="1",server_addr="127.0.0.1"\} \d+/ + + + +=== TEST 5: set prometheus plugin at both global rule and route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + } + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: test prometheus plugin at both global rule and 
route +--- request +GET /opentracing +--- response_body +opentracing + + + +=== TEST 7: fetch prometheus plugin at both global rule and route data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_status\{code="200",route="1",matched_uri="\/opentracing",matched_host="",service="",consumer="",node="127.0.0.1\"} 1/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus4.t b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus4.t new file mode 100644 index 0000000..eac1f60 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/prometheus4.t @@ -0,0 +1,323 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + + local t = require("lib.test_admin").test + local code = t('/apisix/admin/routes/metrics', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + } + } + + + +=== TEST 2: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/10', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: client request +--- yaml_config +plugin_attr: + prometheus: + metrics: + bandwidth: + extra_labels: + - upstream_addr: $upstream_addr + - upstream_status: $upstream_status +--- request +GET /hello + + + +=== TEST 4: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_bandwidth\{type="egress",route="10",service="",consumer="",node="127.0.0.1",upstream_addr="127.0.0.1:1980",upstream_status="200"\} \d+/ + + + +=== TEST 5: client request, label with nonexist ngx variable +--- yaml_config +plugin_attr: + prometheus: + metrics: + http_status: + extra_labels: + - dummy: $dummy +--- request +GET /hello + + + +=== TEST 6: fetch the prometheus metric data, with nonexist ngx variable +--- request +GET /apisix/prometheus/metrics 
+--- response_body eval +qr/apisix_http_status\{code="200",route="10",matched_uri="\/hello",matched_host="",service="",consumer="",node="127.0.0.1",dummy=""\} \d+/ + + + +=== TEST 7: set route +--- yaml_config +plugin_attr: + prometheus: + default_buckets: + - 15 + - 55 + - 105 + - 205 + - 505 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- pipelined_requests eval +["GET /t", "GET /hello1"] +--- response_body eval +["passed\n", "hello1 world\n"] + + + +=== TEST 8: fetch metrics +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="15"\} \d+ +apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="55"\} \d+ +apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="105"\} \d+ +apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="205"\} \d+ +apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="505"\} \d+/ + + + +=== TEST 9: set sys plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/9', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "prometheus": {}, + "syslog": { + "host": "127.0.0.1", + "include_req_body": false, + "max_retry_times": 1, + "tls": false, + "retry_interval": 1, + "batch_max_size": 1000, + "buffer_duration": 60, + "port": 1000, + "name": "sys-logger", + "flush_limit": 4096, + "sock_type": "tcp", + "timeout": 3, + 
"drop_limit": 1048576, + "pool_size": 5 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/batch-process-metrics" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 10: remove prometheus -> reload -> send batch request -> add prometheus for next tests +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - example-plugin +plugin_attr: + example-plugin: + val: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, _, org_body = t('/v1/plugins/reload', ngx.HTTP_PUT) + local code, body = t('/batch-process-metrics', ngx.HTTP_GET) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 404 +--- response_body_like eval +qr/404 Not Found/ + + + +=== TEST 11: fetch prometheus metrics -> batch_process_entries metrics should not be present +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 +plugins: + - prometheus + - public-api +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 +--- response_body_unlike eval +qr/apisix_batch_process_entries\{name="sys-logger",route_id="9",server_addr="127.0.0.1"\} \d+/ + + + +=== TEST 12: hit batch-process-metrics with prometheus enabled from TEST 11 +--- request +GET /batch-process-metrics +--- error_code: 404 + + + +=== TEST 13: batch_process_entries metrics should be present now +--- request +GET /apisix/prometheus/metrics +--- error_code: 200 +--- response_body_like eval +qr/apisix_batch_process_entries\{name="sys-logger",route_id="9",server_addr="127.0.0.1"\} \d+/ + + + +=== TEST 14: node_info metric contains the current apisix version +--- request +GET 
/apisix/prometheus/metrics +--- error_code: 200 +--- response_body_like eval +qr/apisix_node_info\{hostname="[^"]+",version="\d+\.\d+\.\d+"\} \d+/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-cache/disk.t b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-cache/disk.t new file mode 100644 index 0000000..1d3a124 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-cache/disk.t @@ -0,0 +1,755 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +log_level('info'); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + # for proxy cache + proxy_cache_path /tmp/disk_cache_one levels=1:2 keys_zone=disk_cache_one:50m inactive=1d max_size=1G; + proxy_cache_path /tmp/disk_cache_two levels=1:2 keys_zone=disk_cache_two:50m inactive=1d max_size=1G; + + # for proxy cache + map \$upstream_cache_zone \$upstream_cache_zone_info { + disk_cache_one /tmp/disk_cache_one,1:2; + disk_cache_two /tmp/disk_cache_two,1:2; + } + + server { + listen 1986; + server_tokens off; + + location / { + expires 60s; + return 200 "hello world!"; + } + + location /hello-not-found { + return 404; + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity check (missing cache_zone field, the default value is disk_cache_one) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: sanity check (invalid type for cache_method) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_zone": "disk_cache_one", + "cache_bypass": ["$arg_bypass"], + "cache_method": "GET", + "cache_http_status": [200], + "hide_cache_headers": true, + 
"no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-cache/ + + + +=== TEST 3: sanity check (invalid type for cache_key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_zone": "disk_cache_one", + "cache_key": "${uri}-cache-key", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1985": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-cache/ + + + +=== TEST 4: sanity check (invalid type for cache_bypass) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_zone": "disk_cache_one", + "cache_bypass": "$arg_bypass", + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1985": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-cache/ + + + +=== TEST 5: sanity check (invalid type for no_cache) +--- 
config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_zone": "disk_cache_one", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": "$arg_no_cache" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1985": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-cache/ + + + +=== TEST 6: sanity check (illegal character for cache_key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_zone": "disk_cache_one", + "cache_key": ["$uri-", "-cache-id"], + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1985": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-cache/ + + + +=== TEST 7: sanity check (normal case) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_key":["$host","$uri"], + "cache_zone": "disk_cache_one", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + 
"upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 8: hit route (cache miss) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 9: hit route (cache hit) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: HIT +--- raw_response_headers_unlike +Expires: + + + +=== TEST 10: hit route (cache bypass) +--- request +GET /hello?bypass=1 +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: BYPASS + + + +=== TEST 11: purge cache +--- request +PURGE /hello +--- error_code: 200 + + + +=== TEST 12: hit route (nocache) +--- request +GET /hello?no_cache=1 +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 13: hit route (there's no cache indeed) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS +--- raw_response_headers_unlike +Expires: + + + +=== TEST 14: hit route (will be cached) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: HIT + + + +=== TEST 15: hit route (not found) +--- request +GET /hello-not-found +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 16: hit route (404 there's no cache indeed) +--- request +GET /hello-not-found +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 17: hit route (will be cached) +--- request +GET /hello +--- response_body chop +hello world! 
+--- response_headers +Apisix-Cache-Status: HIT + + + +=== TEST 18: hit route (HEAD method mismatch cache_method) +--- request +HEAD /hello +--- error_code: 200 +--- response_headers +Apisix-Cache-Status: BYPASS + + + +=== TEST 19: hide cache headers = false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_zone": "disk_cache_one", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": false, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 20: hit route (catch the cache headers) +--- request +GET /hello +--- response_body chop +hello world! 
+--- response_headers +Apisix-Cache-Status: HIT +--- response_headers_like +Cache-Control: + + + +=== TEST 21: purge cache +--- request +PURGE /hello +--- error_code: 200 + + + +=== TEST 22: purge cache (not found) +--- request +PURGE /hello-world +--- error_code: 404 + + + +=== TEST 23: invalid cache zone +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_zone": "invalid_disk_cache", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": false, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/cache_zone invalid_disk_cache not found/ + + + +=== TEST 24: sanity check (invalid variable for cache_key) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_zone": "disk_cache_one", + "cache_key": ["$uri", "$request_method"], + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1985": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-cache err: cache_key variable \$request_method unsupported/ + + + +=== TEST 25: don't override cache relative headers +--- config + location /t { + content_by_lua_block { + 
local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: hit route +--- request +GET /echo +--- more_headers +Apisix-Cache-Status: Foo +Cache-Control: bar +Expires: any +--- response_headers +Apisix-Cache-Status: Foo +Cache-Control: bar +Expires: any + + + +=== TEST 27: sanity check (invalid method for cache_method) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_zone": "disk_cache_one", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET", "PUT"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-cache err/ + + + +=== TEST 28: nil vars for cache_key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_key": ["$arg_foo", "$arg_bar", "$arg_baz"], + "cache_zone": "disk_cache_one", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + 
end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 29: hit route with nil vars in cache_key +--- request +GET /hello?bar=a +--- response_body chop +hello world! diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-cache/memory.t b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-cache/memory.t new file mode 100644 index 0000000..1bdc21e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-cache/memory.t @@ -0,0 +1,706 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + $ENV{TEST_NGINX_FORCE_RESTART_ON_TEST} = 0; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +log_level('info'); + + + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + # for proxy cache + proxy_cache_path /tmp/disk_cache_one levels=1:2 keys_zone=disk_cache_one:50m inactive=1d max_size=1G; + proxy_cache_path /tmp/disk_cache_two levels=1:2 keys_zone=disk_cache_two:50m inactive=1d max_size=1G; + lua_shared_dict memory_cache 50m; + + # for proxy cache + map \$upstream_cache_zone \$upstream_cache_zone_info { + disk_cache_one /tmp/disk_cache_one,1:2; + disk_cache_two /tmp/disk_cache_two,1:2; + } + + server { + listen 1986; + server_tokens off; + + location / { + expires 60s; + + if (\$arg_expires) { + expires \$arg_expires; + } + + if (\$arg_cc) { + expires off; + add_header Cache-Control \$arg_cc; + } + + return 200 "hello world!"; + } + + location /hello-not-found { + return 404; + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity check (invalid cache strategy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_strategy": "network", + "cache_key":["$host","$uri"], + "cache_zone": "disk_cache_one", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-cache err: property \\"cache_strategy\\" validation failed: 
matches none of the enum values/ + + + +=== TEST 2: sanity check (invalid cache_zone when specifying cache_strategy as memory) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_key":["$host","$uri"], + "cache_zone": "invalid_cache_zone", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_http_status": [200], + "hide_cache_headers": true, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-cache err: cache_zone invalid_cache_zone not found"/ + + + +=== TEST 3: sanity check (normal case for memory strategy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_key":["$host","$uri"], + "cache_zone": "memory_cache", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "hide_cache_headers": false, + "cache_ttl": 300, + "cache_http_status": [200], + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 4: hit route (cache miss) +--- request +GET /hello +--- response_body chop +hello world! 
+--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 5: hit route (cache hit) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: HIT + + + +=== TEST 6: hit route (cache bypass) +--- request +GET /hello?bypass=1 +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: BYPASS + + + +=== TEST 7: purge cache +--- request +PURGE /hello +--- error_code: 200 + + + +=== TEST 8: hit route (nocache) +--- request +GET /hello?no_cache=1 +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 9: hit route (there's no cache indeed) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS +--- raw_response_headers_unlike +Expires: + + + +=== TEST 10: hit route (will be cached) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: HIT + + + +=== TEST 11: hit route (not found) +--- request +GET /hello-not-found +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 12: hit route (404 there's no cache indeed) +--- request +GET /hello-not-found +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 13: hit route (HEAD method) +--- request +HEAD /hello-world +--- error_code: 200 +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 14: hit route (HEAD method there's no cache) +--- request +HEAD /hello-world +--- error_code: 200 +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 15: purge cache +--- request +PURGE /hello +--- error_code: 200 + + + +=== TEST 16: purge cache (not found) +--- request +PURGE /hello-world +--- error_code: 404 + + + +=== TEST 17: hide cache headers = false +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_key":["$host","$uri"], + "cache_zone": "memory_cache", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_ttl": 300, + "cache_http_status": [200], + "hide_cache_headers": false, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 18: hit route (catch the cache headers) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS +--- response_headers_like +Cache-Control: + + + +=== TEST 19: don't override cache relative headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: hit route +--- request +GET /echo +--- more_headers +Apisix-Cache-Status: Foo +Cache-Control: bar +Expires: any +--- response_headers +Apisix-Cache-Status: Foo +Cache-Control: bar +Expires: any + + + +=== TEST 21: set cache_ttl to 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_key":["$host","$uri"], + "cache_zone": "memory_cache", + "cache_bypass": ["$arg_bypass"], + "cache_method": ["GET"], + "cache_ttl": 2, + "cache_http_status": [200], + 
"hide_cache_headers": false, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 22: hit route (MISS) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 23: hit route (HIT) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: HIT +--- wait: 2 + + + +=== TEST 24: hit route (MISS) +--- request +GET /hello +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: EXPIRED + + + +=== TEST 25: enable cache_control option +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_key":["$host","$uri"], + "cache_zone": "memory_cache", + "cache_bypass": ["$arg_bypass"], + "cache_control": true, + "cache_method": ["GET"], + "cache_ttl": 10, + "cache_http_status": [200], + "hide_cache_headers": false, + "no_cache": ["$arg_no_cache"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 200 +--- response_body +passed + + + +=== TEST 26: hit route (MISS) +--- request +GET /hello +--- more_headers +Cache-Control: max-age=60 +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS +--- wait: 1 + + + +=== TEST 27: hit route (request header cache-control with max-age) +--- request +GET /hello +--- more_headers +Cache-Control: max-age=1 +--- response_body chop +hello world! 
+--- response_headers +Apisix-Cache-Status: STALE + + + +=== TEST 28: hit route (request header cache-control with min-fresh) +--- request +GET /hello +--- more_headers +Cache-Control: min-fresh=300 +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: STALE +--- wait: 1 + + + +=== TEST 29: purge cache +--- request +PURGE /hello +--- error_code: 200 + + + +=== TEST 30: hit route (request header cache-control with no-store) +--- request +GET /hello +--- more_headers +Cache-Control: no-store +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: BYPASS + + + +=== TEST 31: hit route (request header cache-control with no-cache) +--- request +GET /hello +--- more_headers +Cache-Control: no-cache +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: BYPASS + + + +=== TEST 32: hit route (response header cache-control with private) +--- request +GET /hello?cc=private +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 33: hit route (response header cache-control with no-store) +--- request +GET /hello?cc=no-store +--- response_body chop +hello world! +--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 34: hit route (response header cache-control with no-cache) +--- request +GET /hello?cc=no-cache +--- response_body chop +hello world! 
+--- response_headers +Apisix-Cache-Status: MISS + + + +=== TEST 35: hit route (request header cache-control with only-if-cached) +--- request +GET /hello +--- more_headers +Cache-Control: only-if-cached +--- error_code: 504 + + + +=== TEST 36: configure plugin without memory_cache zone for cache_strategy = memory +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_key":["$host","$uri"], + "cache_bypass": ["$arg_bypass"], + "cache_control": true, + "cache_method": ["GET"], + "cache_ttl": 10, + "cache_http_status": [200] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like +.*err: invalid or empty cache_zone for cache_strategy: memory.* +--- error_code: 400 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-control.t b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-control.t new file mode 100644 index 0000000..7643100 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-control.t @@ -0,0 +1,134 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: proxy_request_buffering off +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "proxy-control": { + "request_buffering": false + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: hit, only the upstream server will buffer the request +--- request eval +"POST /hello +" . 
"12345" x 10240 +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 3: proxy_request_buffering on +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "proxy-control": { + "request_buffering": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 4: hit +--- request eval +"POST /hello +" . "12345" x 10240 +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file +a client request body is buffered to a temporary file diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-mirror.t b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-mirror.t new file mode 100644 index 0000000..458bcf3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-mirror.t @@ -0,0 +1,912 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +log_level('info'); +worker_connections(1024); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 1986; + server_tokens off; + + location / { + content_by_lua_block { + local core = require("apisix.core") + core.log.info("upstream_http_version: ", ngx.req.http_version()) + + local headers_tab = ngx.req.get_headers() + local headers_key = {} + for k in pairs(headers_tab) do + core.table.insert(headers_key, k) + end + core.table.sort(headers_key) + + for _, v in pairs(headers_key) do + core.log.info(v, ": ", headers_tab[v]) + end + + core.log.info("uri: ", ngx.var.request_uri) + ngx.say("hello world") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity check (invalid schema) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "ftp://127.0.0.1:1999" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-mirror/ + + + +=== TEST 2: sanity check (invalid port format) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1::1999" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 
1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-mirror/ + + + +=== TEST 3: sanity check (without schema) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "127.0.0.1:1999" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-mirror/ + + + +=== TEST 4: sanity check (without port) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 5: sanity check (include uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1999/invalid_uri" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 400 +--- response_body eval +qr/failed to check the configuration of plugin proxy-mirror/ + + + +=== TEST 6: sanity check (normal case) +--- config 
+ location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1986" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 7: hit route +--- request +GET /hello +--- error_code: 200 +--- response_body +hello world +--- error_log +uri: /hello + + + +=== TEST 8: sanity check (normal case), and uri is "/uri" +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1986" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/uri" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 9: the request header does not change +--- request +GET /uri +--- error_code: 200 +--- more_headers +host: 127.0.0.2 +api-key: hello +api-key2: world +name: jake +--- response_body +uri: /uri +api-key: hello +api-key2: world +host: 127.0.0.2 +name: jake +x-real-ip: 127.0.0.1 +--- error_log +api-key: hello +api-key2: world +host: 127.0.0.2 +name: jake + + + +=== TEST 10: sanity check (normal case), used to test http version +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1986" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + 
ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 11: after the mirroring request, the upstream http version is 1.1 +--- request +GET /hello +--- error_code: 200 +--- more_headers +host: 127.0.0.2 +api-key: hello +--- response_body +hello world +--- error_log +upstream_http_version: 1.1 +api-key: hello +host: 127.0.0.2 + + + +=== TEST 12: delete route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 13: sanity check (invalid sample_ratio) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1986", + "sample_ratio": 10 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin proxy-mirror err: property \"sample_ratio\" validation failed: expected 10 to be at most 1"} + + + +=== TEST 14: set mirror requests sample_ratio to 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1986", + "sample_ratio": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 15: hit route with sample_ratio 1 +--- request 
+GET /hello?sample_ratio=1 +--- error_code: 200 +--- response_body +hello world +--- error_log_like eval +qr/uri: \/hello\?sample_ratio=1/ + + + +=== TEST 16: set mirror requests sample_ratio to 0.5 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1986", + "sample_ratio": 0.5 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 17: send batch requests and get mirror stat count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + -- send batch requests + local tb = {} + for i = 1, 200 do + local th = assert(ngx.thread.spawn(function(i) + t('/hello?sample_ratio=0.5', ngx.HTTP_GET) + end, i)) + table.insert(tb, th) + end + for i, th in ipairs(tb) do + ngx.thread.wait(th) + end + } + } +--- error_log_like eval +qr/(uri: \/hello\?sample_ratio=0\.5){75,125}/ + + + +=== TEST 18: custom path +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1986", + "path": "/a" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: hit route +--- request +GET /hello +--- response_body +hello world +--- error_log +uri: /a, + + + +=== TEST 20: hit route with args +--- request +GET /hello?a=1 +--- response_body +hello world +--- error_log +uri: /a?a=1 + + + +=== TEST 21: sanity check (path) +--- config + 
location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, p in ipairs({ + "a", + "/a?a=c", + }) do + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1999", + "path": "]] .. p .. [[" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + ngx.log(ngx.WARN, body) + end + } + } +--- grep_error_log eval +qr/property \\"path\\" validation failed: failed to match pattern/ +--- grep_error_log_out +property \"path\" validation failed: failed to match pattern +property \"path\" validation failed: failed to match pattern + + + +=== TEST 22: sanity check (host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, p in ipairs({ + "http://a", + "http://ab.com", + "http://[::1]", + "http://[::1]:202", + }) do + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "]] .. p .. 
[[" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + ngx.log(ngx.WARN, body) + end + } + } +--- grep_error_log eval +qr/(passed|property \\"host\\" validation failed: failed to match pattern)/ +--- grep_error_log_out +passed +passed +passed +passed + + + +=== TEST 23: set mirror requests host to domain +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://test.com:1980", + "path": "/hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 24: hit route resolver domain +--- request +GET /hello +--- response_body +hello world +--- error_log_like eval +qr/http:\/\/test\.com is resolved to: http:\/\/((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}/ + + + +=== TEST 25: set as a domain name that cannot be found. 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://not-find-domian.notfind", + "path": "/get" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 26: hit route resolver error domain +--- request +GET /hello +--- response_body +hello world +--- error_log +dns resolver resolves domain: not-find-domian.notfind error: + + + +=== TEST 27: custom path with prefix path_concat_mode +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "http://127.0.0.1:1986", + "path": "/a", + "path_concat_mode": "prefix" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 28: hit route with prefix path_concat_mode +--- request +GET /hello +--- response_body +hello world +--- error_log +uri: /a/hello, + + + +=== TEST 29: hit route with args and prefix path_concat_mode +--- request +GET /hello?a=1 +--- response_body +hello world +--- error_log +uri: /a/hello?a=1 + + + +=== TEST 30: (grpc) sanity check (normal case grpc) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "grpc://127.0.0.1:1986" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "scheme": "grpc", + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 
300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 31: (grpcs) sanity check (normal case for grpcs) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "grpcs://127.0.0.1:1986" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "scheme": "grpc", + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-mirror2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-mirror2.t new file mode 100644 index 0000000..adc2b4c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-mirror2.t @@ -0,0 +1,128 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +log_level('info'); +worker_connections(1024); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 1986; + server_tokens off; + + location / { + content_by_lua_block { + local core = require("apisix.core") + core.log.info("upstream_http_version: ", ngx.req.http_version()) + + local headers_tab = ngx.req.get_headers() + local headers_key = {} + for k in pairs(headers_tab) do + core.table.insert(headers_key, k) + end + core.table.sort(headers_key) + + for _, v in pairs(headers_key) do + core.log.info(v, ": ", headers_tab[v]) + end + + core.log.info("uri: ", ngx.var.request_uri) + ngx.say("hello world") + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: use proxy-rewrite to change uri before mirror +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite":{ + "_meta": { + "priority": 1010 + }, + "uri": "/hello" + }, + "proxy-mirror": { + "_meta": { + "priority": 1008 + }, + "host": "http://127.0.0.1:1986" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/nope" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit route (with proxy-rewrite) +--- request +GET /nope +--- response_body +hello world +--- error_log +uri: /hello + + + +=== TEST 3: hit route (with proxy-rewrite and args) +--- request +GET /nope?a=b&b=c&c=d +--- response_body +hello world +--- error_log +uri: /hello?a=b&b=c&c=d diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-mirror3.t 
b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-mirror3.t new file mode 100644 index 0000000..2fdc90d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-mirror3.t @@ -0,0 +1,76 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + if (!$block->request) { + $block->set_value("request", "POST /hello"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: grpc mirror +--- log_level: debug +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + plugins: + proxy-mirror: + host: grpc://127.0.0.1:19797 + sample_ratio: 1 + upstream: + scheme: grpc + nodes: + "127.0.0.1:10051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + 
"message": "Hello apisix" +} +--- error_log eval +qr/Connection refused\) while connecting to upstream/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-rewrite.t b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-rewrite.t new file mode 100644 index 0000000..4d74658 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-rewrite.t @@ -0,0 +1,1233 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.proxy-rewrite") + local ok, err = plugin.check_schema({ + uri = '/apisix/home', + host = 'apisix.iresty.com' + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/test/add", + "host": "apisix.iresty.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: update plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/test/update", + "host": "apisix.iresty.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: disable plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + 
"type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: set route(rewrite host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "uri": "/plugin_proxy_rewrite", + "host": "apisix.iresty.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: rewrite host +--- request +GET /hello HTTP/1.1 +--- response_body +uri: /plugin_proxy_rewrite +host: apisix.iresty.com +scheme: http + + + +=== TEST 7: set route(rewrite host + upstream scheme is https) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "uri": "/plugin_proxy_rewrite", + "host": "test.com" + } + }, + "upstream": { + "scheme": "https", + "nodes": { + "127.0.0.1:1983": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: rewrite host + upstream scheme is https +--- request +GET /hello HTTP/1.1 +--- response_body +uri: /plugin_proxy_rewrite +host: test.com +scheme: https + + + +=== TEST 9: set route(rewrite headers) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri/plugin_proxy_rewrite", + "headers": { + 
"X-Api-Version": "v2" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: rewrite headers +--- request +GET /hello HTTP/1.1 +--- more_headers +X-Api-Version:v1 +--- response_body +uri: /uri/plugin_proxy_rewrite +host: localhost +x-api-version: v2 +x-real-ip: 127.0.0.1 + + + +=== TEST 11: set route(add headers) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri/plugin_proxy_rewrite", + "headers": { + "X-Api-Engine": "apisix" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: add headers +--- request +GET /hello HTTP/1.1 +--- response_body +uri: /uri/plugin_proxy_rewrite +host: localhost +x-api-engine: apisix +x-real-ip: 127.0.0.1 + + + +=== TEST 13: set route(rewrite empty headers) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri/plugin_proxy_rewrite", + "headers": { + "X-Api-Test": "hello" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: rewrite empty headers +--- request +GET /hello HTTP/1.1 +--- more_headers +X-Api-Test: +--- response_body +uri: /uri/plugin_proxy_rewrite +host: localhost 
+x-api-test: hello +x-real-ip: 127.0.0.1 + + + +=== TEST 15: set route(rewrite uri args) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/plugin_proxy_rewrite_args" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: rewrite uri args +--- request +GET /hello?q=apisix&a=iresty HTTP/1.1 +--- response_body +uri: /plugin_proxy_rewrite_args +a: iresty +q: apisix + + + +=== TEST 17: set route(rewrite uri empty args) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/plugin_proxy_rewrite_args" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: rewrite uri empty args +--- request +GET /hello HTTP/1.1 +--- response_body +uri: /plugin_proxy_rewrite_args + + + +=== TEST 19: remove header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri/plugin_proxy_rewrite", + "headers": { + "X-Api-Engine": "APISIX", + "X-Api-Test": "" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== 
TEST 20: remove header +--- request +GET /hello HTTP/1.1 +--- more_headers +X-Api-Test: foo +X-Api-Engine: bar +--- response_body +uri: /uri/plugin_proxy_rewrite +host: localhost +x-api-engine: APISIX +x-real-ip: 127.0.0.1 + + + +=== TEST 21: set route(only using regex_uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "regex_uri": ["^/test/(.*)/(.*)/(.*)", "/$1_$2_$3"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/test/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: hit route(rewrite uri using regex_uri) +--- request +GET /test/plugin/proxy/rewrite HTTP/1.1 +--- response_body +uri: /plugin_proxy_rewrite +host: localhost +scheme: http + + + +=== TEST 23: hit route(404 not found) +--- request +GET /test/not/found HTTP/1.1 +--- error_code: 404 + + + +=== TEST 24: set route(Using both uri and regex_uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/hello", + "regex_uri": ["^/test/(.*)", "/${1}1"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/test/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 25: hit route(rewrite uri using uri & regex_uri property) +--- request +GET /test/hello HTTP/1.1 +--- response_body +hello world + + + +=== TEST 26: set route(invalid regex_uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + 
[[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "regex_uri": ["^/test/(.*)"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/test/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 + + + +=== TEST 27: set route(invalid regex syntax for the first element) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "regex_uri": ["[^/test/(.*)", "/$1"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/test/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/invalid regex_uri/ + + + +=== TEST 28: set route(invalid regex syntax for the second element) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "regex_uri": ["^/test/(.*)", "/$`1"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/test/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- error_log +invalid capturing variable name found + + + +=== TEST 29: set route(invalid uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + 
ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/failed to match pattern/ + + + +=== TEST 30: wrong value of uri +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.proxy-rewrite") + local ok, err = plugin.check_schema({ + uri = 'home' + }) + if not ok then + ngx.say(err) + return + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "uri" validation failed: failed to match pattern "^\\/.*" with "home" + + + +=== TEST 31: set route(invalid header field) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri/plugin_proxy_rewrite", + "headers": { + "X-Api:Version": "v2" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/invalid field character/ +--- error_log +header field: X-Api:Version + + + +=== TEST 32: set route(invalid header value) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri/plugin_proxy_rewrite", + "headers": { + "X-Api-Version": "v2\r\n" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/invalid value character/ + + + +=== TEST 33: set route(rewrite uri with args) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + 
local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/plugin_proxy_rewrite_args?q=apisix" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 34: rewrite uri with args +--- request +GET /hello?a=iresty +--- response_body_like eval +qr/uri: \/plugin_proxy_rewrite_args( +q: apisix +a: iresty| +a: iresty +q: apisix) +/ + + + +=== TEST 35: print the plugin `conf` in etcd, no dirty data +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local encode_with_keys_sorted = require("toolkit.json").encode + + local code, _, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri/plugin_proxy_rewrite", + "headers": { + "X-Api": "v2" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local resp_data = core.json.decode(body) + ngx.say(encode_with_keys_sorted(resp_data.value.plugins)) + } + } +--- request +GET /t +--- response_body +{"proxy-rewrite":{"headers":{"X-Api":"v2"},"uri":"/uri/plugin_proxy_rewrite","use_real_request_uri_unsafe":false}} + + + +=== TEST 36: set route(header contains nginx variables) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri", + "headers": { + "x-api": "$remote_addr", + "name": "$arg_name", + "x-key": "$http_key" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + 
ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 37: hit route(header supports nginx variables) +--- request +GET /hello?name=Bill HTTP/1.1 +--- more_headers +key: X-APISIX +--- response_body +uri: /uri +host: localhost +key: X-APISIX +name: Bill +x-api: 127.0.0.1 +x-key: X-APISIX +x-real-ip: 127.0.0.1 + + + +=== TEST 38: set route(nginx variable does not exist) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/uri", + "headers": { + "x-api": "$helle", + "name": "$arg_world", + "x-key": "$http_key", + "Version": "nginx_var_does_not_exist" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 39: hit route(get nginx variable is nil) +--- request +GET /hello HTTP/1.1 +--- response_body +uri: /uri +host: localhost +version: nginx_var_does_not_exist +x-real-ip: 127.0.0.1 + + + +=== TEST 40: set route(rewrite uri based on ctx.var) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/$arg_new_uri" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/test" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 41: hit route(upstream uri: should be /hello) +--- request +GET /test?new_uri=hello +--- response_body +hello world + + + +=== TEST 42: host with port +--- config + location /t { + content_by_lua_block { + local plugin = 
require("apisix.plugins.proxy-rewrite") + local ok, err = plugin.check_schema({ + host = 'apisix.iresty.com:6443', + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 43: set route(rewrite host with port), ensure ngx.var.uri matched the rewritten version +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "uri": "/uri", + "host": "test.com:6443" + }, + "serverless-post-function": { + "phase": "access", + "functions" : ["return function(conf, ctx) + assert(ngx.var.uri == \"/uri\", \"proxy-rewrite do not call ngx.req.set_uri\") + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 44: rewrite host with port +--- request +GET /hello +--- response_body +uri: /uri +host: test.com:6443 +x-real-ip: 127.0.0.1 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-rewrite2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-rewrite2.t new file mode 100644 index 0000000..7096e4a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-rewrite2.t @@ -0,0 +1,232 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + if (!$block->request) { + $block->set_value("request", "GET /hello"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: access $upstream_uri before proxy-rewrite +--- apisix_yaml +global_rules: + - + id: 1 + plugins: + serverless-pre-function: + phase: rewrite + functions: + - "return function() ngx.log(ngx.WARN, 'serverless [', ngx.var.upstream_uri, ']') end" +routes: + - + id: 1 + uri: /hello + plugins: + proxy-rewrite: + uri: "/plugin_proxy_rewrite" + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- error_log +serverless [] +--- response_body +uri: /plugin_proxy_rewrite +host: localhost +scheme: http + + + +=== TEST 2: default X-Forwarded-Proto +--- apisix_yaml +routes: + - + id: 1 + uri: /echo + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /echo +--- response_headers +X-Forwarded-Proto: http + + + +=== TEST 3: pass X-Forwarded-Proto +--- apisix_yaml +routes: + - + id: 1 + uri: /echo + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /echo +--- more_headers +X-Forwarded-Proto: https +--- response_headers 
+X-Forwarded-Proto: https + + + +=== TEST 4: customize X-Forwarded-Proto +--- apisix_yaml +routes: + - + id: 1 + uri: /echo + plugins: + proxy-rewrite: + headers: + X-Forwarded-Proto: https + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /echo +--- more_headers +X-Forwarded-Proto: grpc +--- response_headers +X-Forwarded-Proto: https + + + +=== TEST 5: make sure X-Forwarded-Proto hit the `core.request.header` cache +--- apisix_yaml +routes: + - + id: 1 + uri: /echo + plugins: + serverless-pre-function: + phase: rewrite + functions: + - return function(conf, ctx) local core = require("apisix.core"); ngx.log(ngx.ERR, core.request.header(ctx, "host")); end + proxy-rewrite: + headers: + X-Forwarded-Proto: https-rewrite + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /echo +--- more_headers +X-Forwarded-Proto: grpc +--- response_headers +X-Forwarded-Proto: https-rewrite +--- error_log +localhost + + + +=== TEST 6: pass duplicate X-Forwarded-Proto +--- apisix_yaml +routes: + - + id: 1 + uri: /echo + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /echo +--- more_headers +X-Forwarded-Proto: http +X-Forwarded-Proto: grpc +--- response_headers +X-Forwarded-Proto: http, grpc + + + +=== TEST 7: customize X-Forwarded-Port +--- apisix_yaml +routes: + - + id: 1 + uri: /echo + plugins: + proxy-rewrite: + headers: + X-Forwarded-Port: 10080 + upstream_id: 1 +upstreams: + - + id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END +--- request +GET /echo +--- more_headers +X-Forwarded-Port: 8080 +--- response_headers +X-Forwarded-Port: 10080 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-rewrite3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-rewrite3.t new file mode 100644 index 0000000..55afe14 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/proxy-rewrite3.t @@ -0,0 +1,1001 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route(rewrite method) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "uri": "/plugin_proxy_rewrite", + "method": "POST", + "host": "apisix.iresty.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit route(upstream uri: should be /hello) +--- request +GET /hello +--- error_log +plugin_proxy_rewrite get method: POST + + + +=== 
TEST 3: set route(update rewrite method) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "uri": "/plugin_proxy_rewrite", + "method": "GET", + "host": "apisix.iresty.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit route(upstream uri: should be /hello) +--- request +GET /hello +--- error_log +plugin_proxy_rewrite get method: GET + + + +=== TEST 5: wrong value of method key +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.proxy-rewrite") + local ok, err = plugin.check_schema({ + uri = '/apisix/home', + method = 'GET1', + host = 'apisix.iresty.com' + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "method" validation failed: matches none of the enum values +done + + + +=== TEST 6: set route(rewrite method with headers) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "uri": "/plugin_proxy_rewrite", + "method": "POST", + "host": "apisix.iresty.com", + "headers":{ + "x-api-version":"v1" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit route(with header) +--- request +GET /hello +--- error_log +plugin_proxy_rewrite get method: POST + + + +=== TEST 8: set route(unsafe uri not normalized at request) +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "use_real_request_uri_unsafe": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/print_uri_detailed" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: unsafe uri not normalized at request +--- request +GET /print%5Furi%5Fdetailed HTTP/1.1 +--- response_body +ngx.var.uri: /print_uri_detailed +ngx.var.request_uri: /print%5Furi%5Fdetailed + + + +=== TEST 10: set route(safe uri not normalized at request) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "use_real_request_uri_unsafe": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/print_uri_detailed" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: safe uri not normalized at request +--- request +GET /print_uri_detailed HTTP/1.1 +--- response_body +ngx.var.uri: /print_uri_detailed +ngx.var.request_uri: /print_uri_detailed + + + +=== TEST 12: set route(rewrite X-Forwarded-Host) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "headers": { + "X-Forwarded-Host": "test.com" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== 
TEST 13: rewrite X-Forwarded-Host +--- request +GET /echo HTTP/1.1 +--- more_headers +X-Forwarded-Host: apisix.ai +--- response_headers +X-Forwarded-Host: test.com + + + +=== TEST 14: set route header test +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "headers": { + "add":{"test": "123"}, + "set":{"test2": "2233"}, + "remove":["hello"] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: add exist header in muti-header +--- request +GET /echo HTTP/1.1 +--- more_headers +test: sssss +test: bbb +--- response_headers +test: sssss, bbb, 123 + + + +=== TEST 16: add header to exist header +--- request +GET /echo HTTP/1.1 +--- more_headers +test: sssss +--- response_headers +test: sssss, 123 + + + +=== TEST 17: remove header +--- request +GET /echo HTTP/1.1 +--- more_headers +hello: word +--- response_headers +hello: + + + +=== TEST 18: set header success +--- request +GET /echo HTTP/1.1 +--- response_headers +test2: 2233 + + + +=== TEST 19: header priority test +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "headers": { + "add":{"test": "test_in_add"}, + "set":{"test": "test_in_set"} + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: set and test priority test & deprecated calls test +--- request +GET /echo HTTP/1.1 +--- response_headers 
+test: test_in_set +--- no_error_log +DEPRECATED: use add_header(ctx, header_name, header_value) instead +DEPRECATED: use set_header(ctx, header_name, header_value) instead + + + +=== TEST 21: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "host": "test.xxxx.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:8125": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: hit with CRLF +--- request +GET /hello%3f0z=700%26a=c%20HTTP/1.1%0D%0AHost:google.com%0d%0a%0d%0a +--- http_config + server { + listen 8125; + location / { + content_by_lua_block { + ngx.say(ngx.var.host) + ngx.say(ngx.var.request_uri) + } + } + } +--- response_body +test.xxxx.com +/hello%3F0z=700&a=c%20HTTP/1.1%0D%0AHost:google.com%0D%0A%0D%0A + + + +=== TEST 23: set route with uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "uri": "/$uri/remain", + "host": "test.xxxx.com" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:8125": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: hit with CRLF +--- request +GET /hello%3f0z=700%26a=c%20HTTP/1.1%0D%0AHost:google.com%0d%0a%0d%0a +--- http_config + server { + listen 8125; + location / { + content_by_lua_block { + ngx.say(ngx.var.host) + ngx.say(ngx.var.request_uri) + } + } + } +--- response_body +test.xxxx.com +//hello%253F0z=700&a=c%20HTTP/1.1%0D%0AHost:google.com%0D%0A%0D%0A/remain + + + +=== TEST 25: regex_uri with args 
+--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "regex_uri": ["^/test/(.*)/(.*)/(.*)", "/$1_$2_$3?a=c"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:8125": 1 + }, + "type": "roundrobin" + }, + "uri": "/test/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: hit +--- request +GET /test/plugin/proxy/rewrite HTTP/1.1 +--- http_config + server { + listen 8125; + location / { + content_by_lua_block { + ngx.say(ngx.var.request_uri) + } + } + } +--- response_body +/plugin_proxy_rewrite?a=c + + + +=== TEST 27: use variables in headers when captured by regex_uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/test/*", + "plugins": { + "proxy-rewrite": { + "regex_uri": ["^/test/(.*)/(.*)/(.*)", "/echo"], + "headers": { + "add": { + "X-Request-ID": "$1/$2/$3" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 28: hit +--- request +GET /test/plugin/proxy/rewrite HTTP/1.1 +--- response_headers +X-Request-ID: plugin/proxy/rewrite + + + +=== TEST 29: use variables in header when not matched regex_uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo*", + "plugins": { + "proxy-rewrite": { + "regex_uri": ["^/test/(.*)/(.*)/(.*)", "/echo"], + "headers": { + "add": { + "X-Request-ID": "$1/$2/$3" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + 
"type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 30: hit +--- request +GET /echo HTTP/1.1 +--- more_headers +X-Foo: Foo +--- response_headers +X-Foo: Foo + + + +=== TEST 31: use variables in headers when captured by regex_uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/test/*", + "plugins": { + "proxy-rewrite": { + "regex_uri": ["^/test/(not_matched)?.*", "/echo"], + "headers": { + "add": { + "X-Request-ID": "test1/$1/$2/test2" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 32: hit +--- request +GET /test/plugin/proxy/rewrite HTTP/1.1 +--- response_headers +X-Request-ID: test1///test2 + + + +=== TEST 33: set route (test if X-Forwarded-Port can be set before proxy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "headers": { + "X-Forwarded-Port": "9882" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 34: test if X-Forwarded-Port can be set before proxy +--- request +GET /echo HTTP/1.1 +--- more_headers +X-Forwarded-Port: 9881 +--- response_headers +X-Forwarded-Port: 9882 + + + +=== TEST 35: set route (test if X-Forwarded-For can be set before proxy) +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "headers": { + "X-Forwarded-For": "22.22.22.22" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 36: test if X-Forwarded-For can be set before proxy +--- request +GET /echo HTTP/1.1 +--- more_headers +X-Forwarded-For: 11.11.11.11 +--- response_headers +X-Forwarded-For: 22.22.22.22, 127.0.0.1 + + + +=== TEST 37: setting multiple regex_uris +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "regex_uri": [ + "^/test/(.*)/(.*)/(.*)/hello", + "/hello/$1_$2_$3", + "^/test/(.*)/(.*)/(.*)/world", + "/world/$1_$2_$3" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:8125": 1 + }, + "type": "roundrobin" + }, + "uri": "/test/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 38: hit +--- request +GET /test/plugin/proxy/rewrite/hello HTTP/1.1 +--- http_config + server { + listen 8125; + location / { + content_by_lua_block { + ngx.say(ngx.var.request_uri) + } + } + } +--- response_body +/hello/plugin_proxy_rewrite + + + +=== TEST 39: hit +--- request +GET /test/plugin/proxy/rewrite/world HTTP/1.1 +--- http_config + server { + listen 8125; + location / { + content_by_lua_block { + ngx.say(ngx.var.request_uri) + } + } + } +--- response_body +/world/plugin_proxy_rewrite + + + +=== TEST 40: use regex uri with unsafe allowed +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "regex_uri": [ + "/hello/(.+)", + "/hello?unsafe_variable=$1" + ], + "use_real_request_uri_unsafe": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:8125": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 41: hit +--- request +GET /hello/%ED%85%8C%EC%8A%A4%ED%8A%B8 HTTP/1.1 +--- http_config + server { + listen 8125; + location / { + content_by_lua_block { + ngx.say(ngx.var.request_uri) + } + } + } +--- response_body +/hello?unsafe_variable=%ED%85%8C%EC%8A%A4%ED%8A%B8 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/public-api.t b/CloudronPackages/APISIX/apisix-source/t/plugin/public-api.t new file mode 100644 index 0000000..6b4c9f3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/public-api.t @@ -0,0 +1,200 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + {uri = "/apisix/plugin/wolf-rbac/user_info"}, + {uri = 3233} + } + local plugin = require("apisix.plugins.public-api") + + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +done +property "uri" validation failed: wrong type: expected string, got number + + + +=== TEST 2: set route +--- config + location /t { + content_by_lua_block { + local data = { + { + uri = "/apisix/admin/consumers", + data = [[{ + "username": "alice", + "plugins": { + "jwt-auth": { + "key": "user-key", + "algorithm": "HS256" + } + } + }]] + }, + { + uri = "/apisix/admin/routes/direct-wolf-rbac-userinfo", + data = [[{ + "plugins": { + "public-api": {}, + "serverless-pre-function": { + "phase": "rewrite", + "functions": ["return function(conf, ctx) require(\"apisix.core\").log.warn(\"direct-wolf-rbac-userinfo was triggered\"); end"] + } + }, + "uri": "/apisix/plugin/wolf-rbac/user_info" + }]], + }, + { + uri = "/apisix/admin/routes/wrong-public-api", + data = [[{ + "plugins": { + "public-api": { + "uri": "/apisix/plugin/balalbala" + } + }, + "uri": "/wrong-public-api" + }]] + } + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.uri, ngx.HTTP_PUT, data.data) + ngx.say(code..body) + end + } + } +--- response_body eval +"201passed\n" x 3 + + + +=== TEST 3: hit route (direct-wolf-rbac-userinfo) +--- request +GET /apisix/plugin/wolf-rbac/user_info +--- error_code: 401 +--- error_log +direct-wolf-rbac-userinfo was triggered + + + +=== TEST 4: missing route (non-exist public API) +--- request +GET 
/apisix/plugin/balalbala +--- error_code: 404 + + + +=== TEST 5: hit route (wrong public-api uri) +--- request +GET /wrong-public-api +--- error_code: 404 + + + +=== TEST 6: setup route (protect public API) +--- config + location /t { + content_by_lua_block { + local data = { + { + uri = "/apisix/admin/consumers", + data = [[{ + "username": "bob", + "plugins": { + "key-auth": { + "key": "testkey" + } + } + }]] + }, + { + uri = "/apisix/admin/routes/custom-user-info", + data = [[{ + "plugins": { + "public-api": { + "uri": "/apisix/plugin/wolf-rbac/user_info" + }, + "key-auth": {}, + "serverless-pre-function": { + "phase": "rewrite", + "functions": ["return function(conf, ctx) require(\"apisix.core\").log.warn(\"direct-wolf-rbac-userinfo was triggered\"); end"] + } + }, + "uri": "/get_user_info" + }]], + } + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.uri, ngx.HTTP_PUT, data.data) + ngx.say(code..body) + end + } + } +--- response_body +201passed +201passed + + + +=== TEST 7: hit route (with key-auth header) +--- request +GET /get_user_info?key=user-key +--- more_headers +apikey: testkey +--- error_code: 401 +--- error_log +direct-wolf-rbac-userinfo was triggered + + + +=== TEST 8: hit route (without key-auth header) +--- request +GET /get_user_info?key=user-key +--- error_code: 401 +--- response_body +{"message":"Missing API key in request"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/real-ip.t b/CloudronPackages/APISIX/apisix-source/t/plugin/real-ip.t new file mode 100644 index 0000000..e6e0447 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/real-ip.t @@ -0,0 +1,472 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: schema check +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, case in ipairs({ + {input = {}}, + {input = { + source = "http_xff", + trusted_addresses = {"127.0.0.1/33"} + }}, + {input = { + source = "http_xff", + trusted_addresses = {"::1/129"} + }}, + }) do + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["real-ip"] = case.input + } + } + ) + ngx.print(body) + end + } +} +--- response_body +{"error_msg":"failed to check the configuration of plugin real-ip err: property \"source\" is required"} +{"error_msg":"failed to check the configuration of plugin real-ip err: property \"trusted_addresses\" validation failed: failed to validate item 1: object matches none of the required"} +{"error_msg":"failed to 
check the configuration of plugin real-ip err: invalid ip address: ::1/129"} + + + +=== TEST 2: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "real-ip": { + "source": "http_xff" + }, + "ip-restriction": { + "whitelist": ["1.1.1.1"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 3: hit +--- request +GET /hello +--- more_headers +XFF: 1.1.1.1 + + + +=== TEST 4: with port +--- request +GET /hello +--- more_headers +XFF: 1.1.1.1:80 + + + +=== TEST 5: miss address +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 6: bad address +--- request +GET /hello +--- more_headers +XFF: 1.1.1.1.1 +--- error_code: 403 + + + +=== TEST 7: bad port +--- request +GET /hello +--- more_headers +XFF: 1.1.1.1:65536 +--- error_code: 403 + + + +=== TEST 8: ipv6 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "real-ip": { + "source": "http_xff" + }, + "ip-restriction": { + "whitelist": ["::2"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 9: hit +--- request +GET /hello +--- more_headers +XFF: ::2 + + + +=== TEST 10: with port +--- request +GET /hello +--- more_headers +XFF: [::2]:80 + + + +=== TEST 11: with bracket +--- request +GET /hello +--- more_headers +XFF: [::2] + + + +=== TEST 12: check port +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "real-ip": { + "source": "http_xff" + }, + "response-rewrite": { + "headers": { + "remote_port": "$remote_port" + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 13: hit +--- request +GET /hello +--- more_headers +XFF: 1.1.1.1:7090 +--- response_headers +remote_port: 7090 + + + +=== TEST 14: X-Forwarded-For +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "real-ip": { + "source": "http_x_forwarded_for" + }, + "ip-restriction": { + "whitelist": ["::2"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 15: hit +--- request +GET /hello +--- more_headers +X-Forwarded-For: ::1, ::2 + + + +=== TEST 16: hit (multiple X-Forwarded-For) +--- request +GET /hello +--- more_headers +X-Forwarded-For: ::1 +X-Forwarded-For: ::2 + + + +=== TEST 17: miss address +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 18: trusted addresses (not trusted) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "real-ip": { + "trusted_addresses": ["192.128.0.0/16"], + "source": "http_x_forwarded_for" + }, + "ip-restriction": { + "whitelist": ["1.1.1.1"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 
19: hit +--- request +GET /hello +--- more_headers +X-Forwarded-For: 1.1.1.1 +--- error_code: 403 + + + +=== TEST 20: trusted addresses (trusted) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "real-ip": { + "trusted_addresses": ["192.128.0.0/16", "127.0.0.0/24"], + "source": "http_x_forwarded_for" + }, + "ip-restriction": { + "whitelist": ["1.1.1.1"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 21: hit +--- request +GET /hello +--- more_headers +X-Forwarded-For: 1.1.1.1 + + + +=== TEST 22: X-Forwarded-For and recursive +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "real-ip": { + "trusted_addresses": ["192.128.0.0/16", "127.0.0.0/24"], + "source": "http_x_forwarded_for", + "recursive": true + }, + "ip-restriction": { + "whitelist": ["1.1.1.1"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 23: hit +--- request +GET /hello +--- more_headers +X-Forwarded-For: 1.1.1.1, 192.128.1.1, 127.0.0.1 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/redirect.t b/CloudronPackages/APISIX/apisix-source/t/plugin/redirect.t new file mode 100644 index 0000000..e27f2f7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/redirect.t @@ -0,0 +1,1078 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +$ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +log_level('info'); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.redirect") + local ok, err = plugin.check_schema({ + ret_code = 302, + uri = '/foo', + }) + if not ok then + ngx.say(err) + return + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: default ret_code +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.redirect") + local ok, err = plugin.check_schema({ + -- ret_code = 302, + uri = '/foo', + }) + if not ok then + ngx.say(err) + return + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 3: add plugin with new uri: /test/add +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "uri": "/test/add", + "ret_code": 301 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + 
+ + +=== TEST 4: redirect +--- request +GET /hello +--- response_headers +Location: /test/add +--- error_code: 301 + + + +=== TEST 5: add plugin with new uri: $uri/test/add +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "uri": "$uri/test/add", + "ret_code": 301 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: redirect +--- request +GET /hello +--- response_headers +Location: /hello/test/add +--- error_code: 301 + + + +=== TEST 7: add plugin with new uri: $uri/test/a${arg_name}c +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "uri": "$uri/test/a${arg_name}c", + "ret_code": 302 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: redirect +--- request +GET /hello?name=json +--- response_headers +Location: /hello/test/ajsonc +--- error_code: 302 + + + +=== TEST 9: add plugin with new uri: /foo$$uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "uri": "/foo$$uri", + "ret_code": 302 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: redirect +--- request +GET /hello?name=json +--- response_headers +Location: /foo$/hello +--- error_code: 302 + + + +=== TEST 11: add plugin with new uri: \\$uri/foo$uri\\$uri/bar +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "uri": "\\$uri/foo$uri\\$uri/bar", + "ret_code": 301 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: redirect +--- request +GET /hello +--- response_headers +Location: \$uri/foo/hello\$uri/bar +--- error_code: 301 + + + +=== TEST 13: add plugin with new uri: $uri/$bad_var/bar +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "uri": "$uri/$bad_var/bar", + "ret_code": 301 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: redirect +--- request +GET /hello +--- response_headers +Location: /hello//bar +--- error_code: 301 + + + +=== TEST 15: http -> https redirect +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "vars": [ + [ + "scheme", + "==", + "http" + ] + ], + "plugins": { + "redirect": { + "uri": "https://$host$request_uri", + "ret_code": 301 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: redirect +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com/hello + + + +=== TEST 17: enable http_to_https +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, 
+ [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: redirect(port using `plugin_attr.redirect.https_port`) +--- extra_yaml_config +plugin_attr: + redirect: + https_port: 8443 +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com:8443/hello + + + +=== TEST 19: redirect(port using `apisix.ssl.listen`) +--- yaml_config +apisix: + ssl: + enable: true + listen: + - port: 9445 +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com:9445/hello + + + +=== TEST 20: redirect(port using `apisix.ssl.listen` when listen length is one) +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com:9443/hello + + + +=== TEST 21: redirect(port using `apisix.ssl.listen` when listen length more than one) +--- yaml_config +apisix: + ssl: + enable: true + listen: + - port: 6443 + - port: 7443 + - port: 8443 + - port: 9443 +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers_like +Location: https://foo.com:[6-9]443/hello + + + +=== TEST 22: redirect(port using `https default port`) +--- yaml_config +apisix: + ssl: + enable: null +--- extra_yaml_config +plugin_attr: + redirect: null +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com/hello + + + +=== TEST 23: enable http_to_https with ret_code(not take effect) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + 
"http_to_https": true, + "ret_code": 302 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: redirect +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com:9443/hello + + + +=== TEST 25: wrong configure, enable http_to_https with uri +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true, + "uri": "/hello" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/error_msg":"failed to check the configuration of plugin redirect err: value should match only one schema, but matches both schemas 1 and 3/ + + + +=== TEST 26: enable http_to_https with upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "test.com", + "plugins": { + "redirect": { + "http_to_https": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 27: redirect +--- request +GET /hello +--- more_headers +Host: test.com +--- error_code: 301 +--- response_headers +Location: https://test.com:9443/hello + + + +=== TEST 28: set ssl(sni: test.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local 
data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 29: client https request +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "test.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + + local req = "GET /hello HTTP/1.0\r\nHost: test.com\r\nConnection: close\r\n\r\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + ngx.say("sent http request: ", bytes, " bytes.") + + while true do + local line, err = sock:receive() + if not line then + -- ngx.say("failed to receive response status line: ", err) + break + end + + ngx.say("received: ", line) + end + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 58 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? 
+received: \nreceived: hello world +close: 1 nil} +--- no_error_log +[error] +[alert] + + + +=== TEST 30: add plugin with new uri: /test/add +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods":["POST","GET","HEAD"], + "plugins": { + "redirect": { + "http_to_https": true, + "ret_code": 307 + } + }, + "host": "test.com", + "uri": "/hello-https" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 31: http to https post redirect +--- request +POST /hello-https +--- more_headers +Host: test.com +--- response_headers +Location: https://test.com:9443/hello-https +--- error_code: 308 + + + +=== TEST 32: http to https get redirect +--- request +GET /hello-https +--- more_headers +Host: test.com +--- response_headers +Location: https://test.com:9443/hello-https +--- error_code: 301 + + + +=== TEST 33: http to https head redirect +--- request +HEAD /hello-https +--- more_headers +Host: test.com +--- response_headers +Location: https://test.com:9443/hello-https +--- error_code: 301 + + + +=== TEST 34: add plugin with new regex_uri: /test/1 redirect to http://test.com/1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "regex_uri": ["^/test/(.*)", "http://test.com/${1}"], + "ret_code": 301 + } + }, + "uris": ["/test/*", "/hello"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 35: regex_uri redirect +--- request +GET /test/1 +--- response_headers +Location: http://test.com/1 +--- error_code: 301 + + + +=== TEST 36: regex_uri not 
match, get response from upstream +--- request +GET /hello +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 37: add plugin with new regex_uri: encode_uri = true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "regex_uri": ["^/test/(.*)", "http://test.com/${1}"], + "ret_code": 301, + "encode_uri": true + } + }, + "uri": "/test/*", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 38: regex_uri redirect with special characters +--- request +GET /test/with%20space +--- error_code: 200 +--- response_headers +Location: http://test.com/with%20space +--- error_code: 301 + + + +=== TEST 39: add plugin with new uri: encode_uri = true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "uri": "$uri", + "ret_code": 301, + "encode_uri": true + } + }, + "uri": "/hello*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 40: redirect with special characters +--- request +GET /hello/with%20space +--- response_headers +Location: /hello/with%20space +--- error_code: 301 + + + +=== TEST 41: add plugin with new uri: $uri (append_query_string = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "uri": "$uri", + "ret_code": 302, + "append_query_string": true + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + 
ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 42: redirect +--- request +GET /hello?name=json +--- response_headers +Location: /hello?name=json +--- error_code: 302 + + + +=== TEST 43: add plugin with new uri: $uri?type=string (append_query_string = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "uri": "$uri?type=string", + "ret_code": 302, + "append_query_string": true + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 44: redirect +--- request +GET /hello?name=json +--- response_headers +Location: /hello?type=string&name=json +--- error_code: 302 + + + +=== TEST 45: enable http_to_https (pass X-Forwarded-Proto) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "vars": [ + [ + "scheme", + "==", + "http" + ] + ], + "plugins": { + "redirect": { + "http_to_https": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 46: enable http_to_https (pass X-Forwarded-Proto) +--- request +GET /hello +--- more_headers +Host: foo.com +X-Forwarded-Proto: http +--- error_code: 301 +--- response_headers +Location: https://foo.com:9443/hello + + + +=== TEST 47: wrong configure, enable http_to_https with append_query_string +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true, + 
"append_query_string": true + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body eval +qr/error_msg":"failed to check the configuration of plugin redirect err: only one of `http_to_https` and `append_query_string` can be configured."/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/redirect2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/redirect2.t new file mode 100644 index 0000000..6dcf144 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/redirect2.t @@ -0,0 +1,106 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set use regex_uri redirect and enable append_query_string route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "redirect": { + "regex_uri": ["^/test/(.*)", "http://test.com/${1}?q=apisix"], + "append_query_string": true + } + }, + "uri": "/test/*", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: set use regex_uri redirect and enable append_query_string route +--- request +GET /test/hello?o=apache +--- response_headers +Location: http://test.com/hello?q=apisix&o=apache +--- error_code: 302 + + + +=== TEST 3: compatible with old version configuration +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "plugins": { + "redirect": { + "http_to_https": true, + "append_query_string": false + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/referer-restriction.t b/CloudronPackages/APISIX/apisix-source/t/plugin/referer-restriction.t new file mode 100644 index 0000000..fa4fe5f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/referer-restriction.t @@ -0,0 +1,269 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("no_error_log", "[error]"); + + $block; +}); + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +run_tests; + +__DATA__ + +=== TEST 1: set whitelist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "referer-restriction": { + "whitelist": [ + "*.xx.com", + "yy.com" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route and in the whitelist (wildcard) +--- request +GET /hello +--- more_headers +Referer: http://www.xx.com +--- response_body +hello world + + + +=== TEST 3: hit route and in the whitelist +--- request +GET /hello +--- more_headers +Referer: https://yy.com/am +--- response_body +hello world + + + +=== TEST 4: hit route and not in the whitelist +--- request +GET /hello +--- more_headers +Referer: https://www.yy.com/am +--- error_code: 403 + + + +=== TEST 5: hit route and without Referer +--- request +GET /hello 
+--- error_code: 403 + + + +=== TEST 6: set whitelist, allow Referer missing +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "referer-restriction": { + "bypass_missing": true, + "whitelist": [ + "*.xx.com", + "yy.com" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit route and without Referer +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 8: malformed Referer is treated as missing +--- request +GET /hello +--- more_headers +Referer: www.yy.com +--- response_body +hello world + + + +=== TEST 9: invalid schema +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.referer-restriction") + local cases = { + "x.*", + "~y.xn", + } + for _, c in ipairs(cases) do + local ok, err = plugin.check_schema({ + whitelist = {c} + }) + if ok then + ngx.log(ngx.ERR, c) + end + end + } + } +--- request +GET /t + + + +=== TEST 10: set blacklist with reject message +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "referer-restriction": { + "blacklist": [ + "*.xx.com" + ], + "message": "Your referer host is deny" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: hit route and in the blacklist +--- request +GET /hello +--- more_headers +Referer: http://www.xx.com +--- error_code: 403 +--- response_body +{"message":"Your referer host 
is deny"} + + + +=== TEST 12: hit route and not in the blacklist +--- request +GET /hello +--- more_headers +Referer: https://yy.com +--- response_body +hello world + + + +=== TEST 13: whitelist and blacklist mutual exclusive +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.referer-restriction") + local ok, err = plugin.check_schema({whitelist={"xx.com"}, blacklist={"yy.com"}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +value should match only one schema, but matches both schemas 1 and 2 +done diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/request-id.t b/CloudronPackages/APISIX/apisix-source/t/plugin/request-id.t new file mode 100644 index 0000000..39e9f94 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/request-id.t @@ -0,0 +1,507 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +worker_connections(1024); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.request-id") + local ok, err = plugin.check_schema({}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: wrong type +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.request-id") + local ok, err = plugin.check_schema({include_in_response = "bad_type"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "include_in_response" validation failed: wrong type: expected boolean, got string +done + + + +=== TEST 3: add plugin with include_in_response true (default true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: check for request id in response header (default header name) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + ["Content-Type"] = "application/json", + } + }) + + if res.headers["X-Request-Id"] then + ngx.say("request header present") + else + ngx.say("failed") + end + } + } +--- response_body +request header present + + + +=== TEST 5: check for unique id +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local t = {} + local ids = {} + for i = 1, 180 do + local th = assert(ngx.thread.spawn(function() + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + ["Content-Type"] = "application/json", + } + } + ) + if not res then + ngx.log(ngx.ERR, err) + return + end + + local id = res.headers["X-Request-Id"] + if not id then + return -- ignore if the data is not synced yet. + end + + if ids[id] == true then + ngx.say("ids not unique") + return + end + ids[id] = true + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + + ngx.say("true") + } + } +--- wait: 5 +--- response_body +true + + + +=== TEST 6: add plugin with custom header name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + "header_name": "Custom-Header-Name" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: check for request id in response header (custom header name) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + ["Content-Type"] = "application/json", + } + }) + + if res.headers["Custom-Header-Name"] then + ngx.say("request header present") + else + ngx.say("failed") + end + } + } +--- response_body +request header present + + + +=== TEST 8: add plugin with include_in_response false (default true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + "include_in_response": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: check for request id is not present in the response header +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + ["Content-Type"] = "application/json", + } + }) + + if not res.headers["X-Request-Id"] then + ngx.say("request header not present") + else + ngx.say("failed") + end + } + } +--- response_body +request header not present + + + +=== TEST 10: add plugin with custom header name in global rule and add plugin with default header name in specific route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + "header_name":"Custom-Header-Name" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: check for multiple request-ids in the response header are different +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + ["Content-Type"] = "application/json", + } + }) + + if res.headers["X-Request-Id"] ~= res.headers["Custom-Header-Name"] then + ngx.say("X-Request-Id and Custom-Header-Name are different") + else + ngx.say("failed") + end + } + } +--- response_body +X-Request-Id and Custom-Header-Name are different + + + +=== TEST 12: wrong algorithm type +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.request-id") + local ok, err = plugin.check_schema({algorithm = "bad_algorithm"}) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +property "algorithm" validation failed: matches none of the enum values +done + + + +=== TEST 13: add plugin with include_in_response true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + "include_in_response": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: echo back the client's header if given +--- request +GET /opentracing +--- more_headers +X-Request-ID: 123 +--- response_headers +X-Request-ID: 123 + + + +=== TEST 15: add plugin with algorithm nanoid (default uuid) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local v = {} + local ids = {} + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + "algorithm": "nanoid" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + if code >= 300 then + ngx.say("algorithm nanoid is 
error") + end + for i = 1, 180 do + local th = assert(ngx.thread.spawn(function() + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + ["Content-Type"] = "application/json", + } + } + ) + if not res then + ngx.log(ngx.ERR, err) + return + end + local id = res.headers["X-Request-Id"] + if not id then + return -- ignore if the data is not synced yet. + end + if ids[id] == true then + ngx.say("ids not unique") + return + end + ids[id] = true + end, i)) + table.insert(v, th) + end + for i, th in ipairs(v) do + ngx.thread.wait(th) + end + ngx.say("true") + } + } +--- wait: 5 +--- response_body +true diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/request-id2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/request-id2.t new file mode 100644 index 0000000..421a089 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/request-id2.t @@ -0,0 +1,188 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +worker_connections(1024); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: check config with algorithm range_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + "algorithm": "range_id", + "range_id": { + "char_set": "abcdefg", + "length": 20 + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: add plugin with algorithm range_id (set automatic default) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + "algorithm": "range_id" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: hit +--- request +GET /opentracing + + + +=== TEST 4: add plugin with algorithm range_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local http = require "resty.http" + local v = {} + local ids = {} + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-id": { + "algorithm": "range_id", + "range_id": {} + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + if code >= 300 then + ngx.say("algorithm range_id is error") + end + for i = 1, 180 
do + local th = assert(ngx.thread.spawn(function() + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + ["Content-Type"] = "application/json", + } + } + ) + if not res then + ngx.log(ngx.ERR, err) + return + end + local id = res.headers["X-Request-Id"] + if not id then + return -- ignore if the data is not synced yet. + end + if #id ~= 16 then + ngx.say(id) + ngx.say("incorrect length for id") + return + end + local start, en = string.find(id, '[a-zA-Z0-9]*') + if start ~= 1 or en ~= 16 then + ngx.say("incorrect char set for id") + ngx.say(id) + return + end + if ids[id] == true then + ngx.say("ids not unique") + return + end + ids[id] = true + end, i)) + table.insert(v, th) + end + for i, th in ipairs(v) do + ngx.thread.wait(th) + end + ngx.say("true") + } + } +--- wait: 5 +--- response_body +true diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/request-validation.t b/CloudronPackages/APISIX/apisix-source/t/plugin/request-validation.t new file mode 100644 index 0000000..f1272c6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/request-validation.t @@ -0,0 +1,1785 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.request-validation") + local ok, err = plugin.check_schema({body_schema = {}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: missing schema for header and body +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.request-validation") + local ok, err = plugin.check_schema({}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body_like eval +qr/object matches none of the required/ + + + +=== TEST 3: add plugin with all combinations +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + plugins = { + ["request-validation"] = { + body_schema = { + type = "object", + required = { "required_payload" }, + properties = { + required_payload = { + type = "string" + }, + boolean_payload = { + type = "boolean" + }, + timeouts = { + type = "integer", + minimum = 1, + maximum = 254, + default = 3 + }, + req_headers = { + type = "array", + minItems = 1, + items = { + type = "string" + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1982"] = 1 + }, + type = "roundrobin" + }, + uri = "/opentracing" + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: required payload missing +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local 
httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "POST", + body = '{"boolean-payload": true}', + headers = { + ["Content-Type"] = "application/json", + } + }) + + if res.status == 400 then + ngx.say("required field missing") + else + ngx.say("failed") + end + } + } +--- request +GET /t +--- response_body +required field missing +--- error_log +property "required_payload" is required + + + +=== TEST 5: required payload added +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "POST", + body = '{"boolean-payload": true,' .. + '"required_payload": "hello"}', + headers = { + ["Content-Type"] = "application/json", + } + }) + + if res.status == 200 then + ngx.say("hello1 world") + else + ngx.say("failed") + end + } + } +--- request +GET /t +--- response_body +hello1 world +--- no_error_log + + + +=== TEST 6: Add plugin with header_schema +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + plugins = { + ["request-validation"] = { + header_schema = { + type = "object", + required = { "required_payload" }, + properties = { + required_payload = { + type = "string" + }, + boolean_payload = { + type = "boolean" + }, + timeouts = { + type = "integer", + minimum = 1, + maximum = 254, + default = 3 + }, + req_headers = { + type = "array", + minItems = 1, + items = { + type = "string" + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1982"] = 1 + }, + type = "roundrobin" + }, + uri = "/opentracing" + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- 
request +GET /t +--- response_body +passed + + + +=== TEST 7: required header payload missing +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + ["Content-Type"] = "application/json" + } + }) + + if res.status == 400 then + ngx.say("required field missing") + else + ngx.say("failed") + end + } + } +--- request +GET /t +--- response_body +required field missing +--- error_log +property "required_payload" is required + + + +=== TEST 8: required header added in header +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" + local res, err = httpc:request_uri(uri, + { + method = "GET", + headers = { + ["Content-Type"] = "application/json", + ["required_payload"] = "test payload" + } + }) + + if res.status == 200 then + ngx.say("hello1 world") + else + ngx.say("failed") + end + } + } +--- request +GET /t +--- response_body +hello1 world + + + +=== TEST 9: add route (test request validation `body_schema`) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "object", + "required": ["required_payload"], + "properties": { + "required_payload": {"type": "string"}, + "boolean_payload": {"type": "boolean"}, + "timeouts": { + "type": "integer", + "minimum": 1, + "maximum": 254, + "default": 3 + }, + "req_headers": { + "type": "array", + "minItems": 1, + "items": { + "type": "string" + } + } + } + } + } + },]] .. 
[[ + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: add route (test request validation `body_schema.type` is object) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "object" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: add route (test request validation `body_schema.type` is array) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "array" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: add route (test request validation `body_schema.type` is string) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "string" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET 
/t +--- response_body +passed + + + +=== TEST 13: add route (test request validation `body_schema.type` is number) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "number" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: add route (test request validation `body_schema.type` is integer) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "integer" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: add route (test request validation `body_schema.type` is table) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "table" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: add route (test request validation `body_schema.type` is function) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local 
code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "function" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: add route (test request validation `body_schema.type` failure) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "test" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/invalid JSON type: test/ +--- error_code chomp +400 + + + +=== TEST 18: add route (test request validation `body_schema.enum` failure) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "string", + "properties": { + "test": { + "type": "string", + "enum": "test-enum" + } + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/table expected, got string/ +--- error_code chomp +400 + + + +=== TEST 19: add route (test request validation `body_schema.enum` success) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "string", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: add route (test request validation `body_schema.required` failure) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "object", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + }, + "required": "test-required" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/table expected, got string/ +--- error_code chomp +400 + + + +=== TEST 21: add route (test request validation `body_schema.required` success) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "object", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + }, + "required": ["test"] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: add route (test request 
validation `header_schema`) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "required": ["required_payload"], + "properties": { + "required_payload": {"type": "string"}, + "boolean_payload": {"type": "boolean"}, + "timeouts": { + "type": "integer", + "minimum": 1, + "maximum": 254, + "default": 3 + }, + "req_headers": { + "type": "array", + "minItems": 1, + "items": { + "type": "string" + } + } + } + } + } + },]] .. [[ + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 23: add route (test request validation `header_schema.type` is object) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "object" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: add route (test request validation `header_schema.type` is array) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "array" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + 
ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 25: add route (test request validation `header_schema.type` is string) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "string" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: add route (test request validation `header_schema.type` is number) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "number" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 27: add route (test request validation `header_schema.type` is integer) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "integer" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 28: add route (test request validation `header_schema.type` is table) +--- config + location /t { + content_by_lua_block 
{ + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "table" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 29: add route (test request validation `header_schema.type` is function) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "function" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 30: add route (test request validation `header_schema.type` failure) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "test" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/invalid JSON type: test/ +--- error_code chomp +400 + + + +=== TEST 31: add route (test request validation `header_schema.enum` failure) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + 
"request-validation": { + "header_schema": { + "type": "string", + "properties": { + "test": { + "type": "string", + "enum": "test-enum" + } + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/table expected, got string/ +--- error_code chomp +400 + + + +=== TEST 32: add route (test request validation `header_schema.enum` success) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "string", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 33: add route (test request validation `header_schema.required` failure) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + }, + "required": "test-required" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/table expected, got string/ +--- error_code chomp +400 + + + +=== TEST 34: add route (test request validation 
`header_schema.required` success) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + }, + "required": ["test"] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 35: add route (test request validation `header_schema.required` success with custom reject message) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + }, + "required": ["test"] + }, + "rejected_msg": "customize reject message for header_schema.required" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 36: use empty header to hit `header_schema.required with custom reject message` rule +--- request +GET /opentracing +--- error_code: 400 +--- response_body chomp +customize reject message for header_schema.required +--- error_log eval +qr/schema validation failed/ + + + +=== TEST 37: use bad header value to hit `header_schema.required with custom reject message` rule +--- request +GET /opentracing +--- more_headers +test: abc +--- error_code: 400 +--- response_body chomp +customize reject message for header_schema.required +--- 
error_log eval +qr/schema validation failed/ + + + +=== TEST 38: pass `header_schema.required with custom reject message` rule +--- request +GET /opentracing +--- more_headers +test: a +--- error_code: 200 +--- response_body eval +qr/opentracing/ + + + +=== TEST 39: add route (test request validation `body_schema.required` success with custom reject message) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "object", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + }, + "required": ["test"] + }, + "rejected_msg": "customize reject message for body_schema.required" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 40: use empty body to hit `body_schema.required with custom reject message` rule +--- request +GET /opentracing +--- error_code: 400 +--- response_body chomp +customize reject message for body_schema.required + + + +=== TEST 41: use bad body value to hit `body_schema.required with custom reject message` rule +--- request +POST /opentracing +{"test":"abc"} +--- error_code: 400 +--- response_body chomp +customize reject message for body_schema.required +--- error_log eval +qr/schema validation failed/ + + + +=== TEST 42: pass `body_schema.required with custom reject message` rule +--- request +POST /opentracing +{"test":"a"} +--- error_code: 200 +--- response_body eval +qr/opentracing/ + + + +=== TEST 43: add route (test request validation `header_schema.required` failure with custom reject message) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + }, + "required": ["test"] + }, + "rejected_msg": "customize reject message customize reject message customize reject message customize reject message customize reject message customize reject message customize reject message customize reject message customize reject message customize reject message customize reject message" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/string too long/ +--- error_code: 400 + + + +=== TEST 44: add route (test request validation schema with custom reject message only) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "rejected_msg": "customize reject message" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/object matches none of the required/ +--- error_code: 400 + + + +=== TEST 45: add route (test request validation `body_schema.required` success with custom reject code) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "object", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + }, + "required": ["test"] + }, + 
"rejected_code": 505 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 46: use empty body to hit custom rejected code rule +--- request +GET /opentracing +--- error_code: 505 + + + +=== TEST 47: use bad body value to hit custom rejected code rule +--- request +POST /opentracing +{"test":"abc"} +--- error_code: 505 +--- error_log eval +qr/schema validation failed/ + + + +=== TEST 48: pass custom rejected code rule +--- request +POST /opentracing +{"test":"a"} +--- error_code: 200 +--- response_body eval +qr/opentracing/ + + + +=== TEST 49: add route (test request validation `header_schema.required` failure with custom reject code) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "header_schema": { + "type": "object", + "properties": { + "test": { + "type": "string", + "enum": ["a", "b", "c"] + } + }, + "required": ["test"] + }, + "rejected_code": 10000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/expected 10000 to be at most 599/ +--- error_code: 400 + + + +=== TEST 50: add route (test request validation schema with custom reject code only) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "rejected_code": 505 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": 
"/plugin/request/validation" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like eval +qr/object matches none of the required/ +--- error_code: 400 + + + +=== TEST 51: add route for urlencoded post data validation +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "request-validation": { + "body_schema": { + "type": "object", + "required": ["required_payload"], + "properties": { + "required_payload": {"type": "string"} + }, + "rejected_msg": "customize reject message" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]]) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 52: test urlencoded post data +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- request eval +"POST /echo +" . "a=b&" x 101 . "required_payload=101-hello" +--- response_body eval +qr/101-hello/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/request-validation2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/request-validation2.t new file mode 100644 index 0000000..de4687a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/request-validation2.t @@ -0,0 +1,79 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: json body with duplicate key +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + plugins = { + ["request-validation"] = { + body_schema = { + type = "object", + properties = { + k = {pattern = "^good$"} + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + }, + uri = "/echo" + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- request +POST /echo +{"k":"bad","k":"good"} +--- response_body chomp +{"k":"good"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/response-rewrite.t b/CloudronPackages/APISIX/apisix-source/t/plugin/response-rewrite.t new file mode 100644 index 0000000..2e4dcf4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/response-rewrite.t @@ -0,0 +1,735 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: add plugin +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + body = 'Hello world', + headers = { + ["X-Server-id"] = 3 + } + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: add plugin with wrong status_code +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + status_code = 599 + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- request +GET /t +--- response_body +property "status_code" validation failed: expected 599 to be at most 598 + + + +=== TEST 3: add plugin fail +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + body = 2, + headers = { + ["X-Server-id"] = "3" + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- request 
+GET /t +--- response_body +property "body" validation failed: wrong type: expected string, got number + + + +=== TEST 4: set header(rewrite header and body) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers" : { + "X-Server-id": 3, + "X-Server-status": "on", + "Content-Type": "" + }, + "body": "new body\n" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/with_header" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: check body with deleted header +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/with_header" + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say(err) + return + end + + if res.headers['Content-Type'] then + ngx.say('fail content-type should not be exist, now is'..res.headers['Content-Type']) + return + end + + if res.headers['X-Server-status'] ~= 'on' then + ngx.say('fail X-Server-status needs to be on') + return + end + + if res.headers['X-Server-id'] ~= '3' then + ngx.say('fail X-Server-id needs to be 3') + return + end + + ngx.print(res.body) + ngx.exit(200) + } + } +--- request +GET /t +--- response_body +new body + + + +=== TEST 6: set body only and keep header the same +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "new body2\n" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/with_header" + }]] + ) + + if code >= 300 then + ngx.status = code + 
end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: check body and header not changed +--- request +GET /with_header +--- more_headers +resp-X-Server-id: 100 +resp-Content-Type: application/xml +resp-Content-Encoding: gzip +resp-Content-Length: 4 +resp-Last-Modified: Wed, 21 Oct 2015 07:28:00 GMT +resp-ETag: "33a64df551425fcc55e4d42a148795d9f25f89d4" +--- response_body +new body2 +--- response_headers +X-Server-id: 100 +Content-Type: application/xml +Content-Length: +Content-Encoding: +Last-Modified: +ETag: + + + +=== TEST 8: set location header with 302 code +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "Location":"https://www.iresty.com" + }, + "status_code":302 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: check 302 redirect +--- request +GET /hello +--- error_code eval +302 +--- response_headers +Location: https://www.iresty.com + + + +=== TEST 10: empty string in header field +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + status_code = 200, + headers = { + [""] = 2 + } + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- request +GET /t +--- response_body +invalid field length in header + + + +=== TEST 11: array in header value +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + status_code = 200, + headers = { + ["X-Name"] = {} + } + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + 
end + } + } +--- request +GET /t +--- response_body +invalid type as header value + + + +=== TEST 12: set body in base64 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "SGVsbG8K", + "body_base64": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: check base64 content +--- request +GET /hello +--- response_body +Hello + + + +=== TEST 14: set body with not well formed base64 +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + body = "1", + body_base64 = true + }) + if not ok then + ngx.say(err) + return + end + } + } +--- request +GET /t +--- response_body +invalid base64 content + + + +=== TEST 15: print the plugin `conf` in etcd, no dirty data +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local encode_with_keys_sorted = require("toolkit.json").encode + + local code, _, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers" : { + "X-Server-id": 3, + "X-Server-status": "on", + "Content-Type": "" + }, + "body": "new body\n" + } + }, + "uri": "/with_header" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local resp_data = core.json.decode(body) + ngx.say(encode_with_keys_sorted(resp_data.value.plugins)) + } + } +--- request +GET /t +--- response_body +{"response-rewrite":{"body":"new body\n","body_base64":false,"headers":{"Content-Type":"","X-Server-id":3,"X-Server-status":"on"}}} + + + +=== TEST 16: add validate vars +--- config + 
location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + vars = { + {"status","==",200} + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 17: add plugin with invalidate vars +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + vars = { + {} + } + }) + + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- request +GET /t +--- response_body +failed to validate the 'vars' expression: rule too short + + + +=== TEST 18: set route with http status code as expr +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "new body3\n", + "status_code": 403, + "vars": [ + ["status","==",500] + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/server_error","/hello"] + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: check http code that matches http_status +--- request +GET /server_error +--- response_body +new body3 +--- error_code eval +403 +--- error_log +500 Internal Server Error + + + +=== TEST 20: check http code that not matches http_status +--- request +GET /hello +--- response_body +hello world +--- error_code eval +200 + + + +=== TEST 21: set an empty body with setting body_base64 to true +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + body = "", + body_base64 = true + }) + if not ok then + ngx.say(err) + return + end + } + } +--- request +GET /t +--- 
response_body +invalid base64 content + + + +=== TEST 22: set an nil body with setting body_base64 to true +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + body_base64 = true + }) + if not ok then + ngx.say(err) + return + end + } + } +--- request +GET /t +--- response_body +invalid base64 content + + + +=== TEST 23: rewrite header with variables +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers" : { + "X-A": "$remote_addr", + "X-B": "from $remote_addr to $balancer_ip:$balancer_port" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/with_header" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: hit +--- request +GET /with_header +--- response_headers +X-A: 127.0.0.1 +X-B: from 127.0.0.1 to 127.0.0.1:1980 + + + +=== TEST 25: set empty body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "body": "" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: hit set empty body +--- request +GET /hello +--- response_body + + + +=== TEST 27: test add header with one word +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + 
"add": [ + "X-Server-test:a" + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/response-rewrite2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/response-rewrite2.t new file mode 100644 index 0000000..0757d98 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/response-rewrite2.t @@ -0,0 +1,693 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + {body = "test"}, + {filters = { + { + regex = "l", + replace = "m", + }, + }}, + {body = "test", filters = { + { + regex = "l", + replace = "m", + }, + }}, + {filters = {}}, + {filters = { + {regex = "l"}, + }}, + {filters = { + { + regex = "", + replace = "m", + }, + }}, + {filters = { + { + regex = "l", + replace = "m", + scope = "" + }, + }}, + } + local plugin = require("apisix.plugins.response-rewrite") + + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body eval +qr/done +done +failed to validate dependent schema for "filters|body": value wasn't supposed to match schema +property "filters" validation failed: expect array to have at least 1 items +property "filters" validation failed: failed to validate item 1: property "replace" is required +property "filters" validation failed: failed to validate item 1: property "regex" validation failed: string too short, expected at least 1, got 0 +property "filters" validation failed: failed to validate item 1: property "scope" validation failed: matches none of the enum values/ + + + +=== TEST 2: add plugin with valid filters +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + filters = { + { + regex = "Hello", + scope = "global", + replace = "World", + options = "jo" + } + } + }) + if 
not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 3: add plugin with invalid filter required filed +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + filters = { + { + regex = "Hello", + } + } + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- response_body +property "filters" validation failed: failed to validate item 1: property "replace" is required + + + +=== TEST 4: add plugin with invalid filter scope +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + filters = { + { + regex = "Hello", + scope = "two", + replace = "World", + options = "jo" + } + } + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- response_body +property "filters" validation failed: failed to validate item 1: property "scope" validation failed: matches none of the enum values + + + +=== TEST 5: add plugin with invalid filter empty value +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + filters = { + { + regex = "", + replace = "world" + } + } + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- response_body +property "filters" validation failed: failed to validate item 1: property "regex" validation failed: string too short, expected at least 1, got 0 + + + +=== TEST 6: add plugin with invalid filter regex options +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({ + filters = { + { + regex = "hello", + replace = "HELLO", + options = "h" + } + } + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- error_code eval +200 
+--- response_body +regex "hello" validation failed: unknown flag "h" (flags "h") + + + +=== TEST 7: set route with filters and vars expr +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "filters": [ + { + "regex": "hello", + "replace": "test" + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: check http body that matches filters +--- request +GET /hello +--- response_body +test world + + + +=== TEST 9: filter substitute global +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "filters": [ + { + "regex": "l", + "replace": "t", + "scope": "global" + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: check http body that substitute global +--- request +GET /hello +--- response_body +hetto wortd + + + +=== TEST 11: filter replace with empty +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "filters": [ + { + "regex": "hello", + "replace": "" + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: check http body that replace with empty +--- 
request +GET /hello +--- response_body + world + + + +=== TEST 13: filter replace with words +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "filters": [ + { + "regex": "\\w\\S+$", + "replace": "*" + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: check http body that replace with words +--- request +GET /hello +--- response_body +hello * + + + +=== TEST 15: set multiple filters +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "filters": [ + { + "regex": "hello", + "replace": "HELLO" + }, + { + "regex": "L", + "replace": "T" + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: check http body that set multiple filters +--- request +GET /hello +--- response_body +HETLO world + + + +=== TEST 17: filters no any match +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "filters": [ + { + "regex": "test", + "replace": "TEST" + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: check http body that filters no any match +--- request +GET 
/hello +--- response_body +hello world + + + +=== TEST 19: schema check for headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + for _, case in ipairs({ + {add = { + {"headers:"} + }}, + {remove = { + {"headers:"} + }}, + {set = { + {"headers"} + }}, + {set = { + {[""] = 1} + }}, + {set = { + {["a"] = true} + }}, + }) do + local plugin = require("apisix.plugins.response-rewrite") + local ok, err = plugin.check_schema({headers = case}) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } +} +--- response_body eval +"property \"headers\" validation failed: object matches none of the required\n" x 5 + + + +=== TEST 20: add headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "Cache-Control: no-cache", + "Cache-Control : max-age=0, must-revalidate" + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: hit +--- request +GET /hello +--- response_headers +Cache-Control: no-cache, max-age=0, must-revalidate + + + +=== TEST 22: set headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "Cache-Control: no-cache" + ], + "set": { + "Cache-Control": "max-age=0, must-revalidate" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: hit +--- request +GET /hello +--- response_headers +Cache-Control: max-age=0, must-revalidate + + + +=== TEST 
24: remove headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "Set-Cookie: =; Max-Age=" + ], + "set": { + "Cache-Control": "max-age=0, must-revalidate" + }, + "remove": [ + "Set-Cookie", + "Cache-Control" + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 25: hit +--- request +GET /hello +--- response_headers +Cache-Control: +Set-Cookie: diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/response-rewrite3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/response-rewrite3.t new file mode 100644 index 0000000..dc43074 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/response-rewrite3.t @@ -0,0 +1,677 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 11451; + gzip on; + gzip_types *; + gzip_min_length 1; + location /gzip_hello { + content_by_lua_block { + ngx.req.read_body() + local s = "hello world" + ngx.header['Content-Length'] = #s + 1 + ngx.say(s) + } + } + } + + server { + listen 11452; + location /brotli_hello { + content_by_lua_block { + ngx.req.read_body() + local s = "hello world hello world hello world" + ngx.header['Content-Length'] = #s + 1 + ngx.say(s) + } + header_filter_by_lua_block { + local conf = { + comp_level = 6, + http_version = 1.1, + lgblock = 0, + lgwin = 19, + min_length = 1, + mode = 0, + types = "*", + } + local brotli = require("apisix.plugins.brotli") + brotli.header_filter(conf, ngx.ctx) + } + body_filter_by_lua_block { + local conf = { + comp_level = 6, + http_version = 1.1, + lgblock = 0, + lgwin = 19, + min_length = 1, + mode = 0, + types = "*", + } + local brotli = require("apisix.plugins.brotli") + brotli.body_filter(conf, ngx.ctx) + } + } + } + +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route use gzip upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/gzip_hello", + "upstream": { + "nodes": { + "127.0.0.1:11451": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== 
TEST 2: should return gzip body +--- request +GET /gzip_hello +--- more_headers +Accept-Encoding: gzip +--- response_headers +Content-Encoding: gzip + + + +=== TEST 3: set route use gzip upstream and response-rewrite body conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/gzip_hello", + "upstream": { + "nodes": { + "127.0.0.1:11451": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "body": "new body\n" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: should rewrite body and clear Content-Encoding header +--- request +GET /gzip_hello +--- more_headers +Accept-Encoding: gzip +--- response_body +new body +--- response_headers +Content-Encoding: + + + +=== TEST 5: set route use gzip upstream and response-rewrite filter conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/gzip_hello", + "upstream": { + "nodes": { + "127.0.0.1:11451": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "filters": [ + { + "regex": "hello", + "replace": "test" + } + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: gzip decode support, should rewrite body and clear Content-Encoding header +--- request +GET /gzip_hello +--- more_headers +Accept-Encoding: gzip +--- response_body +test world +--- response_headers +Content-Encoding: + + + +=== TEST 7: set route use response-write body conf, and mock unsupported compression encoding type +--- config + location /t { + content_by_lua_block { + 
local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "body": "new body\n" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: use body conf will ignore encoding, should rewrite body and clear Content-Encoding header +--- request +POST /echo +fake body with mock content encoding header +--- more_headers +Content-Encoding: deflate +--- response_body +new body +--- response_headers +Content-Encoding: + + + +=== TEST 9: set route use response-write filter conf, and mock unsupported compression encoding type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "filters": [ + { + "regex": "hello", + "replace": "test" + } + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: use filter conf will report unsupported encoding type error +--- request +POST /echo +fake body with mock content encoding header +--- more_headers +Content-Encoding: deflate +--- response_headers +Content-Encoding: +--- error_log +filters may not work as expected due to unsupported compression encoding type: deflate + + + +=== TEST 11: set route use response-write plugin but not use filter conf or body conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + 
ngx.HTTP_PUT, + [[{ + "uri": "/gzip_hello", + "upstream": { + "nodes": { + "127.0.0.1:11451": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "headers": { + "set": { + "X-Server-id": 3, + "X-Server-status": "on", + "Content-Type": "" + } + } + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: should keep Content-Encoding +--- request +GET /gzip_hello +--- more_headers +Accept-Encoding: gzip +--- response_headers +Content-Encoding: gzip +X-Server-id: 3 +X-Server-status: on +Content-Type: + + + +=== TEST 13: response-write without filter conf or body conf, and mock unsupported compression encoding type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/echo", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "headers": { + "set": { + "X-Server-id": 3, + "X-Server-status": "on", + "Content-Type": "" + } + } + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: should keep Content-Encoding +--- request +POST /echo +fake body with mock content encoding header +--- more_headers +Content-Encoding: deflate +--- response_headers +Content-Encoding: deflate +X-Server-id: 3 +X-Server-status: on +Content-Type: + + + +=== TEST 15: set route use brotli upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/brotli_hello", + "upstream": { + "nodes": { + "127.0.0.1:11452": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + 
ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: should return brotli body +--- request +GET /brotli_hello +--- more_headers +Accept-Encoding: br +--- response_headers +Content-Encoding: br + + + +=== TEST 17: set route use brotli upstream and response-rewrite body conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/brotli_hello", + "upstream": { + "nodes": { + "127.0.0.1:11452": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "body": "new body\n" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: should rewrite body and clear Content-Encoding header +--- request +GET /brotli_hello +--- more_headers +Accept-Encoding: br +--- response_body +new body +--- response_headers +Content-Encoding: + + + +=== TEST 19: set route use brotli upstream and response-rewrite filter conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/brotli_hello", + "upstream": { + "nodes": { + "127.0.0.1:11452": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "filters": [ + { + "regex": "hello", + "replace": "test" + } + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: brotli decode support, should rewrite body and clear Content-Encoding header +--- request +GET /brotli_hello +--- more_headers +Accept-Encoding: br +--- response_body +test world hello world hello world +--- response_headers +Content-Encoding: + + + +=== TEST 
21: set route use response-write plugin but not use filter conf or body conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/brotli_hello", + "upstream": { + "nodes": { + "127.0.0.1:11452": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "response-rewrite": { + "vars": [ + ["status","==",200] + ], + "headers": { + "set": { + "X-Server-id": 3, + "X-Server-status": "on", + "Content-Type": "" + } + } + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: should keep Content-Encoding +--- request +GET /brotli_hello +--- more_headers +Accept-Encoding: br +--- response_headers +Content-Encoding: br +X-Server-id: 3 +X-Server-status: on +Content-Type: diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/rocketmq-logger-log-format.t b/CloudronPackages/APISIX/apisix-source/t/plugin/rocketmq-logger-log-format.t new file mode 100644 index 0000000..e7512db --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/rocketmq-logger-log-format.t @@ -0,0 +1,160 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('info'); +repeat_each(1); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/rocketmq-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set route(id: 1), batch_max_size=1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "tag" : "tag1", + "timeout" : 1, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: hit route and report rocketmq logger +--- request +GET /hello +--- response_body +hello world +--- wait: 0.5 +--- error_log eval +qr/send data to rocketmq: \{.*"host":"localhost"/ + + + +=== TEST 4: log format in plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "tag" : "tag1", + "log_format": { + "x_ip": "$remote_addr" + }, + "timeout" : 1, + "batch_max_size": 1 + } + }, + "upstream": { 
+ "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit route and report logger +--- request +GET /hello +--- response_body +hello world +--- wait: 0.5 +--- error_log eval +qr/send data to rocketmq: \{.*"x_ip":"127.0.0.1".*\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/rocketmq-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/rocketmq-logger.t new file mode 100644 index 0000000..c657b6a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/rocketmq-logger.t @@ -0,0 +1,574 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.rocketmq-logger") + local ok, err = plugin.check_schema({ + topic = "test", + key = "key1", + nameserver_list = { + "127.0.0.1:3" + } + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: missing nameserver list +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.rocketmq-logger") + local ok, err = plugin.check_schema({topic = "test", key= "key1"}) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +property "nameserver_list" is required +done + + + +=== TEST 3: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.rocketmq-logger") + local ok, err = plugin.check_schema({ + nameserver_list = { + "127.0.0.1:3000" + }, + timeout = "10", + topic ="test", + key= "key1" + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +property "timeout" validation failed: wrong type: expected integer, got string +done + + + +=== TEST 4: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== 
TEST 5: access +--- request +GET /hello +--- response_body +hello world + +--- wait: 2 + + + +=== TEST 6: unavailable nameserver +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9877" ], + "topic" : "test2", + "producer_type": "sync", + "key" : "key1", + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + } + } +--- error_log +failed to send data to rocketmq topic +[error] +--- wait: 1 + + + +=== TEST 7: set route(meta_format = origin, include_req_body = true) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": true, + "meta_format": "origin" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit route, report log to rocketmq +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world + +--- error_log +send data to rocketmq: GET /hello?ab=cd HTTP/1.1 +host: localhost +content-length: 6 +connection: close + +abcdef +--- wait: 2 + + + +=== TEST 9: set route(meta_format = origin, include_req_body = false) +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false, + "meta_format": "origin" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: hit route, report log to rocketmq +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world + +--- error_log +send data to rocketmq: GET /hello?ab=cd HTTP/1.1 +host: localhost +content-length: 6 +connection: close +--- wait: 2 + + + +=== TEST 11: set route(meta_format = default) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: hit route, report log to rocketmq +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world + +--- error_log_like eval +qr/send data to rocketmq: \{.*"upstream":"127.0.0.1:1980"/ +--- wait: 2 + + + +=== TEST 13: set route(id: 1), missing key field +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + 
"topic" : "test2", + "timeout" : 1, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: access, test key field is optional +--- request +GET /hello +--- response_body +hello world + +--- wait: 2 + + + +=== TEST 15: set route(meta_format = default), missing key field +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: hit route, report log to rocketmq +--- request +GET /hello?ab=cd +abcdef +--- response_body +hello world + +--- error_log_like eval +qr/send data to rocketmq: \{.*"upstream":"127.0.0.1:1980"/ +--- wait: 2 + + + +=== TEST 17: use the topic with 3 partitions +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test3", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: report log to rocketmq by different partitions +--- config + location /t { + content_by_lua_block { + local t 
= require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test3", + "producer_type": "sync", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + } + } +--- timeout: 5s +--- ignore_response + +--- error_log eval +[qr/queue: 1/, +qr/queue: 0/, +qr/queue: 2/] + + + +=== TEST 19: report log to rocketmq by different partitions in async mode +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test3", + "producer_type": "async", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + } + } +--- timeout: 5s +--- ignore_response + +--- error_log eval +[qr/queue: 1/, +qr/queue: 0/, +qr/queue: 2/] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/rocketmq-logger2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/rocketmq-logger2.t new file mode 100644 index 0000000..3b6087c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/rocketmq-logger2.t @@ -0,0 +1,659 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: update the nameserver_list, generate different rocketmq producers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + ngx.sleep(0.5) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + code, body = t('/apisix/admin/routes/1/plugins', + ngx.HTTP_PATCH, + [[{ + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + + code, body = t('/apisix/admin/routes/1/plugins', + ngx.HTTP_PATCH, + [[{ + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:19876" ], + "topic" : "test4", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + t('/hello',ngx.HTTP_GET) 
+ ngx.sleep(0.5) + + ngx.sleep(2) + ngx.say("passed") + } + } +--- timeout: 10 +--- response +passed +--- wait: 5 +--- error_log +phase_func(): rocketmq nameserver_list[1] port 9876 +phase_func(): rocketmq nameserver_list[1] port 19876 +--- no_error_log eval +qr/not found topic/ + + + +=== TEST 2: use the topic that does not exist on rocketmq(even if rocketmq allows auto create topics, first time push messages to rocketmq would got this error) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1/plugins', + ngx.HTTP_PATCH, + [[{ + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "undefined_topic", + "timeout" : 1, + "batch_max_size": 1, + "include_req_body": false + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + t('/hello',ngx.HTTP_GET) + ngx.sleep(0.5) + + ngx.sleep(2) + ngx.say("passed") + } + } +--- timeout: 5 +--- response +passed +--- error_log eval +qr/getTopicRouteInfoFromNameserver return TOPIC_NOT_EXIST, No topic route info in name server for the topic: undefined_topic/ + + + +=== TEST 3: rocketmq nameserver list info in log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "producer_type": "sync", + "key" : "key1", + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + } + } +--- error_log_like eval +qr/create new rocketmq producer instance, nameserver_list: \[\{"port":9876,"host":"127.0.0.127"}]/ +qr/failed to send data to rocketmq topic: .*, nameserver_list: \{"127.0.0.127":9876}/ + + + +=== TEST 4: delete plugin metadata, tests would fail if run rocketmq-logger-log-format.t and plugin metadata is added +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/rocketmq-logger', + ngx.HTTP_DELETE + ) + } + } +--- response_body + + + +=== TEST 5: set route(id: 1,include_req_body = true,include_req_body_expr = array) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "timeout" : 1, + "include_req_body": true, + "include_req_body_expr": [ + [ + "arg_name", + "==", + "qwerty" + ] + ], + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 6: hit route, expr eval success +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world + +--- error_log eval +qr/send data to rocketmq: \{.*"body":"abcdef"/ +--- wait: 2 + + + +=== TEST 7: hit route,expr eval fail +--- request +POST /hello?name=zcxv +abcdef +--- response_body +hello world +--- no_error_log eval +qr/send data to rocketmq: \{.*"body":"abcdef"/ +--- wait: 2 + + + +=== TEST 8: check log schema(include_req_body) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.rocketmq-logger") + local ok, err = plugin.check_schema({ + 
topic = "test", + key = "key1", + nameserver_list = { + "127.0.0.1:3" + }, + include_req_body = true, + include_req_body_expr = { + {"bar", "<>", "foo"} + } + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +failed to validate the 'include_req_body_expr' expression: invalid operator '<>' +done + + + +=== TEST 9: check log schema(include_resp_body) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.rocketmq-logger") + local ok, err = plugin.check_schema({ + topic = "test", + key = "key1", + nameserver_list = { + "127.0.0.1:3" + }, + include_resp_body = true, + include_resp_body_expr = { + {"bar", "", "foo"} + } + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +failed to validate the 'include_resp_body_expr' expression: invalid operator '' +done + + + +=== TEST 10: set route(id: 1,include_resp_body = true,include_resp_body_expr = array) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "timeout" : 1, + "include_resp_body": true, + "include_resp_body_expr": [ + [ + "arg_name", + "==", + "qwerty" + ] + ], + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 11: hit route, expr eval success +--- request +POST /hello?name=qwerty +abcdef +--- response_body +hello world +--- error_log eval +qr/send data to rocketmq: \{.*"body":"hello world\\n"/ +--- wait: 2 + + + +=== TEST 12: hit route, expr eval fail +--- request +POST /hello?name=zcxv +abcdef +--- response_body +hello world +--- no_error_log eval +qr/send data 
to rocketmq: \{.*"body":"hello world\\n"/ +--- wait: 2 + + + +=== TEST 13: set route include_resp_body = true - gzip +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "timeout" : 1, + "include_resp_body": true, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:11451": 1 + }, + "type": "roundrobin" + }, + "uri": "/gzip_hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 14: hit +--- http_config +server { + listen 11451; + gzip on; + gzip_types *; + gzip_min_length 1; + location /gzip_hello { + content_by_lua_block { + ngx.req.read_body() + local s = "gzip hello world" + ngx.header['Content-Length'] = #s + 1 + ngx.say(s) + } + } +} +--- request +GET /gzip_hello +--- more_headers +Accept-Encoding: gzip +--- error_log eval +qr/send data to rocketmq: \{.*"body":"gzip hello world\\n"/ +--- wait: 2 + + + +=== TEST 15: set route include_resp_body - brotli +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "key" : "key1", + "timeout" : 1, + "include_resp_body": true, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:11452": 1 + }, + "type": "roundrobin" + }, + "uri": "/brotli_hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 16: hit route, expr eval success +--- http_config +server { + listen 11452; + location /brotli_hello { + content_by_lua_block { + ngx.req.read_body() + local s = "brotli hello world" + 
ngx.header['Content-Length'] = #s + 1 + ngx.say(s) + } + header_filter_by_lua_block { + local conf = { + comp_level = 6, + http_version = 1.1, + lgblock = 0, + lgwin = 19, + min_length = 1, + mode = 0, + types = "*", + } + local brotli = require("apisix.plugins.brotli") + brotli.header_filter(conf, ngx.ctx) + } + body_filter_by_lua_block { + local conf = { + comp_level = 6, + http_version = 1.1, + lgblock = 0, + lgwin = 19, + min_length = 1, + mode = 0, + types = "*", + } + local brotli = require("apisix.plugins.brotli") + brotli.body_filter(conf, ngx.ctx) + } + } +} +--- request +GET /brotli_hello +--- more_headers +Accept-Encoding: br +--- error_log eval +qr/send data to rocketmq: \{.*"body":"brotli hello world\\n"/ +--- wait: 2 + + + +=== TEST 17: multi level nested expr conditions +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.rocketmq-logger") + local ok, err = plugin.check_schema({ + topic = "test", + key = "key1", + nameserver_list = { + "127.0.0.1:3" + }, + include_req_body = true, + include_req_body_expr = { + {"request_length", "<", 1024}, + {"http_content_type", "in", {"application/xml", "application/json", "text/plain", "text/xml"}} + }, + include_resp_body = true, + include_resp_body_expr = { + {"http_content_length", "<", 1024}, + {"http_content_type", "in", {"application/xml", "application/json", "text/plain", "text/xml"}} + } + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 18: data encryption for secret_key +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "rocketmq-logger": { + "nameserver_list" : [ "127.0.0.1:9876" ], + "topic" : "test2", + "access_key": "foo", + 
"secret_key": "bar" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["rocketmq-logger"].secret_key) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["rocketmq-logger"].secret_key) + } + } +--- response_body +bar +77+NmbYqNfN+oLm0aX5akg== diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/security-warning.t b/CloudronPackages/APISIX/apisix-source/t/plugin/security-warning.t new file mode 100644 index 0000000..f2d2e3e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/security-warning.t @@ -0,0 +1,570 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); +run_tests(); + +__DATA__ + +=== TEST 1: authz-casdoor no https +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local fake_uri = "http://127.0.0.1:" .. ngx.var.server_port + local callback_url = "http://127.0.0.1:" .. ngx.var.server_port .. + "/anything/callback" + local conf = { + callback_url = callback_url, + endpoint_addr = fake_uri, + client_id = "7ceb9b7fda4a9061ec1c", + client_secret = "3416238e1edf915eac08b8fe345b2b95cdba7e04" + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + ngx.say("done") + + } + } +--- response_body +done +--- error_log +Using authz-casdoor endpoint_addr with no TLS is a security risk +Using authz-casdoor callback_url with no TLS is a security risk + + + +=== TEST 2: authz-casdoor with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.authz-casdoor") + local fake_uri = "https://127.0.0.1:" .. ngx.var.server_port + local callback_url = "https://127.0.0.1:" .. ngx.var.server_port .. 
+ "/anything/callback" + local conf = { + callback_url = callback_url, + endpoint_addr = fake_uri, + client_id = "7ceb9b7fda4a9061ec1c", + client_secret = "3416238e1edf915eac08b8fe345b2b95cdba7e04" + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + ngx.say("done") + + } + } +--- response_body +done +--- no_error_log +Using authz-casdoor endpoint_addr with no TLS is a security risk +Using authz-casdoor callback_url with no TLS is a security risk + + + +=== TEST 3: authz keycloak with no TLS +--- config + location /t { + content_by_lua_block { + local check = {"discovery", "token_endpoint", "resource_registration_endpoint", "access_denied_redirect_uri"} + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({ + client_id = "foo", + discovery = "http://host.domain/realms/foo/protocol/openid-connect/token", + token_endpoint = "http://token_endpoint.domain", + resource_registration_endpoint = "http://resource_registration_endpoint.domain", + access_denied_redirect_uri = "http://access_denied_redirect_uri.domain" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +Using authz-keycloak discovery with no TLS is a security risk +Using authz-keycloak token_endpoint with no TLS is a security risk +Using authz-keycloak resource_registration_endpoint with no TLS is a security +Using authz-keycloak access_denied_redirect_uri with no TLS is a security risk + + + +=== TEST 4: authz keycloak with TLS +--- config + location /t { + content_by_lua_block { + local check = {"discovery", "token_endpoint", "resource_registration_endpoint", "access_denied_redirect_uri"} + local plugin = require("apisix.plugins.authz-keycloak") + local ok, err = plugin.check_schema({ + client_id = "foo", + discovery = "https://host.domain/realms/foo/protocol/openid-connect/token", + token_endpoint = "https://token_endpoint.domain", + 
resource_registration_endpoint = "https://resource_registration_endpoint.domain", + access_denied_redirect_uri = "https://access_denied_redirect_uri.domain" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Using authz-keycloak discovery with no TLS is a security risk +Using authz-keycloak token_endpoint with no TLS is a security risk +Using authz-keycloak resource_registration_endpoint with no TLS is a security +Using authz-keycloak access_denied_redirect_uri with no TLS is a security risk + + + +=== TEST 5: cas auth with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.cas-auth") + local ok, err = plugin.check_schema({ + idp_uri = "http://a.com", + cas_callback_uri = "/a/b", + logout_uri = "/c/d" + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed +--- error_log +risk + + + +=== TEST 6: cas auth with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.cas-auth") + local ok, err = plugin.check_schema({ + idp_uri = "https://a.com", + cas_callback_uri = "/a/b", + logout_uri = "/c/d" + }) + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed +--- no_error_log +risk + + + +=== TEST 7: clickhouse logger with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.clickhouse-logger") + local ok, err = plugin.check_schema({ + timeout = 3, + retry_delay = 1, + batch_max_size = 500, + user = "default", + password = "a", + database = "default", + logtable = "t", + endpoint_addrs = { + "http://127.0.0.1:1980/clickhouse_logger_server", + "http://127.0.0.2:1980/clickhouse_logger_server", + }, + max_retry_count = 1, + name = "clickhouse logger", + ssl_verify = false + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- 
response_body +passed +--- error_log +Using clickhouse-logger endpoint_addrs with no TLS is a security risk + + + +=== TEST 8: clickhouse logger with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.clickhouse-logger") + local ok, err = plugin.check_schema({ + timeout = 3, + retry_delay = 1, + batch_max_size = 500, + user = "default", + password = "a", + database = "default", + logtable = "t", + endpoint_addrs = { + "https://127.0.0.1:1980/clickhouse_logger_server", + "https://127.0.0.2:1980/clickhouse_logger_server", + }, + max_retry_count = 1, + name = "clickhouse logger", + ssl_verify = false + }) + + if not ok then + ngx.say(err) + else + ngx.say("passed") + end + } + } +--- response_body +passed +--- no_error_log +Using clickhouse-logger endpoint_addrs with no TLS is a security risk + + + +=== TEST 9: elastic search logger with no TLS +--- config + location /t { + content_by_lua_block { + local ok, err + local plugin = require("apisix.plugins.elasticsearch-logger") + ok, err = plugin.check_schema({ + endpoint_addrs = { + "http://127.0.0.1:9200" + }, + field = { + index = "services" + } + }) + if err then + ngx.say(err) + else + ngx.say("passed") + end + + } + } +--- response_body_like +passed +--- error_log +Using elasticsearch-logger endpoint_addrs with no TLS is a security risk + + + +=== TEST 10: elastic search logger with TLS +--- config + location /t { + content_by_lua_block { + local ok, err + local plugin = require("apisix.plugins.elasticsearch-logger") + ok, err = plugin.check_schema({ + endpoint_addrs = { + "https://127.0.0.1:9200" + }, + field = { + index = "services" + } + }) + if err then + ngx.say(err) + else + ngx.say("passed") + end + + } + } +--- response_body_like +passed +--- no_error_log +Using elasticsearch-logger endpoint_addrs with no TLS is a security risk + + + +=== TEST 11: error log logger with tcp.tls = false +--- config + location /t { + content_by_lua_block { + local plugin = 
require("apisix.plugins.error-log-logger") + local ok, err = plugin.check_schema({ + tcp = { + host = "host.com", + port = "99", + tls = false, + }, + skywalking = { + endpoint_addr = "http://a.bcd" + }, + clickhouse = { + endpoint_addr = "http://some.com", + user = "user", + password = "secret", + database = "yes", + logtable = "some" + }, + }) + ngx.say(ok and "done" or err) + + } + } +--- request +GET /t +--- response_body +done +--- error_log +Using error-log-logger skywalking.endpoint_addr with no TLS is a security risk +Using error-log-logger clickhouse.endpoint_addr with no TLS is a security risk +Keeping tcp.tls disabled in error-log-logger configuration is a security risk + + + +=== TEST 12: error log logger with tcp.tls = true +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.error-log-logger") + local ok, err = plugin.check_schema({ + tcp = { + host = "host.com", + port = "99", + tls = true, + }, + skywalking = { + endpoint_addr = "https://a.bcd" + }, + clickhouse = { + endpoint_addr = "https://some.com", + user = "user", + password = "secret", + database = "yes", + logtable = "some" + }, + }) + ngx.say(ok and "done" or err) + + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Using error-log-logger skywalking.endpoint_addr with no TLS is a security risk +Using error-log-logger clickhouse.endpoint_addr with no TLS is a security risk +Keeping tcp.tls disabled in error-log-logger configuration is a security risk + + + +=== TEST 13: forward auth with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.forward-auth") + + local ok, err = plugin.check_schema({uri = "http://127.0.0.1:8199"}) + ngx.say(ok and "done" or err) + + } + } +--- response_body +done +--- error_log +Using forward-auth uri with no TLS is a security risk +Using forward-auth uri with no TLS is a security risk + + + +=== TEST 14: forward auth with TLS +--- config + location /t { 
+ content_by_lua_block { + local plugin = require("apisix.plugins.forward-auth") + + local ok, err = plugin.check_schema({uri = "https://127.0.0.1:8199"}) + ngx.say(ok and "done" or err) + + } + } +--- response_body +done +--- no_error_log +Using forward-auth uri with no TLS is a security risk + + + +=== TEST 15: http-logger with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = plugin.check_schema({uri = "http://127.0.0.1"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +Using http-logger uri with no TLS is a security risk + + + +=== TEST 16: http-logger with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.http-logger") + local ok, err = plugin.check_schema({uri = "https://127.0.0.1"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done +--- no_error_log +Using http-logger uri with no TLS is a security risk + + + +=== TEST 17: ldap auth with no TLS +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.ldap-auth") + local ok, err = plugin.check_schema( + { + base_dn = "123", + ldap_uri = "127.0.0.1:1389", + tls_verify = false, + use_tls = false + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +Keeping tls_verify disabled in ldap-auth configuration is a security risk +Keeping use_tls disabled in ldap-auth configuration is a security risk + + + +=== TEST 18: ldap auth with TLS +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local plugin = require("apisix.plugins.ldap-auth") + local ok, err = plugin.check_schema({base_dn = "123", ldap_uri = "127.0.0.1:1389", use_tls = true}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body 
+done +--- no_error_log +Using LDAP auth with TLS disabled is a security risk + + + +=== TEST 19: loki-logger with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.loki-logger") + + local ok, err = plugin.check_schema({endpoint_addrs = {"http://127.0.0.1:8199"}}) + ngx.say(ok and "done" or err) + } + } +--- response_body +done +--- error_log +Using loki-logger endpoint_addrs with no TLS is a security risk +Using loki-logger endpoint_addrs with no TLS is a security risk +Using loki-logger endpoint_addrs with no TLS is a security risk + + + +=== TEST 20: loki logger with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.loki-logger") + + local ok, err = plugin.check_schema({endpoint_addrs = {"https://127.0.0.1:8199"}}) + ngx.say(ok and "done" or err) + } + } +--- response_body +done +--- no_error_log +Using loki-logger endpoint_addrs with no TLS is a security risk diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/security-warning2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/security-warning2.t new file mode 100644 index 0000000..d7c4e9a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/security-warning2.t @@ -0,0 +1,654 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); +run_tests(); + +__DATA__ + +=== TEST 1: opa with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.opa") + local ok, err = plugin.check_schema({host = "http://127.0.0.1:8181", policy = "example/allow"}) + ngx.say(ok and "done" or err) + } + } +--- response_body +done +--- error_log +Using opa host with no TLS is a security risk + + + +=== TEST 2: opa with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.opa") + local ok, err = plugin.check_schema({host = "https://127.0.0.1:8181", policy = "example/allow"}) + ngx.say(ok and "done" or err) + } + } +--- response_body +done +--- no_error_log +Using opa host with no TLS is a security risk + + + +=== TEST 3: openid-connect with no TLS +--- config + location /t { + content_by_lua_block { + + local plugin = require("apisix.plugins.openid-connect") + local ok, err = plugin.check_schema({ + client_id = "a", + client_secret = "b", + discovery = "http://a.com", + introspection_endpoint = "http://b.com", + redirect_uri = "http://c.com", + post_logout_redirect_uri = "http://d.com", + proxy_opts = { + http_proxy = "http://e.com" + } + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +Using openid-connect discovery with no TLS is a security risk +Using openid-connect introspection_endpoint with no TLS is a security risk +Using openid-connect redirect_uri with no TLS is a security risk +Using openid-connect post_logout_redirect_uri with no TLS is a security risk +Using openid-connect proxy_opts.http_proxy with no TLS is a security risk + + + +=== 
TEST 4: openid-connect with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openid-connect") + + local ok, err = plugin.check_schema({ + client_id = "a", + client_secret = "b", + discovery = "https://a.com", + introspection_endpoint = "https://b.com", + redirect_uri = "https://c.com", + post_logout_redirect_uri = "https://d.com", + proxy_opts = { + http_proxy = "https://e.com" + } + }) + + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done +--- no_error_log +Using openid-connect discovery with no TLS is a security risk +Using openid-connect introspection_endpoint with no TLS is a security risk +Using openid-connect redirect_uri with no TLS is a security risk +Using openid-connect post_logout_redirect_uri with no TLS is a security risk +Using openid-connect proxy_opts.http_proxy with no TLS is a security risk + + + +=== TEST 5: opentelemetry with no TLS +--- extra_yaml_config +plugins: + - opentelemetry +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/opentelemetry', + ngx.HTTP_PUT, + [[{ + "batch_span_processor": { + "max_export_batch_size": 1, + "inactive_timeout": 0.5 + }, + "trace_id_source": "x-request-id", + "collector": { + "address": "http://127.0.0.1:4318", + "request_timeout": 3, + "request_headers": { + "foo": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + --- deleting this data so this doesn't effect when metadata schema is validated + --- at init in next test. 
+ local code, body = t('/apisix/admin/plugin_metadata/opentelemetry', + ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- error_log +Using opentelemetry collector.address with no TLS is a security risk + + + +=== TEST 6: opentelemetery with TLS +--- extra_yaml_config +plugins: + - opentelemetry +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/opentelemetry', + ngx.HTTP_PUT, + [[{ + "batch_span_processor": { + "max_export_batch_size": 1, + "inactive_timeout": 0.5 + }, + "trace_id_source": "x-request-id", + "collector": { + "address": "https://127.0.0.1:4318", + "request_timeout": 3, + "request_headers": { + "foo": "bar" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- no_error_log +Using opentelemetry collector.address with no TLS is a security risk + + + +=== TEST 7: openwhisk with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openwhisk") + local ok, err = plugin.check_schema({ + api_host = "http://127.0.0.1:3233", + service_token = "test:test", + namespace = "test", + action = "test" + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +Using openwhisk api_host with no TLS is a security risk + + + +=== TEST 8: openwhisk with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.openwhisk") + local ok, err = plugin.check_schema({api_host = 
"https://127.0.0.1:3233", service_token = "test:test", namespace = "test", action = "test"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done +--- no_error_log +Using openwhisk api_host with no TLS is a security risk + + + +=== TEST 9: rocketmq with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.rocketmq-logger") + local ok, err = plugin.check_schema({ + topic = "test", + key = "key1", + nameserver_list = { + "127.0.0.1:3" + }, + use_tls = false + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +done +--- error_log +Keeping use_tls disabled in rocketmq-logger configuration is a security risk + + + +=== TEST 10: rocketmq with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.rocketmq-logger") + local ok, err = plugin.check_schema({ + topic = "test", + key = "key1", + nameserver_list = { + "127.0.0.1:3" + }, + use_tls = true + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- response_body +done +--- no_error_log +Keeping use_tls disabled in rocketmq-logger configuration is a security risk + + + +=== TEST 11: skywalking-logger with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.skywalking-logger") + local ok, err = plugin.check_schema({endpoint_addr = "http://127.0.0.1"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done +--- error_log +Using skywalking-logger endpoint_addr with no TLS is a security risk + + + +=== TEST 12: skywalking-logger with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.skywalking-logger") + local ok, err = plugin.check_schema({endpoint_addr = "https://127.0.0.1"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done +--- no_error_log +Using skywalking-logger 
endpoint_addr with no TLS is a security risk + + + +=== TEST 13: skywalking with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.skywalking") + local ok, err = plugin.check_schema({endpoint_addr = "http://127.0.0.1:12800"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +Using skywalking endpoint_addr with no TLS is a security risk + + + +=== TEST 14: skywalking with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.skywalking") + local ok, err = plugin.check_schema({endpoint_addr = "https://127.0.0.1:12800"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Using skywalking endpoint_addr with no TLS is a security risk + + + +=== TEST 15: syslog with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local ok, err = plugin.check_schema({ + host = "127.0.0.1", + port = 5140, + tls = false + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +Keeping tls disabled in syslog configuration is a security risk + + + +=== TEST 16: syslog with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local ok, err = plugin.check_schema({ + host = "127.0.0.1", + port = 5140, + tls = true + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Keeping tls disabled in syslog configuration is a security risk + + + +=== TEST 17: tcp-logger with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tcp-logger") + local ok, err = plugin.check_schema({host = "127.0.0.1", port = 3000, tls = false}) + if not ok then + 
ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +Keeping tls disabled in tcp-logger configuration is a security risk + + + +=== TEST 18: tcp-logger with TLS +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 3000, + "tls": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed +--- no_error_log +Keeping tls disabled in tcp-logger configuration is a security risk + + + +=== TEST 19: wolf-rbac with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.wolf-rbac") + local conf = { + server = "http://127.0.0.1:12180" + } + + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- response_body_like eval +qr/\{"appid":"unset","header_prefix":"X-","server":"http:\/\/127\.0\.0\.1:12180"\}/ +--- error_log +Using wolf-rbac server with no TLS is a security risk + + + +=== TEST 20: wolf-rbac with TLS +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "wolf_rbac_unit_test", + "plugins": { + "wolf-rbac": { + "appid": "wolf-rbac-app", + "server": "https://127.0.0.1:1982" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed +--- no_error_log +Using wolf-rbac server with no TLS is a security risk + + + +=== TEST 21: zipkin with no TLS +--- config + location /t { + content_by_lua_block { + local plugin = 
require("apisix.plugins.zipkin") + local ok, err = plugin.check_schema({endpoint = 'http://127.0.0.1', sample_ratio = 0.001}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +Using zipkin endpoint with no TLS is a security risk + + + +=== TEST 22: zipkin with TLS +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.zipkin") + local ok, err = plugin.check_schema({endpoint = 'https://127.0.0.1', sample_ratio = 0.001}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Using zipkin endpoint with no TLS is a security risk diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/server-info.t b/CloudronPackages/APISIX/apisix-source/t/plugin/server-info.t new file mode 100644 index 0000000..13c44a9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/server-info.t @@ -0,0 +1,104 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +our $SkipReason; + +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} +use Test::Nginx::Socket::Lua $SkipReason ? (skip_all => $SkipReason) : (); +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity check +--- yaml_config +apisix: + id: 123456 +plugins: + - server-info +plugin_attr: + server-info: + report_ttl: 60 +--- config +location /t { + content_by_lua_block { + ngx.sleep(2) + local core = require("apisix.core") + local key = "/data_plane/server_info/" .. core.id.get() + local res, err = core.etcd.get(key) + if err ~= nil then + ngx.status = 500 + ngx.say(err) + return + end + + local value = res.body.node.value + local json = require("toolkit.json") + ngx.say(json.encode(value)) + } +} +--- response_body eval +qr/^{"boot_time":\d+,"etcd_version":"[\d\.]+","hostname":"[a-zA-Z\-0-9\.]+","id":[a-zA-Z\-0-9]+,"version":"[\d\.]+"}$/ + + + +=== TEST 2: get server_info from plugin control API +--- yaml_config +apisix: + id: 123456 +plugins: + - server-info +--- config +location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, _, body = t("/v1/server_info") + if code >= 300 then + ngx.status = code + end + + body = json.decode(body) + ngx.say(json.encode(body)) + } +} +--- response_body eval +qr/^{"boot_time":\d+,"etcd_version":"[\d\.]+","hostname":"[a-zA-Z\-0-9\.]+","id":[a-zA-Z\-0-9]+,"version":"[\d\.]+"}$/ diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/serverless.t b/CloudronPackages/APISIX/apisix-source/t/plugin/serverless.t new file mode 100644 index 0000000..74318ca --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/serverless.t @@ -0,0 +1,606 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: use default phase +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.serverless-pre-function") + local schema = {functions = {"return function() ngx.log(ngx.ERR, 'serverless post function'); ngx.exit(201); end"}} + local ok, err = plugin.check_schema(schema) + if not ok then + ngx.say(err) + end + + ngx.say(schema.phase) + } + } +--- request +GET /t +--- response_body +access + + + +=== TEST 2: phase is rewrite +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.serverless-pre-function") + local ok, err = plugin.check_schema({phase = 'rewrite', functions = {"return function() ngx.log(ngx.ERR, 'serverless post function'); ngx.exit(201); end"}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 3: phase is log for post function +--- config + location /t { + content_by_lua_block { + local plugin = 
require("apisix.plugins.serverless-post-function") + local ok, err = plugin.check_schema({phase = 'log', functions = {"return function() ngx.log(ngx.ERR, 'serverless post function'); ngx.exit(201); end"}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 4: invalid phase +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.serverless-pre-function") + local ok, err = plugin.check_schema({phase = 'abc', functions = {"return function() ngx.log(ngx.ERR, 'serverless post function'); ngx.exit(201); end"}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "phase" validation failed: matches none of the enum values +done + + + +=== TEST 5: only accept function +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.serverless-pre-function") + local ok, err = plugin.check_schema({functions = {"local a = 123;"}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +only accept Lua function, the input code type is nil +done + + + +=== TEST 6: invalid lua code +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.serverless-pre-function") + local ok, err = plugin.check_schema({functions = {"a"}}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +failed to loadstring: [string "a"]:1: '=' expected near '' +done + + + +=== TEST 7: set route and serverless-post-function plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "functions" : ["return function() ngx.log(ngx.ERR, 'serverless post function'); ngx.exit(201); end"] + } + }, + "upstream": { 
+ "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: check plugin +--- request +GET /hello +--- error_code: 201 +--- error_log +serverless post function + + + +=== TEST 9: set route and serverless-pre-function plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "functions" : ["return function() ngx.log(ngx.ERR, 'serverless pre function'); ngx.exit(201); end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: check plugin +--- request +GET /hello +--- error_code: 201 +--- error_log +serverless pre function + + + +=== TEST 11: serverless-pre-function and serverless-post-function +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "functions" : ["return function() ngx.log(ngx.ERR, 'serverless pre function'); end"] + }, + "serverless-post-function": { + "functions" : ["return function() ngx.log(ngx.ERR, 'serverless post function'); ngx.exit(201); end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: check plugin +--- request +GET /hello +--- error_code: 201 +--- error_log +serverless pre function +serverless post function + + + +=== TEST 13: 
log phase and serverless-pre-function plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "log", + "functions" : ["return function() ngx.log(ngx.ERR, 'serverless pre function'); end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: check plugin +--- request +GET /hello +--- error_log +serverless pre function + + + +=== TEST 15: functions +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function() ngx.log(ngx.ERR, 'one'); end", "return function() ngx.log(ngx.ERR, 'two'); end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: check plugin +--- request +GET /hello +--- error_log +one +two + + + +=== TEST 17: closure +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "log", + "functions" : ["local count = 1; return function() count = count + 1;ngx.log(ngx.ERR, 'serverless pre function:', count); end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } 
+--- request +GET /t +--- response_body +passed + + + +=== TEST 18: check plugin +--- request +GET /hello +--- error_log +serverless pre function:2 + + + +=== TEST 19: http -> https redirect +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "functions" : ["return function() if ngx.var.scheme == \"http\" and ngx.var.host == \"foo.com\" then ngx.header[\"Location\"] = \"https://foo.com\" .. ngx.var.request_uri; ngx.exit(ngx.HTTP_MOVED_PERMANENTLY); end; end"] + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- more_headers +Host: foo.com +--- response_body +passed + + + +=== TEST 20: check plugin +--- request +GET /hello +--- more_headers +Host: foo.com +--- error_code: 301 +--- response_headers +Location: https://foo.com/hello + + + +=== TEST 21: access conf & ctx in serverless +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "functions" : ["return function(conf, ctx) ngx.log(ngx.WARN, 'default phase: ', conf.phase); + ngx.log(ngx.WARN, 'match uri ', ctx.curr_req_matched and ctx.curr_req_matched._path); + ctx.var.upstream_uri = '/server_port' end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 22: check plugin +--- request +GET /hello +--- response_body chomp +1980 +--- error_log +default phase: access +match uri /hello + + + +=== TEST 23: add args parse test for serverless +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "functions" : ["return function(conf, ctx) local net_url = require(\"net.url\"); + local args = ngx.var.args; + ngx.print(net_url.parse(args).path); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 24: check args parse test +--- request +GET /echo?args=%40%23%24%25%5E%26 +--- response_body chomp +args=@#$%^& + + + +=== TEST 25: return status code should exit the request like other plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-post-function": { + "functions" : ["return function(conf, ctx) return 403, 'forbidden' end", + "return function(conf, ctx) ngx.log(ngx.ERR, 'unreachable') end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 26: check plugin +--- request +GET /hello +--- error_code: 403 +--- response_body chomp +forbidden diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/skywalking-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/skywalking-logger.t new file mode 100644 index 0000000..6ab87be --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/skywalking-logger.t @@ -0,0 +1,386 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $http_config = $block->http_config // <<_EOC_; + + server { + listen 1986; + server_tokens off; + + location /v3/logs { + content_by_lua_block { + local core = require("apisix.core") + ngx.req.read_body() + local data = ngx.req.get_body_data() + local headers = ngx.req.get_headers() + ngx.log(ngx.WARN, "skywalking-logger body: ", data) + core.log.warn(core.json.encode(core.request.get_body(), true)) + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.skywalking-logger") + local ok, err = plugin.check_schema({endpoint_addr = "http://127.0.0.1"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } + + + +=== TEST 2: full schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.skywalking-logger") + local ok, err = plugin.check_schema({endpoint_addr = "http://127.0.0.1", + timeout = 3, + name = "skywalking-logger", + max_retry_count = 2, + retry_delay = 2, + buffer_duration = 2, + inactive_timeout = 2, + batch_max_size = 500, + }) + if not 
ok then + ngx.say(err) + end + + ngx.say("done") + } + } + + + +=== TEST 3: uri is missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.skywalking-logger") + local ok, err = plugin.check_schema({timeout = 3, + name = "skywalking-logger", + max_retry_count = 2, + retry_delay = 2, + buffer_duration = 2, + inactive_timeout = 2, + batch_max_size = 500, + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "endpoint_addr" is required +done + + + +=== TEST 4: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking-logger": { + "endpoint_addr": "http://127.0.0.1:1986", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "service_instance_name": "$hostname" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: access local server +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch Processor[skywalking logger] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 6: test trace context header +--- request +GET /opentracing +--- more_headers +sw8: 1-YWU3MDk3NjktNmUyMC00YzY4LTk3MzMtMTBmNDU1MjE2Y2M1-YWU3MDk3NjktNmUyMC00YzY4LTk3MzMtMTBmNDU1MjE2Y2M1-1-QVBJU0lY-QVBJU0lYIEluc3RhbmNlIE5hbWU=-L2dldA==-dXBzdHJlYW0gc2VydmljZQ== +--- response_body +opentracing +--- error_log eval +qr/.*\\\"traceContext\\\":\{(\\\"traceSegmentId\\\":\\\"ae709769-6e20-4c68-9733-10f455216cc5\\\"|\\\"traceId\\\":\\\"ae709769-6e20-4c68-9733-10f455216cc5\\\"|\\\"spanId\\\":1|,){5}\}.*/ +--- wait: 0.5 + + + +=== TEST 7: test wrong trace context header +--- request +GET 
/opentracing +--- more_headers +sw8: 1-YWU3MDk3NjktNmUyMC00YzY4LTk3MzMtMTBmNDU1MjE2Y2M1-YWU3MDk3NjktNmUyMC00YzY4LTk3MzMtMTBmNDU1MjE2Y2M1-1-QVBJU0lY-QVBJU0lYIEluc3RhbmNlIE5hbWU=-L2dldA== +--- response_body +opentracing +--- error_log eval +qr/failed to parse trace_context header:/ +--- wait: 0.5 + + + +=== TEST 8: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/skywalking-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: access local server and test log format +--- request +GET /opentracing +--- response_body +opentracing +--- error_log eval +qr/.*\{\\\"json\\\":\\\"\{(\\\\\\\"\@timestamp\\\\\\\":\\\\\\\".*\\\\\\\"|\\\\\\\"client_ip\\\\\\\":\\\\\\\"127\.0\.0\.1\\\\\\\"|\\\\\\\"host\\\\\\\":\\\\\\\"localhost\\\\\\\"|\\\\\\\"route_id\\\\\\\":\\\\\\\"1\\\\\\\"|,){7}\}/ +--- wait: 0.5 + + + +=== TEST 10: log format in plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking-logger": { + "endpoint_addr": "http://127.0.0.1:1986", + "log_format": { + "my_ip": "$remote_addr" + }, + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: access local server and test log format +--- request +GET /opentracing +--- response_body +opentracing +--- error_log eval 
+qr/.*\{\\\"json\\\":.*\\\\\\"my_ip\\\\\\":\\\\\\"127\.0\.0\.1\\\\\\".*\}/ +--- wait: 0.5 + + + +=== TEST 12: test serviceInstance $hostname +--- request +GET /opentracing +--- response_body +opentracing +--- no_error_log eval +qr/\\\"serviceInstance\\\":\\\"\$hostname\\\"/ +qr/\\\"serviceInstance\\\":\\\"\\\"/ +--- wait: 0.5 + + + +=== TEST 13: add plugin with 'include_req_body' setting, collect request log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/skywalking-logger', ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking-logger": { + "endpoint_addr": "http://127.0.0.1:1986", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, body = t("/opentracing", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- error_log +\"body\":\"{\\\"sample_payload\\\":\\\"hello\\\"}\" + + + +=== TEST 14: add plugin with 'include_resp_body' setting, collect response log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/skywalking-logger', ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking-logger": { + "endpoint_addr": "http://127.0.0.1:1986", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "include_req_body": true, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, 
body = t("/opentracing", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- error_log +\"body\":\"opentracing\\n\" diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/skywalking.t b/CloudronPackages/APISIX/apisix-source/t/plugin/skywalking.t new file mode 100644 index 0000000..3ba07c2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/skywalking.t @@ -0,0 +1,476 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + my $extra_yaml_config = <<_EOC_; +plugins: + - example-plugin + - key-auth + - skywalking +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + my $extra_init_by_lua = <<_EOC_; + -- reduce default report interval + local client = require("skywalking.client") + client.backendTimerDelay = 0.5 + + local sw_tracer = require("skywalking.tracer") + local inject = function(mod, name) + local old_f = mod[name] + mod[name] = function (...) + ngx.log(ngx.WARN, "skywalking run ", name) + return old_f(...) 
+ end + end + + inject(sw_tracer, "start") + inject(sw_tracer, "finish") + inject(sw_tracer, "prepareForReport") +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + + $block; +}); + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("debug"); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: trigger skywalking +--- request +GET /opentracing +--- response_body +opentracing +--- grep_error_log eval +qr/skywalking run \w+/ +--- grep_error_log_out +skywalking run start +skywalking run finish +skywalking run prepareForReport +--- wait: 1 + + + +=== TEST 3: change sample ratio +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + "sample_ratio": 0.00001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: not trigger skywalking +--- request +GET /opentracing +--- response_body +opentracing +--- grep_error_log eval +qr/skywalking run \w+/ +--- grep_error_log_out + + + +=== TEST 5: disabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: not trigger skywalking +--- request +GET /opentracing +--- response_body +opentracing +--- no_error_log +rewrite phase of skywalking plugin + + + +=== TEST 7: enable skywalking(sample_ratio=1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + "sample_ratio": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: test segments report +--- request +GET /opentracing +--- response_body +opentracing +--- grep_error_log eval +qr/skywalking run \w+/ +--- grep_error_log_out +skywalking run start +skywalking run finish +skywalking run prepareForReport +--- wait: 1 + + + +=== TEST 9: enable at both global and route levels +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: run once +--- request +GET /opentracing +--- response_body +opentracing 
+--- grep_error_log eval +qr/skywalking run \w+/ +--- grep_error_log_out +skywalking run start +skywalking run finish +skywalking run prepareForReport + + + +=== TEST 11: enable at global but disable at route levels +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + "_meta": { + "disable": true + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "skywalking": { + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: run once +--- request +GET /opentracing +--- response_body +opentracing +--- grep_error_log eval +qr/skywalking run \w+/ +--- grep_error_log_out +skywalking run start +skywalking run finish +skywalking run prepareForReport + + + +=== TEST 13: delete global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: trace request rejected by auth +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "auth-one" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "key-auth": 
{}, + "example-plugin": {"i": 1}, + "skywalking": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: hit +--- request +GET /opentracing +--- error_code: 401 +--- grep_error_log eval +qr/(skywalking run \w+|plugin body_filter phase)/ +--- grep_error_log_out +skywalking run start +plugin body_filter phase +plugin body_filter phase +skywalking run finish +skywalking run prepareForReport diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/sls-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/sls-logger.t new file mode 100644 index 0000000..45dd3f8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/sls-logger.t @@ -0,0 +1,531 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.sls-logger") + local ok, err = plugin.check_schema({host = "cn-zhangjiakou-intranet.log.aliyuncs.com", port = 10009, project = "your-project", logstore = "your-logstore" + , access_key_id = "your_access_key", access_key_secret = "your_access_secret"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: missing access_key_secret +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.sls-logger") + local ok, err = plugin.check_schema({host = "cn-zhangjiakou-intranet.log.aliyuncs.com", port = 10009, project = "your-project", logstore = "your-logstore" + , access_key_id = "your_access_key"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "access_key_secret" is required +done + + + +=== TEST 3: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.sls-logger") + local ok, err = plugin.check_schema({host = "cn-zhangjiakou-intranet.log.aliyuncs.com", port = 10009, project = "your_project", logstore = "your_logstore" + , access_key_id = "your_access_key", access_key_secret = "your_access_secret", timeout = "10"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "timeout" validation failed: wrong type: expected integer, got string +done + + + +=== TEST 4: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "sls-logger": 
{ + "host": "100.100.99.135", + "port": 10009, + "project": "your_project", + "logstore": "your_logstore", + "access_key_id": "your_access_key_id", + "access_key_secret": "your_access_key_secret", + "timeout": 30000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: access +--- request +GET /hello +--- response_body +hello world +--- wait: 1 + + + +=== TEST 6: test combine log +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.sls-logger") + local entities = {} + table.insert(entities, {data = "1"}) + table.insert(entities, {data = "2"}) + table.insert(entities, {data = "3"}) + local data = plugin.combine_syslog(entities) + ngx.say(data) + } + } +--- response_body +123 + + + +=== TEST 7: sls log get milliseconds +--- config + location /t { + content_by_lua_block { + local function get_syslog_timestamp_millisecond(log_entry) + local first_idx = string.find(log_entry, " ") + 1 + local last_idx2 = string.find(log_entry, " ", first_idx) + local rfc3339_date = string.sub(log_entry, first_idx, last_idx2) + local rfc3339_len = #rfc3339_date + local rfc3339_millisecond = string.sub(rfc3339_date, rfc3339_len - 4, rfc3339_len - 2) + return tonumber(rfc3339_millisecond) + end + + math.randomseed(os.time()) + local rfc5424 = require("apisix.utils.rfc5424") + local m = 0 + -- because the millisecond value obtained by `ngx.now` may be `0` + -- it is executed multiple times to ensure the accuracy of the test + for i = 1, 5 do + ngx.sleep(string.format("%0.3f", math.random())) + local structured_data = { + {name = "project", value = "apisix.apache.org"}, + {name = "logstore", value = "apisix.apache.org"}, + {name = "access-key-id", value = "apisix.sls.logger"}, + {name = "access-key-secret", value = "BD274822-96AA-4DA6-90EC-15940FB24444"} + } + local 
log_entry = rfc5424.encode("SYSLOG", "INFO", "localhost", "apisix", + 123456, "hello world", structured_data) + m = get_syslog_timestamp_millisecond(log_entry) + m + end + + if m > 0 then + ngx.say("passed") + end + } + } +--- response_body +passed +--- timeout: 5 + + + +=== TEST 8: add log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/sls-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: access +--- extra_init_by_lua + local json = require("toolkit.json") + local rfc5424 = require("apisix.utils.rfc5424") + local old_f = rfc5424.encode + rfc5424.encode = function(facility, severity, hostname, appname, pid, msg, structured_data) + local r = json.decode(msg) + assert(r.client_ip == "127.0.0.1", r.client_ip) + assert(r.host == "localhost", r.host) + return old_f(facility, severity, hostname, appname, pid, msg, structured_data) + end +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 10: delete exist routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- delete exist consumers + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: data encryption for access_key_secret +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "sls-logger": { + "host": "100.100.99.135", + "port": 10009, + "project": "your_project", + "logstore": "your_logstore", + 
"access_key_id": "your_access_key_id", + "access_key_secret": "your_access_key_secret", + "timeout": 30000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["sls-logger"].access_key_secret) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["sls-logger"].access_key_secret) + } + } +--- response_body +your_access_key_secret +1T6nR0fz4yhz/zTuRTvt7Xu3c9ASelDXG2//e/A5OiA= + + + +=== TEST 12: log format in plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "sls-logger": { + "host": "100.100.99.135", + "port": 10009, + "project": "your_project", + "logstore": "your_logstore", + "access_key_id": "your_access_key_id", + "access_key_secret": "your_access_key_secret", + "log_format": { + "vip": "$remote_addr" + }, + "timeout": 30000 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: access +--- extra_init_by_lua + local json = require("toolkit.json") + local rfc5424 = require("apisix.utils.rfc5424") + local old_f = rfc5424.encode + rfc5424.encode = function(facility, severity, hostname, appname, pid, msg, structured_data) + local r = json.decode(msg) + assert(r.vip == "127.0.0.1", r.vip) + return 
old_f(facility, severity, hostname, appname, pid, msg, structured_data) + end +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 14: add plugin with 'include_req_body' setting, collect request log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/sls-logger', ngx.HTTP_DELETE) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "sls-logger": { + "host": "127.0.0.1", + "port": 10009, + "project": "your_project", + "logstore": "your_logstore", + "access_key_id": "your_access_key_id", + "access_key_secret": "your_access_key_secret", + "timeout": 30000, + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, body = t("/hello", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- error_log +"body":"{\"sample_payload\":\"hello\"} + + + +=== TEST 15: add plugin with 'include_resp_body' setting, collect response log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/sls-logger', ngx.HTTP_DELETE) + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "sls-logger": { + "host": "127.0.0.1", + "port": 10009, + "project": "your_project", + "logstore": "your_logstore", + "access_key_id": "your_access_key_id", + "access_key_secret": "your_access_key_secret", + "timeout": 30000, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + local code, _, body = t("/hello", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- error_log +"body":"hello world\n" + + + +=== TEST 16: set incorrect plugin metadata, should have error log 
+--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local key = "/plugin_metadata/sls-logger" + local val = { + id = "sls-logger", + log_format = "bad plugin metadata" + } + local _, err = core.etcd.set(key, val) + if err then + ngx.say(err) + return + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +sync_data(): failed to check item data of [/apisix/plugin_metadata] +failed to check the configuration of plugin sls-logger + + + +=== TEST 17: set correct plugin metadata, should no error log +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local key = "/plugin_metadata/sls-logger" + local val = { + id = "sls-logger", + log_format = { + host = "$host", + client_ip = "$remote_addr" + } + } + local _, err = core.etcd.set(key, val) + if err then + ngx.say(err) + return + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/splunk-hec-logging.t b/CloudronPackages/APISIX/apisix-source/t/plugin/splunk-hec-logging.t new file mode 100644 index 0000000..3d6b108 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/splunk-hec-logging.t @@ -0,0 +1,465 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: configuration verification +--- config + location /t { + content_by_lua_block { + local ok, err + local configs = { + -- full configuration + { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB2414C", + channel = "FE0ECFAD-13D5-401B-847D-77833BD77131", + timeout = 60 + }, + max_retry_count = 0, + retry_delay = 1, + buffer_duration = 60, + inactive_timeout = 2, + batch_max_size = 10, + }, + -- minimize configuration + { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB2414C", + } + }, + -- property "uri" is required + { + endpoint = { + token = "BD274822-96AA-4DA6-90EC-18940FB2414C", + } + }, + -- property "token" is required + { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + } + }, + -- property "uri" validation failed + { + endpoint = { + uri = "127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB2414C", + } + } + } + + local plugin = require("apisix.plugins.splunk-hec-logging") + for i = 1, #configs do + ok, err = plugin.check_schema(configs[i]) + if err then + ngx.say(err) + else + ngx.say("passed") + end + end + } + } +--- response_body_like +passed +passed +property "endpoint" validation failed: property "uri" is required +property "endpoint" validation failed: property "token" is required +property "endpoint" validation failed: property "uri" validation failed.* + + + +=== TEST 2: set route (failed auth) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["splunk-hec-logging"] = { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB24444" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: test route (failed auth) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world +--- error_log +Batch Processor[splunk-hec-logging] failed to process entries: failed to send splunk, Invalid authorization +Batch Processor[splunk-hec-logging] exceeded the max_retry_count + + + +=== TEST 4: set route (success write) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["splunk-hec-logging"] = { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB2414C" + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: test route (success write) +--- request +GET /hello +--- wait: 2 +--- response_body +hello world + + + +=== TEST 6: bad custom log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/splunk-hec-logging', + ngx.HTTP_PUT, + [[{ + "log_format": "'$host' '$time_iso8601'" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + ngx.say(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property 
\"log_format\" validation failed: wrong type: expected object, got string"} + + + +=== TEST 7: set route to test custom log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/splunk-hec-logging', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr", + "message_1":"test custom log format in plugin" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1982"] = 1 + } + }, + plugins = { + ["splunk-hec-logging"] = { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB2414C" + }, + batch_max_size = 3, + inactive_timeout = 1 + } + } + }) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- response_body +passed +--- wait: 5 + + + +=== TEST 8: check splunk log +--- exec +tail -n 1 ci/pod/vector/splunk.log +--- response_body eval +qr/.*test custom log format in plugin.*/ + + + +=== TEST 9: set route to test custom log format in route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/splunk-hec-logging', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "vip": "$remote_addr", + "message_2":"logger format in plugin" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1982"] = 1 + 
} + }, + plugins = { + ["splunk-hec-logging"] = { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB2414C" + }, + batch_max_size = 3, + inactive_timeout = 1 + } + } + }) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- response_body +passed +--- wait: 5 + + + +=== TEST 10: check splunk log +--- exec +tail -n 1 ci/pod/vector/splunk.log +--- response_body eval +qr/.*logger format in plugin.*/ + + + +=== TEST 11: set route test batched data +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/splunk-hec-logging', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "vip": "$remote_addr", + "message_3":"test batched data" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1982"] = 1 + } + }, + plugins = { + ["splunk-hec-logging"] = { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB2414C" + }, + batch_max_size = 3, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- response_body +passed +--- wait: 5 + + + +=== TEST 12: check splunk log +--- exec +tail -n 1 ci/pod/vector/splunk.log +--- response_body eval +qr/.*test batched data.*/ + + + +=== TEST 13: set route with keepalive_timeout (success write) +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + }, + plugins = { + ["splunk-hec-logging"] = { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB2414C", + keepalive_timeout = 5000 + }, + batch_max_size = 1, + inactive_timeout = 1 + } + } + }) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/syslog.t b/CloudronPackages/APISIX/apisix-source/t/plugin/syslog.t new file mode 100644 index 0000000..9dbb03a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/syslog.t @@ -0,0 +1,653 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local ok, err = plugin.check_schema({ + host = "127.0.0.1", + port = 5140, + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: missing port +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local ok, err = plugin.check_schema({host = "127.0.0.1"}) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "port" is required +done + + + +=== TEST 3: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local ok, err = plugin.check_schema({ + host = "127.0.0.1", + port = "5140", + }) + if not ok then + ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "port" validation failed: wrong type: expected integer, got string +done + + + +=== TEST 4: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5140 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: access +--- request +GET /hello +--- response_body +hello world +--- wait: 0.2 + + + +=== TEST 6: flush manually +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.syslog") + local logger_socket = require("resty.logger.socket") + local logger, err = 
logger_socket:new({ + host = "127.0.0.1", + port = 5140, + flush_limit = 100, + }) + + local bytes, err = logger:log("abc") + if err then + ngx.log(ngx.ERR, err) + end + + local bytes, err = logger:log("efg") + if err then + ngx.log(ngx.ERR, err) + end + + local ok, err = plugin.flush_syslog(logger) + if not ok then + ngx.say("failed to flush syslog: ", err) + return + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 7: small flush_limit, instant flush +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + -- before 2.13.0, timeout is incorrectly treated as inactive_timeout + [[{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5140, + "flush_limit" : 1, + "inactive_timeout": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + -- wait etcd sync + ngx.sleep(0.5) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + if not res then + ngx.say("failed request: ", err) + return + end + + if res.status >= 300 then + ngx.status = res.status + end + ngx.print(res.body) + + -- wait flush log + ngx.sleep(2.5) + } + } +--- request +GET /t +--- response_body +passed +hello world +--- error_log +try to lock with key route#1 +unlock with key route#1 +--- timeout: 5 + + + +=== TEST 8: check log +--- exec +tail -n 1 ci/pod/vector/syslog-tcp.log +--- response_body eval +qr/.*apisix_latency.*/ + + + +=== TEST 9: check plugin configuration updating +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body1 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "host": "127.0.0.1", + "port": 5044, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body2 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, body3 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "host": "127.0.0.1", + "port": 5045, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body4 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.print(body1) + ngx.print(body2) + ngx.print(body3) + ngx.print(body4) + } + } +--- request +GET /t +--- wait: 0.5 +--- response_body +passedopentracing +passedopentracing +--- grep_error_log eval +qr/sending a batch logs to 127.0.0.1:(\d+)/ +--- grep_error_log_out +sending a batch logs to 127.0.0.1:5044 
+sending a batch logs to 127.0.0.1:5045 + + + +=== TEST 10: add log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/syslog', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "client_ip": "$remote_addr", + "upstream": "$upstream_addr" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: Add route and Enable Syslog Plugin, batch_max_size=1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "batch_max_size": 1, + "disable": false, + "flush_limit": 1, + "host" : "127.0.0.1", + "port" : 5140 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit route and report sys logger +--- extra_init_by_lua + local syslog = require("apisix.plugins.syslog.init") + local json = require("apisix.core.json") + local log = require("apisix.core.log") + local old_f = syslog.push_entry + syslog.push_entry = function(conf, ctx, entry) + log.info("syslog-log-format => " .. 
json.encode(entry)) + return old_f(conf, ctx, entry) + end +--- request +GET /hello +--- response_body +hello world +--- wait: 0.5 +--- no_error_log +[error] +--- error_log eval +qr/syslog-log-format.*\{.*"upstream":"127.0.0.1:\d+"/ + + + +=== TEST 13: check log +--- exec +tail -n 1 ci/pod/vector/syslog-tcp.log +--- response_body eval +qr/.*\"host\":\"localhost\".*/ + + + +=== TEST 14: log format in plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "batch_max_size": 1, + "flush_limit": 1, + "log_format": { + "vip": "$remote_addr" + }, + "host" : "127.0.0.1", + "port" : 5140 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: access +--- extra_init_by_lua + local syslog = require("apisix.plugins.syslog.init") + local json = require("apisix.core.json") + local log = require("apisix.core.log") + local old_f = syslog.push_entry + syslog.push_entry = function(conf, ctx, entry) + assert(entry.vip == "127.0.0.1") + log.info("push_entry is called with data: ", json.encode(entry)) + return old_f(conf, ctx, entry) + end +--- request +GET /hello +--- response_body +hello world +--- wait: 0.5 +--- no_error_log +[error] +--- error_log +push_entry is called with data + + + +=== TEST 16: check log +--- exec +tail -n 1 ci/pod/vector/syslog-tcp.log +--- response_body eval +qr/.*vip.*/ + + + +=== TEST 17: test udp mode +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "batch_max_size": 1, + "disable": false, + "flush_limit": 1, + "host" : "127.0.0.1", 
+ "port" : 5150, + "sock_type": "udp" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit +--- request +GET /hello + + + +=== TEST 19: check log +--- exec +tail -n 1 ci/pod/vector/syslog-udp.log +--- response_body eval +qr/.*upstream.*/ + + + +=== TEST 20: add plugin with 'include_req_body' setting, collect request log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/syslog', ngx.HTTP_DELETE) + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "batch_max_size": 1, + "flush_limit": 1, + "host" : "127.0.0.1", + "port" : 5140, + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + + local code, _, body = t("/hello", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- request +GET /t +--- error_log +"body":"{\"sample_payload\":\"hello\"}" + + + +=== TEST 21: add plugin with 'include_resp_body' setting, collect response log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/syslog', ngx.HTTP_DELETE) + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "batch_max_size": 1, + "flush_limit": 1, + "host" : "127.0.0.1", + "port" : 5140, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + + local code, _, body = t("/hello", "POST", 
"{\"sample_payload\":\"hello\"}") + } + } +--- request +GET /t +--- error_log +"body":"hello world\n" diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/tcp-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/tcp-logger.t new file mode 100644 index 0000000..b3c29ee --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/tcp-logger.t @@ -0,0 +1,608 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tcp-logger") + local ok, err = plugin.check_schema({host = "127.0.0.1", port = 3000}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: missing host +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tcp-logger") + local ok, err = plugin.check_schema({port = 3000}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "host" is required +done + + + +=== TEST 3: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tcp-logger") + local ok, err = plugin.check_schema({host= "127.0.0.1", port = 2000, timeout = "10", + tls = false, tls_options = "tls options"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "timeout" validation failed: wrong type: expected integer, got string +done + + + +=== TEST 4: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 3000, + "tls": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: access +--- request +GET /hello +--- response_body +hello world +--- wait: 1 + + + +=== TEST 6: error log +--- log_level: error +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test 
+ local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "312.0.0.1", + "port": 2000, + "batch_max_size": 1, + "max_retry_count": 2, + "retry_delay": 0 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + } + } +--- request +GET /t +--- error_log +failed to connect to TCP server: host[312.0.0.1] port[2000] +[error] +--- wait: 3 + + + +=== TEST 7: check plugin configuration updating +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body1 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 3000, + "tls": false, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body2 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, body3 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 43000, + "tls": false, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body4 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.print(body1) + ngx.print(body2) + ngx.print(body3) + ngx.print(body4) + } + } +--- request 
+GET /t +--- wait: 0.5 +--- response_body +passedopentracing +passedopentracing +--- grep_error_log eval +qr/sending a batch logs to 127.0.0.1:(\d+)/ +--- grep_error_log_out +sending a batch logs to 127.0.0.1:3000 +sending a batch logs to 127.0.0.1:43000 + + + +=== TEST 8: bad custom log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/tcp-logger', + ngx.HTTP_PUT, + [[{ + "log_format": "'$host' '$time_iso8601'" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"log_format\" validation failed: wrong type: expected object, got string"} + + + +=== TEST 9: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 3000, + "tls": false, + "batch_max_size": 1, + "inactive_timeout": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugin_metadata/tcp-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "case name": "plugin_metadata", + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, _, _ = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: log format in plugin_metadata +--- exec +tail -n 1 ci/pod/vector/tcp.log +--- response_body eval +qr/.*plugin_metadata.*/ + + + 
+=== TEST 11: remove tcp logger metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/plugin_metadata/tcp-logger', + ngx.HTTP_PUT, + [[{ + "log_format": {} + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: log format in plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 3000, + "tls": false, + "log_format": { + "case name": "logger format in plugin", + "vip": "$remote_addr" + }, + "batch_max_size": 1, + "inactive_timeout": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say("passed") + } + } +--- request +GET /t +--- wait: 0.5 +--- response_body +passed + + + +=== TEST 13: check tcp log +--- exec +tail -n 1 ci/pod/vector/tcp.log +--- response_body eval +qr/.*logger format in plugin.*/ + + + +=== TEST 14: true tcp log with tls +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body1 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 43000, + "tls": true, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body2 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.print(body2) + } + } +--- request 
+GET /t +--- wait: 0.5 +--- response_body +opentracing + + + +=== TEST 15: check tls log +--- exec +tail -n 1 ci/pod/vector/tls-datas.log +--- response_body eval +qr/.*route_id.*1.*/ + + + +=== TEST 16: add plugin with 'include_req_body' setting, collect request log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/tcp-logger', ngx.HTTP_DELETE) + local code, body1 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 43000, + "tls": true, + "batch_max_size": 1, + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body = t("/opentracing", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- request +GET /t +--- error_log +"body":"{\"sample_payload\":\"hello\"}" + + + +=== TEST 17: add plugin with 'include_resp_body' setting, collect request log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/tcp-logger', ngx.HTTP_DELETE) + local code, body1 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + "host": "127.0.0.1", + "port": 43000, + "tls": true, + "batch_max_size": 1, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body = t("/opentracing", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- request +GET /t +--- error_log +"body":"opentracing\n" diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/tencent-cloud-cls.t b/CloudronPackages/APISIX/apisix-source/t/plugin/tencent-cloud-cls.t new file mode 100644 
index 0000000..92b78f2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/tencent-cloud-cls.t @@ -0,0 +1,693 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + server { + listen 10420; + location /structuredlog { + content_by_lua_block { + ngx.req.read_body() + local data = ngx.req.get_body_data() + local headers = ngx.req.get_headers() + ngx.log(ngx.WARN, "tencent-cloud-cls body: ", data) + for k, v in pairs(headers) do + ngx.log(ngx.WARN, "tencent-cloud-cls headers: " .. k .. ":" .. 
v) + end + ngx.say("ok") + } + } + } + server { + listen 10421; + location /structuredlog { + content_by_lua_block { + ngx.exit(500) + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local ok, err = plugin.check_schema({ + cls_host = "ap-guangzhou.cls.tencentyun.com", + cls_topic = "143b5d70-139b-4aec-b54e-bb97756916de", + secret_id = "secret_id", + secret_key = "secret_key", + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +done + + + +=== TEST 2: cls config missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local ok, err = plugin.check_schema({ + cls_host = "ap-guangzhou.cls.tencentyun.com", + cls_topic = "143b5d70-139b-4aec-b54e-bb97756916de", + secret_id = "secret_id", + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "secret_key" is required +done + + + +=== TEST 3: add plugin for incorrect server +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10421", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: incorrect server +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch 
Processor[tencent-cloud-cls] failed to process entries [1/1]: got wrong status: 500 +--- wait: 0.5 + + + +=== TEST 5: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10420", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: access local server +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch Processor[tencent-cloud-cls] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 7: verify request +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_to_cls = function(self, logs) + if (#logs ~= 1) then + ngx.log(ngx.ERR, "unexpected logs length: ", #logs) + return + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing +--- error_log +Batch Processor[tencent-cloud-cls] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 8: verify cls api request +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_cls_request = function(self, pb_obj) + if (#pb_obj.logGroupList ~= 1) then + ngx.log(ngx.ERR, "unexpected logGroupList length: ", #pb_obj.logGroupList) + return false + end + local log_group = pb_obj.logGroupList[1] + if #log_group.logs ~= 1 then + ngx.log(ngx.ERR, "unexpected logs length: ", #log_group.logs) + return false + end + local log = log_group.logs[1] + if #log.contents == 0 
then + ngx.log(ngx.ERR, "unexpected contents length: ", #log.contents) + return false + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing + + + +=== TEST 9: plugin metadata +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/tencent-cloud-cls', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: log use log_format +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_cls_request = function(self, pb_obj) + if (#pb_obj.logGroupList ~= 1) then + ngx.log(ngx.ERR, "unexpected logGroupList length: ", #pb_obj.logGroupList) + return false + end + local log_group = pb_obj.logGroupList[1] + if #log_group.logs ~= 1 then + ngx.log(ngx.ERR, "unexpected logs length: ", #log_group.logs) + return false + end + local log = log_group.logs[1] + if #log.contents == 0 then + ngx.log(ngx.ERR, "unexpected contents length: ", #log.contents) + return false + end + local has_host, has_timestamp, has_client_ip = false, false, false + for i, tag in ipairs(log.contents) do + if tag.key == "host" then + has_host = true + end + if tag.key == "@timestamp" then + has_timestamp = true + end + if tag.key == "client_ip" then + has_client_ip = true + end + end + if not(has_host and has_timestamp and has_client_ip) then + return false + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing +--- wait: 0.5 + + + +=== TEST 11: delete exist routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- delete exist consumers + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + ngx.say(body) + } + } +--- response_body +passed 
+ + + +=== TEST 12: data encryption for secret_key +--- yaml_config +apisix: + data_encryption: + enable_encrypt_fields: true + keyring: + - edd1c9f0985e76a2 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10421", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + -- get plugin conf from admin api, password is decrypted + local code, message, res = t('/apisix/admin/routes/1', + ngx.HTTP_GET + ) + res = json.decode(res) + if code >= 300 then + ngx.status = code + ngx.say(message) + return + end + + ngx.say(res.value.plugins["tencent-cloud-cls"].secret_key) + + -- get plugin conf from etcd, password is encrypted + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/routes/1')) + ngx.say(res.body.node.value.plugins["tencent-cloud-cls"].secret_key) + } + } +--- response_body +secret_key +oshn8tcqE8cJArmEILVNPQ== + + + +=== TEST 13: log format in plugin +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.tencent-cloud-cls") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10421", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "inactive_timeout": 1, + 
"log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "vip": "$remote_addr" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: log use log_format +--- extra_init_by_lua + local cls = require("apisix.plugins.tencent-cloud-cls.cls-sdk") + cls.send_cls_request = function(self, pb_obj) + if (#pb_obj.logGroupList ~= 1) then + ngx.log(ngx.ERR, "unexpected logGroupList length: ", #pb_obj.logGroupList) + return false + end + local log_group = pb_obj.logGroupList[1] + if #log_group.logs ~= 1 then + ngx.log(ngx.ERR, "unexpected logs length: ", #log_group.logs) + return false + end + local log = log_group.logs[1] + if #log.contents == 0 then + ngx.log(ngx.ERR, "unexpected contents length: ", #log.contents) + return false + end + local has_host, has_timestamp, has_vip = false, false, false + for i, tag in ipairs(log.contents) do + if tag.key == "host" then + has_host = true + end + if tag.key == "@timestamp" then + has_timestamp = true + end + if tag.key == "vip" then + has_vip = true + end + end + if not(has_host and has_timestamp and has_vip) then + return false + end + return true + end +--- request +GET /opentracing +--- response_body +opentracing +--- wait: 0.5 + + + +=== TEST 15: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10420", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": 
"roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: test resolve ip failed +--- extra_init_by_lua + local socket = require("socket") + socket.dns.toip = function(address) + return nil, "address can't be resolved" + end +--- request +GET /opentracing +--- response_body +opentracing +--- error_log eval +qr/resolve ip failed, hostname: .*, error: address can't be resolved/ +--- wait: 0.5 + + + +=== TEST 17: collect log with include_req_body_expr +--- log_level: debug +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/tencent-cloud-cls', ngx.HTTP_DELETE) + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10420", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "include_req_body": true, + "include_req_body_expr": [ + ["arg_bar", "==", "bar"] + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + -- this will include resp body + local code, _, body = t("/opentracing?bar=bar", "POST", "body-data") + } + } +--- error_log +"body":"body-data" + + + +=== TEST 18: collect log with include_req_body_expr mismatch +--- log_level: debug +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, _, body = t("/opentracing?foo=bar", "POST", "body-data") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + + } + } +--- no_error_log +"body":"body-data" + + + +=== TEST 19: collect log with include_resp_body_expr +--- log_level: debug +--- config + location 
/t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10420", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2, + "include_resp_body": true, + "include_resp_body_expr": [ + ["arg_bar", "==", "bar"] + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + -- this will include resp body + local code, _, body = t("/opentracing?bar=bar", "GET") + } + } +--- error_log +"body":"opentracing\n" + + + +=== TEST 20: collect log with include_resp_body_expr mismatch +--- log_level: debug +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, _, body = t("/opentracing?foo=bar", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.print(body) + + } + } +--- no_error_log +"body":"opentracing\n" diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split.t b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split.t new file mode 100644 index 0000000..144ee77 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split.t @@ -0,0 +1,790 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests; + +__DATA__ + +=== TEST 1: schema validation passed +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.traffic-split") + local ok, err = plugin.check_schema({ + rules = { + { + match = { + { + vars = { + {"arg_name", "==", "jack"}, + {"arg_age", "!", "<", "16"} + } + }, + { + vars = { + {"arg_name", "==", "rose"}, + {"arg_age", "!", ">", "32"} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = {["127.0.0.1:1981"]=2}, + timeout = {connect = 15, send = 15, read = 15} + }, + weight = 2 + }, + { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = {["127.0.0.1:1982"]=2}, + timeout = {connect = 15, send = 15, read = 15} + }, + weight = 2 + }, + { + weight = 1 + } + } + } + } + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: schema validation passed, and `match` configuration is missing +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.traffic-split") + local ok, err = plugin.check_schema({ + rules = { + { + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = {["127.0.0.1:1981"]=2}, + timeout = {connect = 15, send = 15, read = 15} + }, + weight = 2 + }, + { + weight = 1 + } + } + } + } + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } 
+--- request +GET /t +--- response_body +done + + + +=== TEST 3: schema validation failed, `vars` expression operator type is wrong +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.traffic-split") + local ok, err = plugin.check_schema({ + rules = { + { + match = { + { + vars = { + {"arg_name", 123, "jack"} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = {["127.0.0.1:1981"]=2}, + timeout = {connect = 15, send = 15, read = 15} + }, + weight = 2 + }, + { + weight = 1 + } + } + } + } + }) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body eval +qr/failed to validate the 'vars' expression: invalid operator '123'/ + + + +=== TEST 4: missing `rules` configuration, the upstream of the default `route` takes effect +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "plugins": { + "traffic-split": {} + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: the upstream of the default `route` takes effect +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 6 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- request +GET /t +--- response_body +1980, 1980, 1980, 1980, 1980, 1980 + + + +=== TEST 6: when `weighted_upstreams` is empty, the upstream of `route` is used by default +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + 
ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [{}] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: the upstream of the default `route` takes effect +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 6 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- request +GET /t +--- response_body +1980, 1980, 1980, 1980, 1980, 1980 + + + +=== TEST 8: single `vars` expression and single plugin `upstream`, and the upstream traffic on `route` accounts for 1/3 +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { { + vars = { { "arg_name", "==", "jack" }, { "arg_age", "!", "<", "16" } } + } }, + weighted_upstreams = { { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1981"] = 2 + }, + timeout = { + connect = 15, + send = 15, + read = 15 + } + }, + weight = 2 + }, { + weight = 1 + } } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: expression validation failed, return to the default `route` upstream port `1980` +--- request +GET /server_port?name=jack&age=14 +--- response_body eval +1980 + + + +=== TEST 10: the expression passes and 
initiated multiple requests, the upstream traffic of `route` accounts for 1/3, and the upstream traffic of plugins accounts for 2/3 +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 6 do + local _, _, body = t('/server_port?name=jack&age=16', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- request +GET /t +--- response_body +1980, 1980, 1981, 1981, 1981, 1981 + + + +=== TEST 11: Multiple vars rules and multiple plugin upstream +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_name", "==", "jack" }, { "arg_age", "~~", "^[1-9]{1,2}"} + } + }, + { + vars = { + {"arg_name2", "in", {"jack", "rose"} }, { "arg_age", "!", "<", 18} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1981"] = 20 + } + }, + weight = 2 + }, + { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + ["127.0.0.1:1982"] = 10 + } + }, + weight = 2 + }, + { + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: expression validation failed, return to the default `route` upstream port `1980` +--- request +GET /server_port?name=jack&age=0 +--- response_body eval +1980 + + + +=== TEST 13: the expression passes and initiated multiple requests, the upstream traffic of `route` accounts for 1/5, and the upstream traffic of plugins accounts for 4/5 +--- config +location /t { 
+ content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 5 do + local _, _, body = t('/server_port?name=jack&age=22', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- request +GET /t +--- response_body +1980, 1981, 1981, 1982, 1982 + + + +=== TEST 14: Multiple vars rules and multiple plugin upstream, do not split traffic to the upstream of `route` +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_name", "==", "jack" }, { "arg_age", "~~", "^[1-9]{1,2}"} + } + }, + { + vars = { + {"arg_name2", "in", {"jack", "rose"} }, { "arg_age", "!", "<", 18} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1981"] = 20 + } + }, + weight = 2 + }, + { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + ["127.0.0.1:1982"] = 10 + } + }, + weight = 2 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: the expression passes and initiated multiple requests, do not split traffic to the upstream of `route` +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 6 do + local _, _, body = t('/server_port?name=jack&age=22', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- request +GET /t +--- response_body +1981, 1981, 1981, 1982, 1982, 1982 + + + +=== TEST 16: support 
multiple ip configuration of `nodes`, and missing upstream configuration on `route` +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_name", "==", "jack" }, { "arg_age", "~~", "^[1-9]{1,2}"} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + ["127.0.0.1:1981"] = 2, + ["127.0.0.1:1982"] = 2 + }, + timeout = { + connect = 15, + send = 15, + read = 15 + } + }, + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: the expression passes and initiated multiple requests, roundrobin the ip of nodes +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 5 do + local _, _, body = t('/server_port?name=jack&age=22', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- request +GET /t +--- response_body +1980, 1981, 1981, 1982, 1982 +--- no_error_log + + + +=== TEST 18: host is domain name +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/", + plugins = { + ["traffic-split"] = { + rules = { { + weighted_upstreams = { { + upstream = { + name = "upstream_A", + type = "roundrobin", + pass_host = "rewrite", + upstream_host = "httpbin.org", + nodes = { + ["httpbin.org:80"] = 0 + } + }, + weight = 100000 + }, { + weight = 1 + } } + } } + } + }, + upstream = { 
+ type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: domain name resolved successfully +--- pipelined_requests eval +["GET /", "GET /"] +--- error_code eval +[200, 200] +--- error_log_like eval +qr/(dns resolver domain: httpbin.org to \d+.\d+.\d+.\d+){2}/ + + + +=== TEST 20: mock Grayscale Release +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = {{ + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = {["127.0.0.1:1981"] = 1} + }, + weight = 2 + }, + { + weight = 1 + } + } + }} + } + }, + upstream = { + type = "roundrobin", + nodes = {["127.0.0.1:1980"] = 1} + } + } + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 21: 2/3 request traffic hits the upstream of the plugin, 1/3 request traffic hits the upstream of `route` +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 6 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- request +GET /t +--- response_body +1980, 1980, 1981, 1981, 1981, 1981 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split2.t new file mode 100644 index 0000000..746d744 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split2.t @@ 
-0,0 +1,864 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: vars rule with ! (set) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = {{ + match = { + {vars = { + {"!AND", + {"arg_name", "==", "jack"}, + {"arg_age", "!", "<", "18"}, + } + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = {["127.0.0.1:1981"] = 1} + }, + weight = 1, + } + } + }} + } + }, + upstream = { + type = "roundrobin", + nodes = {["127.0.0.1:1980"] = 1} + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: vars rule with ! 
(hit) +--- request +GET /server_port?name=jack&age=17 +--- response_body chomp +1981 + + + +=== TEST 3: vars rule with ! (miss) +--- request +GET /server_port?name=jack&age=18 +--- response_body chomp +1980 + + + +=== TEST 4: the upstream node is IP and pass_host is `pass` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + + local data = { + uri = "/uri", + plugins = { + ["traffic-split"] = { + rules = {{ + match = { { + vars = { { "arg_name", "==", "jack" } } + } }, + weighted_upstreams = { + { + upstream = { + type = "roundrobin", + pass_host = "pass", + nodes = {["127.0.0.1:1981"] = 1} + } + } + } + }} + } + }, + upstream = { + type = "roundrobin", + nodes = {["127.0.0.1:1980"] = 1} + } + } + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: upstream_host is `127.0.0.1` +--- request +GET /uri?name=jack +--- more_headers +host: 127.0.0.1 +--- response_body +uri: /uri +host: 127.0.0.1 +x-real-ip: 127.0.0.1 + + + +=== TEST 6: the upstream node is IP and pass_host is `rewrite` +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/uri", + plugins = { + ["traffic-split"] = { + rules = { { + match = { { + vars = { { "arg_name", "==", "jack" } } + } }, + weighted_upstreams = { { + upstream = { + type = "roundrobin", + pass_host = "rewrite", + upstream_host = "test.com", + nodes = { + ["127.0.0.1:1981"] = 1 + } + } + } } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 
7: upstream_host is test.com +--- request +GET /uri?name=jack +--- response_body +uri: /uri +host: test.com +x-real-ip: 127.0.0.1 + + + +=== TEST 8: the upstream node is IP and pass_host is `node` +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + + local data = { + uri = "/uri", + plugins = { + ["traffic-split"] = { + rules = {{ + match = { { + vars = { { "arg_name", "==", "jack" } } + } }, + weighted_upstreams = { + { + upstream = { + type = "roundrobin", + pass_host = "node", + nodes = {["localhost:1981"] = 1} + } + } + } + }} + } + }, + upstream = { + type = "roundrobin", + nodes = {["127.0.0.1:1980"] = 1} + } + } + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: upstream_host is localhost +--- request +GET /uri?name=jack +--- more_headers +host: 127.0.0.1 +--- response_body +uri: /uri +host: localhost +x-real-ip: 127.0.0.1 + + + +=== TEST 10: the upstream.type is `chash` and `key` is header +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + weighted_upstreams = { + { + upstream = { + name = "chash_test", + type = "chash", + hash_on = "header", + key = "custom_header", + nodes = { + ["127.0.0.1:1981"] = 1, + ["127.0.0.1:1982"] = 1 + } + }, + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit routes, hash_on custom header +--- config +location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + local headers2 = {} + headers["custom_header"] = "hello" + headers2["custom_header"] = "world" + for i = 1, 8, 2 do + local _, _, body = t('/server_port', ngx.HTTP_GET, "", nil, headers2) + local _, _, body2 = t('/server_port', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + bodys[i+1] = body2 + end + + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body eval +qr/1981, 1982, 1981, 1982, 1981, 1982, 1981, 1982/ +--- grep_error_log eval +qr/hash_on: header|chash_key: "hello"|chash_key: "world"/ +--- grep_error_log_out +hash_on: header +chash_key: "world" +hash_on: header +chash_key: "hello" +hash_on: header +chash_key: "world" +hash_on: header +chash_key: "hello" +hash_on: header +chash_key: "world" +hash_on: header +chash_key: "hello" +hash_on: header +chash_key: "world" +hash_on: header +chash_key: "hello" + + + +=== TEST 12: the plugin has multiple weighted_upstreams(upstream method) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { { + vars = { { "arg_id", "==", "1" } } + } }, + weighted_upstreams = { { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1981"] = 1 + } + }, + weight = 1 + } } + }, { + match = { { + vars = { { "arg_id", "==", "2" } } + } }, + weighted_upstreams = { { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + ["127.0.0.1:1982"] = 1 + } + }, + weight = 1 + } } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit each 
upstream separately +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 9, 3 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + local _, _, body2 = t('/server_port?id=1', ngx.HTTP_GET) + local _, _, body3 = t('/server_port?id=2', ngx.HTTP_GET) + bodys[i] = body + bodys[i+1] = body2 + bodys[i+2] = body3 + end + + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body eval +qr/1980, 1981, 1982, 1980, 1981, 1982, 1980, 1981, 1982/ + + + +=== TEST 14: the plugin has multiple weighted_upstreams and has a default routing weight in weighted_upstreams +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { { + vars = { { "arg_id", "==", "1" } } + } }, + weighted_upstreams = { { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1981"] = 1 + } + }, + weight = 1 + }, { + weight = 1 + } } + }, { + match = { { + vars = { { "arg_id", "==", "2" } } + } }, + weighted_upstreams = { { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + ["127.0.0.1:1982"] = 1 + } + }, + weight = 1 + }, { + weight = 1 + } } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 15: every weighted_upstreams in the plugin is hit +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 8, 2 do + local _, _, body = t('/server_port?id=1', ngx.HTTP_GET) + local _, _, body2 = t('/server_port?id=2', ngx.HTTP_GET) + bodys[i] = body + bodys[i+1] = body2 + end + + 
table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body eval +qr/1980, 1980, 1980, 1980, 1981, 1981, 1982, 1982/ + + + +=== TEST 16: set upstream(upstream_id: 1, upstream_id: 2) and add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin", + "desc": "new upstream A" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + code, body = t('/apisix/admin/upstreams/2', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin", + "desc": "new upstream B" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + end + + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { + { + match = { + { + vars = { + {"arg_id", "==", "1" } + } + } + }, + weighted_upstreams = { + { + upstream_id = 1, + weight = 1 + } + } + }, + { + match = { + { + vars = { + {"arg_id", "==", "2" } + } + } + }, + weighted_upstreams = { + { + upstream_id = 2, + weight = 1 + } + } + } + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: hit each upstream separately +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 9, 3 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + local _, _, body2 = t('/server_port?id=1', ngx.HTTP_GET) + local _, _, body3 = t('/server_port?id=2', ngx.HTTP_GET) + bodys[i] = body + bodys[i+1] = body2 + bodys[i+2] = body3 + end + + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body eval +qr/1980, 
1981, 1982, 1980, 1981, 1982, 1980, 1981, 1982/ + + + +=== TEST 18: multi nodes with `node` mode to pass host +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "localhost:1979": 1000, + "127.0.0.1:1980": 1 + }, + "type": "roundrobin", + "pass_host": "node" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local data = { + uri = "/uri", + plugins = { + ["traffic-split"] = { + rules = { + { + match = { + { + vars = { + {"arg_id", "==", "1" } + } + } + }, + weighted_upstreams = { + { + upstream_id = 1, + weight = 1 + } + } + } + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1978"] = 1 + } + } + } + + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: hit route +--- request +GET /uri?id=1 +--- response_body eval +qr/host: 127.0.0.1/ +--- error_log +proxy request to 127.0.0.1:1980 + + + +=== TEST 20: invalid upstream_id should report failure +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local data = { + uri = "/route", + plugins = { + ["traffic-split"] = { + rules = { + { + weighted_upstreams = { + { + upstream_id = "invalid-id", + weight = 1 + } + } + }, + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + ngx.status, body = t('/route', ngx.HTTP_GET) + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 500 +--- error_log +failed to find upstream by id: invalid-id + + + +=== TEST 21: use upstream with https scheme +--- config + location /t { + 
content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + + local data = { + uri = "/hello", + plugins = { + ["traffic-split"] = { + rules = { + { + match = { { + vars = { { "arg_scheme", "==", "https" } } + } }, + weighted_upstreams = { + { + upstream = { + type = "roundrobin", + pass_host = "node", + nodes = { + ["127.0.0.1:1983"] = 1, + }, + scheme = "https" + }, + weight = 1 + } + } + } + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 22: hit route +--- request +GET /hello?scheme=https +--- error_code: 200 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split3.t new file mode 100644 index 0000000..e709609 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split3.t @@ -0,0 +1,784 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: mock Blue-green Release +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { { + vars = { { "http_release", "==", "blue" } } + } }, + weighted_upstreams = { { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1981"] = 1 + } + } + } } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: release is equal to `blue` +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + headers["release"] = "blue" + for i = 1, 6 do + local _, _, body = t('/server_port', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1981, 1981, 1981, 1981, 1981, 1981 + + + +=== TEST 3: release is equal to `green` +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + headers["release"] = "green" + for i = 1, 6 do + local _, _, body = t('/server_port', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1980, 1980, 1980, 1980, 1980 + + + +=== TEST 4: mock Custom Release +--- config + location /t { + content_by_lua_block { + local json 
= require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_name", "==", "jack"}, + {"arg_age", ">", "23"}, + {"http_appkey", "~~", "[a-z]{1,5}"} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1981"] = 20 + } + }, + weight = 2 + }, + { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + ["127.0.0.1:1982"] = 10 + } + }, + weight = 2 + }, + { + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: `match` rule passed +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + headers["appkey"] = "api-key" + for i = 1, 5 do + local _, _, body = t('/server_port?name=jack&age=36', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1981, 1981, 1982, 1982 + + + +=== TEST 6: `match` rule failed, `age` condition did not match +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + headers["release"] = "green" + for i = 1, 6 do + local _, _, body = t('/server_port?name=jack&age=16', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1980, 1980, 1980, 1980, 1980 + + + +=== TEST 7: upstream nodes are array type and node is the domain name +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") 
+ local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + {host = "test.com", port = 80, weight = 0} + } + }, + weight = 2 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: domain name resolved successfully +--- request +GET /server_port +--- error_code: 502 +--- error_log eval +qr/dns resolver domain: test.com to \d+.\d+.\d+.\d+/ + + + +=== TEST 9: the nodes of upstream are array type, with multiple nodes +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_name", "==", "jack"}, + {"arg_age", ">", "23"}, + {"http_appkey", "~~", "[a-z]{1,5}"} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + {host = "127.0.0.1", port = 1981, weight = 2}, + {host = "127.0.0.1", port = 1982, weight = 2} + } + }, + weight = 4 + }, + { + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: `match` rule passed +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + headers["appkey"] = "api-key" + for i = 1, 5 do + local _, 
_, body = t('/server_port?name=jack&age=36', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1981, 1981, 1982, 1982 + + + +=== TEST 11: the upstream node is an array type and has multiple upstream +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_name", "==", "jack"}, + {"arg_age", ">", "23"}, + {"http_appkey", "~~", "[a-z]{1,5}"} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + {host = "127.0.0.1", port = 1981, weight = 2} + } + }, + weight = 2 + }, + { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + {host = "127.0.0.1", port = 1982, weight = 2} + } + }, + weight = 2 + }, + { + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: `match` rule passed +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + headers["appkey"] = "api-key" + for i = 1, 5 do + local _, _, body = t('/server_port?name=jack&age=36', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1981, 1981, 1982, 1982 + + + +=== TEST 13: multi-upstream, test with unique upstream key +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + 
plugins = { + ["traffic-split"] = { + rules = { { + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + {host = "127.0.0.1", port = 1981, weight = 2} + } + }, + weight = 2 + }, + { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + {host = "127.0.0.1", port = 1982, weight = 2} + } + }, + weight = 2 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: the upstream `key` is unique +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 2 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1981, 1982 +--- grep_error_log eval +qr/upstream_key: roundrobin#route_1_\d/ +--- grep_error_log_out eval +qr/(upstream_key: roundrobin#route_1_1 +upstream_key: roundrobin#route_1_2 +|upstream_key: roundrobin#route_1_2 +upstream_key: roundrobin#route_1_1 +)/ + + + +=== TEST 15: has empty upstream, test the upstream key is unique +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + {host = "127.0.0.1", port = 1981, weight = 2} + } + }, + weight = 1 + }, + { + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + 
ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: the upstream `key` is unique +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 2 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1981 +--- grep_error_log eval +qr/upstream_key: roundrobin#route_1_\d/ +--- grep_error_log_out +upstream_key: roundrobin#route_1_1 + + + +=== TEST 17: the request header contains horizontal lines("-") +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"http_x-api-appkey", "==", "api-key"} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + {host = "127.0.0.1", port = 1981, weight = 2}, + {host = "127.0.0.1", port = 1982, weight = 2} + } + }, + weight = 4 + }, + { + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: `match` rule passed +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + headers["x-api-appkey"] = "api-key" + for i = 1, 5 do + local _, _, body = t('/server_port', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1981, 1981, 1982, 1982 + + + +=== TEST 19: request args and request headers contain horizontal lines("-") 
+--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_x-api-name", "==", "jack"}, + {"arg_x-api-age", ">", "23"}, + {"http_x-api-appkey", "~~", "[a-z]{1,5}"} + } + } + }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + {host = "127.0.0.1", port = 1981, weight = 2}, + {host = "127.0.0.1", port = 1982, weight = 2} + } + }, + weight = 4 + }, + { + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: `match` rule passed +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + headers["x-api-appkey"] = "hello" + for i = 1, 5 do + local _, _, body = t('/server_port?x-api-name=jack&x-api-age=36', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.print(table.concat(bodys, ", ")) + } +} +--- response_body chomp +1980, 1981, 1981, 1982, 1982 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split4.t b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split4.t new file mode 100644 index 0000000..9da4f0f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split4.t @@ -0,0 +1,744 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log && + (defined $block->error_code && $block->error_code != 502)) + { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: set upstream(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/2', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin", + "desc": "new upstream" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: set route(id: 1, upstream_id: 1, upstream_id in plugin: 2), and `weighted_upstreams` does not have a structure with only `weight` +--- config + location /t { + 
content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_name", "==", "James"} + } + } + }, + weighted_upstreams = { + { + upstream_id = 2 + } + } + } } + } + }, + upstream_id = 1 + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: when `match` rule passed, use the `upstream_id` in plugin, and when it failed, use the `upstream_id` in route +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + + for i = 1, 5, 2 do + -- match rule passed + local _, _, body = t('/server_port?name=James', ngx.HTTP_GET) + bodys[i] = body + + -- match rule failed + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i+1] = body + end + + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1981, 1981, 1981, 1982, 1982, 1982 + + + +=== TEST 5: set route(use upstream for route and upstream_id for plugin), and `weighted_upstreams` does not have a structure with only `weight` +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_name", "==", "James"} + } + } + }, + weighted_upstreams = { + { + upstream_id = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: when `match` rule passed, use the 
`upstream_id` in plugin, and when it failed, use the `upstream` in route +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + + for i = 1, 5, 2 do + -- match rule passed + local _, _, body = t('/server_port?name=James', ngx.HTTP_GET) + bodys[i] = body + + -- match rule failed + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i+1] = body + end + + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1980, 1980, 1981, 1981, 1981 + + + +=== TEST 7: set route(id: 1, upstream_id: 1, upstream_id in plugin: 2), and `weighted_upstreams` has a structure with only `weight` +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"uri", "==", "/server_port"} + } + } + }, + weighted_upstreams = { + { + upstream_id = 2, + weight = 1 + }, + { + weight = 1 + } + } + } } + } + }, + upstream_id = 1 + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: all requests `match` rule passed, proxy requests to the upstream of route based on the structure with only `weight` in `weighted_upstreams` +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 6 do + local _, _, body = t('/server_port', ngx.HTTP_GET) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1981, 1981, 1981, 1982, 1982, 1982 + + + +=== TEST 9: the upstream_id is used in the plugin +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = 
"/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_x-api-name", "==", "jack"} + } + } + }, + weighted_upstreams = { + { + upstream_id = 1, + weight = 2 + }, + { + upstream_id = 2, + weight = 1 + }, + { + weight = 2 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 10: `match` rule passed(upstream_id) +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + for i = 1, 5 do + local _, _, body = t('/server_port?x-api-name=jack', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1980, 1981, 1981, 1982 + + + +=== TEST 11: only use upstream_id in the plugin +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_x-api-name", "==", "jack"} + } + } + }, + weighted_upstreams = { + { + upstream_id = 1, + weight = 1 + }, + { + upstream_id = 2, + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: `match` rule passed(only use upstream_id) +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + for i = 1, 4 do + local _, _, body = 
t('/server_port?x-api-name=jack', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1981, 1981, 1982, 1982 + + + +=== TEST 13: use upstream and upstream_id in the plugin +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { + { + vars = { + {"arg_x-api-name", "==", "jack"} + } + } + }, + weighted_upstreams = { + { + upstream_id = 1, + weight = 2 + }, + { + upstream = { + type = "roundrobin", + nodes = { + {host = "127.0.0.1", port = 1982, weight = 1} + } + }, + weight = 1 + }, + { + weight = 2 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: `match` rule passed(upstream + upstream_id) +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local bodys = {} + local headers = {} + headers["x-api-appkey"] = "hello" + for i = 1, 5 do + local _, _, body = t('/server_port?x-api-name=jack', ngx.HTTP_GET, "", nil, headers) + bodys[i] = body + end + table.sort(bodys) + ngx.say(table.concat(bodys, ", ")) + } +} +--- response_body +1980, 1980, 1981, 1981, 1982 + + + +=== TEST 15: set route + upstream (two upstream node: one healthy + one unhealthy) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local up_data = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1981"] = 1, + ["127.0.0.1:1970"] = 1 + }, + checks = { + active = { + http_path = "/status", + host = "foo.com", + healthy = { + interval = 1, + successes 
= 1 + }, + unhealthy = { + interval = 1, + http_failures = 2 + } + } + } + } + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + json.encode(up_data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + weighted_upstreams = { + { + upstream_id = 1, + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: hit routes, ensure the checker is bound to the upstream +--- config +location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/server_port" + + do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + end + + ngx.sleep(2.5) + + local ports_count = {} + for i = 1, 6 do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {method = "GET", keepalive = false}) + if not res then + ngx.say(err) + return + end + + ports_count[res.body] = (ports_count[res.body] or 0) + 1 + end + + local ports_arr = {} + for port, count in pairs(ports_count) do + table.insert(ports_arr, {port = port, count = count}) + end + + local function cmd(a, b) + return a.port > b.port + end + table.sort(ports_arr, cmd) + + ngx.say(require("toolkit.json").encode(ports_arr)) + ngx.exit(200) + } +} +--- response_body +[{"count":6,"port":"1981"}] +--- grep_error_log eval +qr/\([^)]+\) unhealthy .* for '.*'/ +--- grep_error_log_out +(upstream#/apisix/upstreams/1) unhealthy TCP increment (1/2) for 'foo.com(127.0.0.1:1970)' +(upstream#/apisix/upstreams/1) unhealthy TCP increment (2/2) for 'foo.com(127.0.0.1:1970)' +--- timeout: 10 + + + +=== TEST 
17: set upstream(id: 1), by default retries count = number of nodes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1": 1, + "127.0.0.2:1": 1, + "127.0.0.3:1": 1 + }, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: set route(id: 1, upstream_id: 1) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/hello", + plugins = { + ["traffic-split"] = { + rules = { { + weighted_upstreams = { + { + upstream_id = 1, + weight = 1 + } + } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 19: hit routes +--- request +GET /hello +--- error_code: 502 +--- grep_error_log eval +qr/\([^)]+\) while connecting to upstream/ +--- grep_error_log_out +(111: Connection refused) while connecting to upstream +(111: Connection refused) while connecting to upstream +(111: Connection refused) while connecting to upstream diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split5.t b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split5.t new file mode 100644 index 0000000..01df76e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/traffic-split5.t @@ -0,0 +1,636 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + my $http_config = $block->http_config // <<_EOC_; + # fake server, only for test + server { + listen 1970; + location / { + content_by_lua_block { + ngx.say(1970) + } + } + } + + server { + listen 1971; + location / { + content_by_lua_block { + ngx.say(1971) + } + } + } + + server { + listen 1972; + location / { + content_by_lua_block { + ngx.say(1972) + } + } + } + + server { + listen 1973; + location / { + content_by_lua_block { + ngx.say(1973) + } + } + } + + server { + listen 1974; + location / { + content_by_lua_block { + ngx.say(1974) + } + } + } +_EOC_ + + $block->set_value("http_config", $http_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream(multiple rules, multiple nodes under each weighted_upstreams) and add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/hello", + plugins = { + ["traffic-split"] = { + rules = { + { + match = { { + vars = { { "arg_id", "==", "1" } } + } }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1970"] = 1, + ["127.0.0.1:1971"] = 1 
+ } + }, + weight = 1 + } + } + }, + { + match = { { + vars = { { "arg_id", "==", "2" } } + } }, + weighted_upstreams = { + { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + ["127.0.0.1:1972"] = 1, + ["127.0.0.1:1973"] = 1 + } + }, + weight = 1 + } + } + } + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1974"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit different weighted_upstreams by rules +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri) + local port = tonumber(res.body) + if port ~= 1974 then + ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR + ngx.say("failed while no arg_id") + return + end + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello?id=1" + res, err = httpc:request_uri(uri) + port = tonumber(res.body) + if port ~= 1970 and port ~= 1971 then + ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR + ngx.say("failed while arg_id = 1") + return + end + + uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello?id=2" + res, err = httpc:request_uri(uri) + port = tonumber(res.body) + if port ~= 1972 and port ~= 1973 then + ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR + ngx.say("failed while arg_id = 2") + return + end + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 3: set upstream(multiple rules, multiple nodes with different weight under each weighted_upstreams) and add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/hello", + plugins = { + ["traffic-split"] = { + rules = { + { + match = { { + vars = { { "arg_id", "==", "1" } } + } }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1970"] = 2, + ["127.0.0.1:1971"] = 1 + } + }, + weight = 1 + } + } + }, + { + match = { { + vars = { { "arg_id", "==", "2" } } + } }, + weighted_upstreams = { + { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + ["127.0.0.1:1972"] = 2, + ["127.0.0.1:1973"] = 1 + } + }, + weight = 1 + } + } + } + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1974"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: pick different nodes by weight +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello?id=1" + local ports = {} + local res, err + for i = 1, 3 do + res, err = httpc:request_uri(uri) + local port = tonumber(res.body) + ports[i] = port + end + table.sort(ports) + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello?id=2" + for i = 4, 6 do + res, err = httpc:request_uri(uri) + local port = tonumber(res.body) + ports[i] = port + end + table.sort(ports) + + ngx.say(table.concat(ports, ", ")) + } + } +--- response_body +1970, 1970, 1971, 1972, 1972, 1973 + + + +=== TEST 5: set upstream(multiple rules, the first rule has the match attribute and the second rule does not) and add route +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/hello", + plugins = { + ["traffic-split"] = { + rules = { + { + match = { { + vars = { { "arg_id", "==", "1" } } + } }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1970"] = 1 + } + }, + weight = 1 + } + } + }, + { + weighted_upstreams = { + { + upstream = { + name = "upstream_B", + type = "roundrobin", + nodes = { + ["127.0.0.1:1971"] = 1 + } + }, + weight = 1 + }, + { + weight = 1 + } + } + } + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1972"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: first rule match failed and the second rule match success +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello?id=1" + local ports = {} + local res, err + for i = 1, 2 do + res, err = httpc:request_uri(uri) + local port = tonumber(res.body) + ports[i] = port + end + + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello?id=2" + for i = 3, 4 do + res, err = httpc:request_uri(uri) + local port = tonumber(res.body) + ports[i] = port + end + table.sort(ports) + + ngx.say(table.concat(ports, ", ")) + } + } +--- response_body +1970, 1970, 1971, 1972 + + + +=== TEST 7: set up traffic-split rule +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/server_port", + plugins = { + ["traffic-split"] = { + rules = { { + match = { { + vars = { { "arg_name", "==", "jack" } } + } }, + weighted_upstreams = { { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1979"] = 1 + }, + }, + } } + } } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit and check default timeout +--- http_config +proxy_connect_timeout 12345s; +--- request +GET /server_port?name=jack +--- log_level: debug +--- error_log eval +qr/event timer add: \d+: 12345000:\d+/ +--- error_code: 502 + + + +=== TEST 9: set upstream for post_arg_id test case +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/hello", + plugins = { + ["traffic-split"] = { + rules = { + { + match = { { + vars = { { "post_arg_id", "==", "1" } } + } }, + weighted_upstreams = { + { + upstream = { + name = "upstream_A", + type = "roundrobin", + nodes = { + ["127.0.0.1:1970"] = 1 + } + }, + weight = 1 + } + } + } + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1974"] = 1 + } + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- 
response_body +passed + + + +=== TEST 10: post_arg_id = 1 without content-type charset +--- request +POST /hello +id=1 +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- response_body +1970 + + + +=== TEST 11: post_arg_id = 1 with content-type charset +--- request +POST /hello +id=1 +--- more_headers +Content-Type: application/x-www-form-urlencoded;charset=UTF-8 +--- response_body +1970 + + + +=== TEST 12: failure after plugin reload +--- extra_yaml_config +nginx_config: + worker_processes: 1 + +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "roundrobin", + "nodes": { + "127.0.0.1:1970":10 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/upstreams/2', + ngx.HTTP_PUT, + [[{ + "type": "roundrobin", + "nodes": { + "127.0.0.1:1971":10 + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": "2", + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + }, + "upstream_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + + local code, body = t('/hello') + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugins/reload', ngx.HTTP_PUT) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/hello') + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say("passed.") + } + } +--- request +GET /t +--- response_body +passed. 
diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/ua-restriction.t b/CloudronPackages/APISIX/apisix-source/t/plugin/ua-restriction.t new file mode 100644 index 0000000..56f07b3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/ua-restriction.t @@ -0,0 +1,761 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set both allowlist and denylist +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ua-restriction") + local conf = { + allowlist = { + "my-bot1", + "my-bot2" + }, + denylist = { + "my-bot1", + "my-bot2" + }, + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + return + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- response_body +value should match only one schema, but matches both schemas 1 and 2 + + + +=== TEST 2: bypass_missing not boolean +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ua-restriction") + local conf = { + bypass_missing = "foo", + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "bypass_missing" validation failed: wrong type: expected boolean, got string +done + + + +=== TEST 3: allowlist not array +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ua-restriction") + local conf = { + allowlist = "my-bot1", + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "allowlist" validation failed: wrong type: expected array, got string +done + + + +=== TEST 4: denylist not array +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ua-restriction") + local conf = { + denylist = 100, + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + 
ngx.say("done") + } + } +--- response_body +property "denylist" validation failed: wrong type: expected array, got number +done + + + +=== TEST 5: message not string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ua-restriction") + local conf = { + message = 100, + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "message" validation failed: wrong type: expected string, got number +done + + + +=== TEST 6: set denylist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + "denylist": [ + "my-bot1", + "(Baiduspider)/(\\d+)\\.(\\d+)" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit route and user-agent in denylist +--- request +GET /hello +--- more_headers +User-Agent:my-bot1 +--- error_code: 403 +--- response_body +{"message":"Not allowed"} + + + +=== TEST 8: hit route and user-agent in denylist with multiple user-agent +--- request +GET /hello +--- more_headers +User-Agent:my-bot1 +User-Agent:my-bot2 +--- error_code: 403 +--- response_body +{"message":"Not allowed"} + + + +=== TEST 9: hit route and user-agent in denylist with reverse order multiple user-agent +--- request +GET /hello +--- more_headers +User-Agent:my-bot2 +User-Agent:my-bot1 +--- error_code: 403 +--- response_body +{"message":"Not allowed"} + + + +=== TEST 10: hit route and user-agent match denylist regex +--- request +GET /hello +--- more_headers +User-Agent:Baiduspider/3.0 +--- error_code: 403 +--- response_body +{"message":"Not allowed"} + + + +=== TEST 11: hit route and user-agent not in denylist +--- request +GET 
/hello +--- more_headers +User-Agent:foo/bar +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 12: set allowlist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + "allowlist": [ + "my-bot1", + "(Baiduspider)/(\\d+)\\.(\\d+)" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route and user-agent in allowlist +--- request +GET /hello +--- more_headers +User-Agent:my-bot1 +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 14: hit route and user-agent match allowlist regex +--- request +GET /hello +--- more_headers +User-Agent:Baiduspider/3.0 +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 15: hit route and user-agent not in allowlist +--- request +GET /hello +--- more_headers +User-Agent:foo/bar +--- error_code: 403 +--- response_body +{"message":"Not allowed"} + + + +=== TEST 16: hit route and user-agent in allowlist with multiple user-agent +--- request +GET /hello +--- more_headers +User-Agent:foo/bar +User-Agent:my-bot1 +--- response_body +hello world + + + +=== TEST 17: hit route and user-agent in allowlist with reverse order multiple user-agent +--- request +GET /hello +--- more_headers +User-Agent:my-bot1 +User-Agent:foo/bar +--- response_body +hello world + + + +=== TEST 18: message that do not reach the minimum range +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + "message": "" + } + } + }]] + ) + 
+ ngx.say(body) + } + } +--- response_body_like eval +qr/string too short, expected at least 1, got 0/ + + + +=== TEST 19: exceeds the maximum limit of message +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local json = require("toolkit.json") + + local data = { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + } + }, + plugins = { + ["ua-restriction"] = { + denylist = { + "my-bot1", + }, + message = ("-1Aa#"):rep(205) + } + } + } + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + ngx.say(body) + } + } +--- response_body_like eval +qr/string too long, expected at most 1024, got 1025/ + + + +=== TEST 20: set custom message +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + "denylist": [ + "(Baiduspider)/(\\d+)\\.(\\d+)" + ], + "message": "Do you want to do something bad?" 
+ } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } + +--- response_body +passed + + + +=== TEST 21: test custom message +--- request +GET /hello +--- more_headers +User-Agent:Baiduspider/1.0 +--- error_code: 403 +--- response_body +{"message":"Do you want to do something bad?"} + + + +=== TEST 22: test remove ua-restriction, add denylist(part 1) +--- config + location /enable { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + "denylist": [ + "(Baiduspider)/(\\d+)\\.(\\d+)" + ] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /enable +--- error_code: 200 +--- response_body +passed + + + +=== TEST 23: test remove ua-restriction, fail(part 2) +--- request +GET /hello +--- more_headers +User-Agent:Baiduspider/1.0 +--- error_code: 403 +--- response_body +{"message":"Not allowed"} + + + +=== TEST 24: test remove ua-restriction, remove plugin(part 3) +--- config + location /disable { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /disable +--- error_code: 200 +--- response_body +passed + + + +=== TEST 25: test remove ua-restriction, check spider User-Agent(part 4) +--- request +GET /hello +--- more_headers +User-Agent:Baiduspider/1.0 +--- response_body +hello world + + + +=== TEST 26: set disable=true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "ua-restriction": { + "denylist": [ + "foo" + ], + "_meta": { + "disable": true + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 27: the element in allowlist is null +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ua-restriction") + local conf = { + allowlist = { + "userdata: NULL", + null, + nil, + "" + }, + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "allowlist" validation failed: wrong type: expected array, got table +done + + + +=== TEST 28: the element in denylist is null +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.ua-restriction") + local conf = { + denylist = { + "userdata: NULL", + null, + nil, + "" + }, + } + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "denylist" validation failed: wrong type: expected array, got table +done + + + +=== TEST 29: test both allowlist and denylist are not exist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin ua-restriction err: value should match only one schema, but matches none"} + + + +=== TEST 30: test bypass_missing +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, 
body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + "allowlist": [ + "my-bot1" + ] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 31: hit +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Not allowed"} + + + +=== TEST 32: test bypass_missing with true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + "bypass_missing": true, + "denylist": [ + "my-bot1" + ] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 33: hit +--- request +GET /hello +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/udp-logger.t b/CloudronPackages/APISIX/apisix-source/t/plugin/udp-logger.t new file mode 100644 index 0000000..b20248b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/udp-logger.t @@ -0,0 +1,539 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.udp-logger") + local ok, err = plugin.check_schema({host = "127.0.0.1", port = 3000}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: missing host +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.udp-logger") + local ok, err = plugin.check_schema({port = 3000}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "host" is required +done + + + +=== TEST 3: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.udp-logger") + local ok, err = plugin.check_schema({host= "127.0.0.1", port = 3000, timeout = "10"}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "timeout" validation failed: wrong type: expected integer, got string +done + + + +=== TEST 4: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "udp-logger": { + "host": "127.0.0.1", + "port": 2000, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": 
"/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: access +--- request +GET /opentracing +--- response_body +opentracing +--- wait: 1 + + + +=== TEST 6: error log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "udp-logger": { + "host": "312.0.0.1", + "port": 2000, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" + local res, err = httpc:request_uri(uri, {method = "GET"}) + } + } +--- request +GET /t +--- error_log +failed to connect to UDP server: host[312.0.0.1] port[2000] +[error] +--- wait: 5 + + + +=== TEST 7: check plugin configuration updating +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body1 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "udp-logger": { + "host": "127.0.0.1", + "port": 2000, + "tls": false, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body2 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, body3 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "udp-logger": { + "host": "127.0.0.1", + "port": 2002, + "tls": false, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + 
}, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body4 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.print(body1) + ngx.print(body2) + ngx.print(body3) + ngx.print(body4) + } + } +--- request +GET /t +--- wait: 0.5 +--- response_body +passedopentracing +passedopentracing +--- grep_error_log eval +qr/sending a batch logs to 127.0.0.1:(\d+)/ +--- grep_error_log_out +sending a batch logs to 127.0.0.1:2000 +sending a batch logs to 127.0.0.1:2002 + + + +=== TEST 8: bad custom log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/udp-logger', + ngx.HTTP_PUT, + [[{ + "log_format": "'$host' '$time_iso8601'" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(body) + return + end + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"log_format\" validation failed: wrong type: expected object, got string"} + + + +=== TEST 9: configure plugin and access route /hello +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "udp-logger": { + "host": "127.0.0.1", + "port": 8127, + "tls": false, + "batch_max_size": 1, + "inactive_timeout": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugin_metadata/udp-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "case name": "plugin_metadata", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + 
ngx.say(body) + return + end + + ngx.say(body) + local code, _, _ = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: check if log exists to confirm if logging server was hit +--- exec +tail -n 1 ci/pod/vector/udp.log +--- response_body eval +qr/.*plugin_metadata.*/ + + + +=== TEST 11: configure plugin and access route /hello +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "udp-logger": { + "host": "127.0.0.1", + "port": 8127, + "tls": false, + "batch_max_size": 1, + "inactive_timeout": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/plugin_metadata/udp-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "case name": "logger format in plugin", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + local code, _, _ = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: check log format from logging server +--- exec +tail -n 1 ci/pod/vector/udp.log +--- response_body eval +qr/.*logger format in plugin.*/ + + + +=== TEST 13: add plugin with 'include_req_body' setting, collect request log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/udp-logger', ngx.HTTP_DELETE) + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "udp-logger": { + "host": "127.0.0.1", + "port": 
8127, + "tls": false, + "batch_max_size": 1, + "inactive_timeout": 1, + "include_req_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + + local code, _, body = t("/hello", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- request +GET /t +--- error_log +"body":"{\"sample_payload\":\"hello\"}" + + + +=== TEST 14: add plugin with 'include_resp_body' setting, collect request log +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/plugin_metadata/udp-logger', ngx.HTTP_DELETE) + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "udp-logger": { + "host": "127.0.0.1", + "port": 8127, + "tls": false, + "batch_max_size": 1, + "inactive_timeout": 1, + "include_resp_body": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + + local code, _, body = t("/hello", "POST", "{\"sample_payload\":\"hello\"}") + } + } +--- request +GET /t +--- error_log +"body":"hello world\n" diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/uri-blocker.t b/CloudronPackages/APISIX/apisix-source/t/plugin/uri-blocker.t new file mode 100644 index 0000000..616f3e8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/uri-blocker.t @@ -0,0 +1,470 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +run_tests; + +__DATA__ + +=== TEST 1: invalid regular expression +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": [".+("] + } + }, + "uri": "/hello" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } +} +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin uri-blocker err: pcre_compile() failed: missing ) in \".+(\""} + + + +=== TEST 2: multiple valid rules +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["^a", "^b"] + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: multiple rules(include one invalid rule) +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["^a", "^b("] + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } +} +--- request +GET /t +--- error_code: 400 +--- response_body 
+{"error_msg":"failed to check the configuration of plugin uri-blocker err: pcre_compile() failed: missing ) in \"^b(\""} + + + +=== TEST 4: one block rule +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["aa"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit block rule +--- request +GET /hello?aa=1 +--- error_code: 403 +--- error_log +concat block_rules: aa + + + +=== TEST 6: miss block rule +--- request +GET /hello?bb=2 +--- error_log +concat block_rules: aa + + + +=== TEST 7: multiple block rules +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["aa", "bb", "c\\d+"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit block rule +--- request +GET /hello?x=bb +--- error_code: 403 +--- error_log +concat block_rules: aa|bb|c\d+, + + + +=== TEST 9: hit block rule +--- request +GET /hello?bb=2 +--- error_code: 403 +--- error_log +concat block_rules: aa|bb|c\d+, + + + +=== TEST 10: hit block rule +--- request +GET /hello?c1=2 +--- error_code: 403 + + + +=== TEST 11: not hit block rule +--- request +GET /hello?cc=2 + + + +=== TEST 12: SQL injection +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + 
"plugins": { + "uri-blocker": { + "block_rules": ["select.+(from|limit)", "(?:(union(.*?)select))"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: hit block rule +--- request +GET /hello?name=;select%20from%20sys +--- error_code: 403 +--- error_log +concat block_rules: select.+(from|limit)|(?:(union(.*?)select)), + + + +=== TEST 14: hit block rule +--- request +GET /hello?name=;union%20select%20 +--- error_code: 403 + + + +=== TEST 15: not hit block rule +--- request +GET /hello?cc=2 + + + +=== TEST 16: invalid rejected_msg length or type +--- config +location /t { + content_by_lua_block { + local data = { + { + input = { + plugins = { + ["uri-blocker"] = { + block_rules = { "^a" }, + rejected_msg = "", + }, + }, + uri = "/hello", + }, + output = { + error_msg = "failed to check the configuration of plugin uri-blocker err: property \"rejected_msg\" validation failed: string too short, expected at least 1, got 0", + }, + }, + { + input = { + plugins = { + ["uri-blocker"] = { + block_rules = { "^a" }, + rejected_msg = true, + }, + }, + uri = "/hello", + }, + output = { + error_msg = "failed to check the configuration of plugin uri-blocker err: property \"rejected_msg\" validation failed: wrong type: expected string, got boolean", + }, + }, + } + + local t = require("lib.test_admin").test + local err_count = 0 + for i in ipairs(data) do + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, data[i].input, data[i].output) + + if code >= 300 then + err_count = err_count + 1 + end + ngx.print(body) + end + + assert(err_count == #data) + } +} +--- request +GET /t + + + +=== TEST 17: one block rule, with rejected_msg +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["aa"], + "rejected_msg": "access is not allowed" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } +} +--- request +GET /t + + + +=== TEST 18: hit block rule and return rejected_msg +--- request +GET /hello?aa=1 +--- error_code: 403 +--- response_body +{"error_msg":"access is not allowed"} + + + +=== TEST 19: one block rule, with case insensitive +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["AA"], + "rejected_msg": "access is not allowed", + "case_insensitive": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } +} +--- request +GET /t + + + +=== TEST 20: hit block rule +--- request +GET /hello?aa=1 +--- error_code: 403 +--- response_body +{"error_msg":"access is not allowed"} + + + +=== TEST 21: add block rule with anchor +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "uri-blocker": { + "block_rules": ["^/internal/"] + } + }, + "uri": "/internal/*" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } +} +--- request +GET /t + + + +=== TEST 22: can't bypass with url without normalization +--- request +GET /./internal/x?aa=1 +--- error_code: 403 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/wolf-rbac.t b/CloudronPackages/APISIX/apisix-source/t/plugin/wolf-rbac.t new file mode 100644 index 0000000..8136e3d --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/plugin/wolf-rbac.t @@ -0,0 +1,737 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; +} + +use t::APISIX 'no_plan'; + + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.wolf-rbac") + local conf = {} + + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + end + + ngx.say(require("toolkit.json").encode(conf)) + } + } +--- response_body_like eval +qr/\{"appid":"unset","header_prefix":"X-","server":"http:\/\/127\.0\.0\.1:12180"\}/ + + + +=== TEST 2: wrong type of string +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.wolf-rbac") + local ok, err = plugin.check_schema({appid = 123}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- response_body +property "appid" validation failed: wrong type: expected string, got number +done + + + +=== TEST 3: setup public API route 
+--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/routes/wolf-login", + data = [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/wolf-rbac/login" + }]] + }, + { + url = "/apisix/admin/routes/wolf-userinfo", + data = [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/wolf-rbac/user_info" + }]] + }, + { + url = "/apisix/admin/routes/wolf-change-pwd", + data = [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/wolf-rbac/change_pwd" + }]] + }, + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + ngx.say(body) + end + } + } +--- response_body eval +"passed\n" x 3 + + + +=== TEST 4: add consumer with username and plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "wolf_rbac_unit_test", + "plugins": { + "wolf-rbac": { + "appid": "wolf-rbac-app", + "server": "http://127.0.0.1:1982" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: enable wolf rbac plugin using admin api +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "wolf-rbac": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello*","/wolf/rbac/*"] + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: login failed, appid is missing +--- request +POST /apisix/plugin/wolf-rbac/login +username=admin&password=123456 +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 400 +--- response_body_like eval +qr/appid is missing/ 
+ + + +=== TEST 7: login failed, appid not found +--- request +POST /apisix/plugin/wolf-rbac/login +appid=not-found&username=admin&password=123456 +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 400 +--- response_body_like eval +qr/appid not found/ + + + +=== TEST 8: login failed, username missing +--- request +POST /apisix/plugin/wolf-rbac/login +appid=wolf-rbac-app&password=123456 +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 200 +--- response_body +{"message":"request to wolf-server failed!"} +--- grep_error_log eval +qr/ERR_USERNAME_MISSING/ +--- grep_error_log_out eval +qr/ERR_USERNAME_MISSING/ + + + +=== TEST 9: login failed, password missing +--- request +POST /apisix/plugin/wolf-rbac/login +appid=wolf-rbac-app&username=admin +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 200 +--- response_body +{"message":"request to wolf-server failed!"} +--- grep_error_log eval +qr/ERR_PASSWORD_MISSING/ +--- grep_error_log_out eval +qr/ERR_PASSWORD_MISSING/ + + + +=== TEST 10: login failed, username not found +--- request +POST /apisix/plugin/wolf-rbac/login +appid=wolf-rbac-app&username=not-found&password=123456 +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 200 +--- response_body +{"message":"request to wolf-server failed!"} +--- grep_error_log eval +qr/ERR_USER_NOT_FOUND/ +--- grep_error_log_out eval +qr/ERR_USER_NOT_FOUND/ + + + +=== TEST 11: login failed, wrong password +--- request +POST /apisix/plugin/wolf-rbac/login +appid=wolf-rbac-app&username=admin&password=wrong-password +--- more_headers +Content-Type: application/x-www-form-urlencoded +--- error_code: 200 +--- response_body +{"message":"request to wolf-server failed!"} +--- grep_error_log eval +qr/ERR_PASSWORD_ERROR/ +--- grep_error_log_out eval +qr/ERR_PASSWORD_ERROR/ + + + +=== TEST 12: login successfully +--- config + location /t { + content_by_lua_block { + 
local t = require("lib.test_admin").test + local code, body = t('/apisix/plugin/wolf-rbac/login', + ngx.HTTP_POST, + [[ + {"appid": "wolf-rbac-app", "username": "admin","password": "123456"} + ]], + [[ + {"rbac_token":"V1#wolf-rbac-app#wolf-rbac-token","user_info":{"nickname":"administrator","username":"admin","id":"100"}} + ]], + {["Content-Type"] = "application/json"} + ) + ngx.status = code + } + } + + + +=== TEST 13: verify, missing token +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"Missing rbac token in request"} + + + +=== TEST 14: verify: invalid rbac token +--- request +GET /hello +--- error_code: 401 +--- more_headers +x-rbac-token: invalid-rbac-token +--- response_body +{"message":"invalid rbac token: parse failed"} + + + +=== TEST 15: verify: invalid appid in rbac token +--- request +GET /hello +--- error_code: 401 +--- more_headers +x-rbac-token: V1#invalid-appid#rbac-token +--- response_body +{"message":"Invalid appid in rbac token"} +--- error_log +consumer [invalid-appid] not found + + + +=== TEST 16: verify: failed +--- request +GET /hello1 +--- error_code: 403 +--- more_headers +x-rbac-token: V1#wolf-rbac-app#wolf-rbac-token +--- response_body +{"message":"ERR_ACCESS_DENIED"} +--- grep_error_log eval +qr/ERR_ACCESS_DENIED */ +--- grep_error_log_out +ERR_ACCESS_DENIED +ERR_ACCESS_DENIED +ERR_ACCESS_DENIED + + + +=== TEST 17: verify (in argument) +--- request +GET /hello?rbac_token=V1%23wolf-rbac-app%23wolf-rbac-token +--- response_headers +X-UserId: 100 +X-Username: admin +X-Nickname: administrator +--- response_body +hello world + + + +=== TEST 18: verify (in header Authorization) +--- request +GET /hello +--- more_headers +Authorization: V1#wolf-rbac-app#wolf-rbac-token +--- response_headers +X-UserId: 100 +X-Username: admin +X-Nickname: administrator +--- response_body +hello world + + + +=== TEST 19: verify (in header x-rbac-token) +--- request +GET /hello +--- more_headers +x-rbac-token: 
V1#wolf-rbac-app#wolf-rbac-token +--- response_headers +X-UserId: 100 +X-Username: admin +X-Nickname: administrator +--- response_body +hello world + + + +=== TEST 20: verify (in cookie) +--- request +GET /hello +--- more_headers +Cookie: x-rbac-token=V1#wolf-rbac-app#wolf-rbac-token +--- response_headers +X-UserId: 100 +X-Username: admin +X-Nickname: administrator +--- response_body +hello world + + + +=== TEST 21: get userinfo failed, missing token +--- request +GET /apisix/plugin/wolf-rbac/user_info +--- error_code: 401 +--- response_body +{"message":"Missing rbac token in request"} + + + +=== TEST 22: get userinfo failed, invalid rbac token +--- request +GET /apisix/plugin/wolf-rbac/user_info +--- error_code: 401 +--- more_headers +x-rbac-token: invalid-rbac-token +--- response_body +{"message":"invalid rbac token: parse failed"} + + + +=== TEST 23: get userinfo +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/plugin/wolf-rbac/user_info', + ngx.HTTP_GET, + nil, + [[ +{"user_info":{"username":"admin","id":"100","nickname":"administrator"}} + ]], + {Cookie = "x-rbac-token=V1#wolf-rbac-app#wolf-rbac-token"} + ) + ngx.status = code + } + } + + + +=== TEST 24: change password failed, old password incorrect +--- request +PUT /apisix/plugin/wolf-rbac/change_pwd +{"oldPassword": "error", "newPassword": "abcdef"} +--- more_headers +Content-Type: application/json +Cookie: x-rbac-token=V1#wolf-rbac-app#wolf-rbac-token +--- error_code: 200 +--- response_body +{"message":"request to wolf-server failed!"} +--- grep_error_log eval +qr/ERR_OLD_PASSWORD_INCORRECT/ +--- grep_error_log_out eval +qr/ERR_OLD_PASSWORD_INCORRECT/ + + + +=== TEST 25: change password +--- request +PUT /apisix/plugin/wolf-rbac/change_pwd +{"oldPassword":"123456", "newPassword": "abcdef"} +--- more_headers +Content-Type: application/json +Cookie: x-rbac-token=V1#wolf-rbac-app#wolf-rbac-token +--- error_code: 200 +--- 
response_body_like eval +qr/success to change password/ + + + +=== TEST 26: custom headers in request headers +--- request +GET /wolf/rbac/custom/headers?rbac_token=V1%23wolf-rbac-app%23wolf-rbac-token +--- response_headers +X-UserId: 100 +X-Username: admin +X-Nickname: administrator +--- response_body +id:100,username:admin,nickname:administrator + + + +=== TEST 27: change password by post raw args +--- request +PUT /apisix/plugin/wolf-rbac/change_pwd +oldPassword=123456&newPassword=abcdef +--- more_headers +Cookie: x-rbac-token=V1#wolf-rbac-app#wolf-rbac-token +--- error_code: 200 +--- response_body_like eval +qr/success to change password/ + + + +=== TEST 28: change password by post raw args, greater than 100 args is ok +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local headers = { + ["Cookie"] = "x-rbac-token=V1#wolf-rbac-app#wolf-rbac-token" + } + local tbl = {} + for i=1, 100 do + tbl[i] = "test"..tostring(i).."=test&" + end + tbl[101] = "oldPassword=123456&newPassword=abcdef" + local code, _, real_body = t.test('/apisix/plugin/wolf-rbac/change_pwd', + ngx.HTTP_PUT, + table.concat(tbl, ""), + nil, + headers + ) + ngx.status = 200 + ngx.say(real_body) + } +} +--- response_body_like eval +qr/success to change password/ + + + +=== TEST 29: verify: failed, server internal error +--- request +GET /hello/500 +--- error_code: 500 +--- more_headers +x-rbac-token: V1#wolf-rbac-app#wolf-rbac-token +--- response_body +{"message":"request to wolf-server failed, status:500"} +--- grep_error_log eval +qr/request to wolf-server failed, status:500 */ +--- grep_error_log_out +request to wolf-server failed, status:500 +request to wolf-server failed, status:500 + + + +=== TEST 30: verify: failed, token is expired +--- request +GET /hello/401 +--- error_code: 401 +--- more_headers +x-rbac-token: V1#wolf-rbac-app#wolf-rbac-token +--- response_body +{"message":"ERR_TOKEN_INVALID"} +--- grep_error_log eval +qr/ERR_TOKEN_INVALID */ 
+--- grep_error_log_out +ERR_TOKEN_INVALID +ERR_TOKEN_INVALID +ERR_TOKEN_INVALID + + + +=== TEST 31: set hmac-auth conf: appid uses secret ref +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "root" + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "wolf_rbac_unit_test", + "plugins": { + "wolf-rbac": { + "appid": "$secret://vault/test1/wolf_rbac_unit_test/appid", + "server": "http://127.0.0.1:1982" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 32: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/wolf_rbac_unit_test appid=wolf-rbac-app +--- response_body +Success! 
Data written to: kv/apisix/wolf_rbac_unit_test + + + +=== TEST 33: login successfully +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/plugin/wolf-rbac/login', + ngx.HTTP_POST, + [[ + {"appid": "wolf-rbac-app", "username": "admin","password": "123456"} + ]], + [[ + {"rbac_token":"V1#wolf-rbac-app#wolf-rbac-token","user_info":{"nickname":"administrator","username":"admin","id":"100"}} + ]], + {["Content-Type"] = "application/json"} + ) + ngx.status = code + } + } + + + +=== TEST 34: set hmac-auth conf with the token in an env var: appid uses secret ref +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix" : "kv/apisix", + "token" : "$ENV://VAULT_TOKEN" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "wolf_rbac_unit_test", + "plugins": { + "wolf-rbac": { + "appid": "$secret://vault/test1/wolf_rbac_unit_test/appid", + "server": "http://127.0.0.1:1982" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 35: login successfully +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/plugin/wolf-rbac/login', + ngx.HTTP_POST, + [[ + {"appid": "wolf-rbac-app", "username": "admin","password": "123456"} + ]], + [[ + {"rbac_token":"V1#wolf-rbac-app#wolf-rbac-token","user_info":{"nickname":"administrator","username":"admin","id":"100"}} + ]], + {["Content-Type"] = "application/json"} + ) + ngx.status = code + } + } + + + +=== TEST 36: add consumer with echo plugin +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "wolf_rbac_with_other_plugins", + "plugins": { + "wolf-rbac": { + "appid": "wolf-rbac-app", + "server": "http://127.0.0.1:1982" + }, + "echo": { + "body": "consumer merge echo plugins\n" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 37: verify echo plugin in consumer +--- request +GET /hello +--- more_headers +Authorization: V1#wolf-rbac-app#wolf-rbac-token +--- response_headers +X-UserId: 100 +X-Username: admin +X-Nickname: administrator +--- response_body +consumer merge echo plugins +--- no_error_log +[error] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/workflow-without-case.t b/CloudronPackages/APISIX/apisix-source/t/plugin/workflow-without-case.t new file mode 100644 index 0000000..2ce469a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/workflow-without-case.t @@ -0,0 +1,85 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + + +__DATA__ + +=== TEST 1: set plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "actions": [ + [ + "return", + { + "code": 403 + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: trigger workflow +--- request +GET /hello +--- error_code: 403 diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/workflow.t b/CloudronPackages/APISIX/apisix-source/t/plugin/workflow.t new file mode 100644 index 0000000..022aa48 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/workflow.t @@ -0,0 +1,745 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + + +__DATA__ + +=== TEST 1: schema check +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.workflow") + local data = { + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + status = 403 + } + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = "403" + } + } + } + } + } + }, + { + rules = { + { + case = { + + }, + actions = { + { + "return", + { + code = 403 + } + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "fake", + { + code = 403 + } + } + } + } + } + } + } + + for _, conf in ipairs(data) do + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } + } +--- response_body +done +property "rules" validation failed: failed to validate item 1: property "actions" is required +property "rules" validation failed: failed to validate item 1: property "actions" validation failed: failed to validate item 1: expect array to have at least 1 items +failed to validate the 'return' action: property "code" is required +failed to validate the 'return' action: property "code" validation failed: wrong type: expected integer, got string +property "rules" validation failed: failed to validate item 1: property "case" validation failed: expect array to have at least 1 
items +unsupported action: fake + + + +=== TEST 2: set plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", "/hello"] + ], + "actions": [ + [ + "return", + { + "code": 403 + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: trigger workflow +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 4: multiple conditions in one case +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", "/hello"], + ["arg_foo", "==", "bar"] + ], + "actions": [ + [ + "return", + { + "code": 403 + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: missing match the only case +--- request +GET /hello?foo=bad + + + +=== TEST 6: trigger workflow +--- request +GET /hello?foo=bar +--- error_code: 403 +--- response_body +{"error_msg":"rejected by workflow"} + + + +=== TEST 7: multiple cases with different actions +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello2"} + }, + actions = { + { + 
"return", + { + code = 401 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: trigger one case +--- request +GET /hello +--- error_code: 403 + + + +=== TEST 9: trigger another case +--- request +GET /hello2 +--- error_code: 401 + + + +=== TEST 10: match case in order +# rules is an array, match in the order of the index of the array, +# when cases are matched, actions are executed and do not continue +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"arg_foo", "==", "bar"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 401 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: both case 1&2 matched, trigger the first cases +--- request +GET /hello?foo=bar +--- error_code: 403 + + + +=== TEST 12: case 1 mismatched, trigger the second cases +--- request +GET /hello?foo=bad +--- error_code: 401 + + + +=== TEST 13: all cases mismatched, pass to upstream +--- request +GET /hello1 +--- response_body +hello1 world + + + +=== TEST 14: schema check(limit-count) +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.workflow") + local data = { + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { 
+ { + "limit-count", + {count = 2, time_window = 60, rejected_code = 503, key = 'remote_addr'} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {count = 2} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + {time_window = 60} + } + } + } + } + }, + { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 2, + time_window = 60, + rejected_code = 503, + group = "services_1" + } + } + } + } + } + } + } + + for _, conf in ipairs(data) do + local ok, err = plugin.check_schema(conf) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + end + } + } +--- response_body +done +failed to validate the 'limit-count' action: property "time_window" is required +failed to validate the 'limit-count' action: property "count" is required +failed to validate the 'limit-count' action: group is not supported + + + +=== TEST 15: set actions as limit-count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + ["uri", "==", "/hello"] + ], + "actions": [ + [ + "limit-count", + { + "count": 3, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 200, 503] + + + +=== TEST 17: the conf in actions is isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = 
require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: cross-hit case 1 and case 2, up limit by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1", +"GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 200, 200, 200, 200, 503, 503] + + + +=== TEST 19: multiple conditions in one case +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "workflow": { + "rules": [ + { + "case": [ + "OR", + ["arg_foo", "==", "bar"], + ["uri", "==", "/hello"] + ], + "actions": [ + [ + "return", + { + "code": 403 + } + ] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: trigger workflow +--- request +GET /hello +--- error_code: 403 +--- response_body +{"error_msg":"rejected by workflow"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/workflow2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/workflow2.t new file mode 100644 index 0000000..686e4bb --- 
/dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/workflow2.t @@ -0,0 +1,318 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + + +__DATA__ + +=== TEST 1: multiple cases with different actions(return & limit-count) +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "return", + { + code = 403 + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 1, + time_window = 60, + rejected_code = 503 + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, 
body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: cross-hit case 1 and case 2, trigger actions by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello1"] +--- error_code eval +[403, 200, 503] + + + +=== TEST 3: the conf in actions is isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 3, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: cross-hit case 1 and case 2, trigger actions by isolation +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 200, 200] + + + +=== TEST 5: cross-hit case 1 and case 2, up limit by isolation 2 +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 6: different actions with different limit count conf, up limit by isolation +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + rules = { + { + 
case = { + {"uri", "==", "/hello"} + }, + actions = { + { + "limit-count", + { + count = 1, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + }, + { + case = { + {"uri", "==", "/hello1"} + }, + actions = { + { + "limit-count", + { + count = 2, + time_window = 60, + rejected_code = 503, + key = "remote_addr" + } + } + } + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: case 1 up limit, case 2 psssed +--- pipelined_requests eval +["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] +--- error_code eval +[200, 200, 503, 200] + + + +=== TEST 8: test no rules +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin workflow err: property \"rules\" is required"} diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/zipkin.t b/CloudronPackages/APISIX/apisix-source/t/plugin/zipkin.t new file mode 100644 index 0000000..a227f35 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/zipkin.t @@ -0,0 +1,484 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +log_level("info"); +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.zipkin") + local ok, err = plugin.check_schema({endpoint = 'http://127.0.0.1', sample_ratio = 0.001}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 2: wrong value of ratio +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.zipkin") + local ok, err = plugin.check_schema({endpoint = 'http://127.0.0.1', sample_ratio = -0.1}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "sample_ratio" validation failed: expected -0.1 to be at least 1e-05 +done + + + +=== TEST 3: wrong value of ratio +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.zipkin") + local ok, err = plugin.check_schema({endpoint = 'http://127.0.0.1', sample_ratio = 2}) + if not ok then + ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +property "sample_ratio" validation failed: expected 2 to be at most 1 +done + + + +=== TEST 4: add plugin +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:1980/mock_zipkin?server_addr=127.0.0.1", + "sample_ratio": 1, + "service_name": "APISIX" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: tiger zipkin +--- request +GET /opentracing +--- wait: 10 + + + +=== TEST 6: change sample ratio +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9999/mock_zipkin", + "sample_ratio": 0.00001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: not tiger zipkin +--- request +GET /opentracing +--- response_body +opentracing + + + +=== TEST 8: disabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: not tiger zipkin +--- request +GET /opentracing +--- response_body +opentracing + + + +=== TEST 10: set plugin with external ip address +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + 
ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:1980/mock_zipkin?server_addr=1.2.3.4", + "sample_ratio": 1, + "service_name": "apisix", + "server_addr": "1.2.3.4" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: tiger zipkin +--- request +GET /opentracing +--- wait: 10 + + + +=== TEST 12: sanity server_addr +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.plugins.zipkin") + local ok, err = plugin.check_schema({ + endpoint = 'http://127.0.0.1', + sample_ratio = 0.001, + server_addr = 'badip' + }) + if not ok then + ngx.say(err) + else + ngx.say("done") + end + } + } +--- request +GET /t +--- response_body +property "server_addr" validation failed: failed to match pattern "^[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}$" with "badip" + + + +=== TEST 13: check zipkin headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9999/mock_zipkin", + "sample_ratio": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: set x-b3-sampled if sampled +--- request +GET /echo +--- response_headers +x-b3-sampled: 1 + + + +=== TEST 15: don't sample if disabled +--- request +GET /echo +--- more_headers +x-b3-sampled: 0 +--- response_headers +x-b3-sampled: 0 + + + +=== TEST 16: don't sample if disabled (old way) +--- request +GET /echo +--- more_headers +x-b3-sampled: false +--- response_headers +x-b3-sampled: 0 + + + 
+=== TEST 17: sample according to the header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9999/mock_zipkin", + "sample_ratio": 0.00001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: don't sample by default +--- request +GET /echo +--- response_headers +x-b3-sampled: 0 + + + +=== TEST 19: sample if needed +--- request +GET /echo +--- more_headers +x-b3-sampled: 1 +--- response_headers +x-b3-sampled: 1 + + + +=== TEST 20: sample if debug +--- request +GET /echo +--- more_headers +x-b3-flags: 1 +--- response_headers +x-b3-sampled: 1 + + + +=== TEST 21: sample if needed (old way) +--- request +GET /echo +--- more_headers +x-b3-sampled: true +--- response_headers +x-b3-sampled: 1 + + + +=== TEST 22: don't cache the per-req sample ratio +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/echo" + -- force to trace + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ['x-b3-sampled'] = 1 + } + }) + if not res then + ngx.say(err) + return + end + ngx.say(res.headers['x-b3-sampled']) + + -- force not to trace + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ['x-b3-sampled'] = 0 + } + }) + if not res then + ngx.say(err) + return + end + ngx.say(res.headers['x-b3-sampled']) + } + } +--- request +GET /t +--- response_body +1 +0 + + + +=== TEST 23: no error in log phase while b3 header invalid +--- request +GET /echo +--- more_headers +b3: 80f198ee56343ba864fe8b2a57d3eff7 +--- response_headers +x-b3-sampled: +--- error_code: 400 +--- error_log +invalid b3 header +--- no_error_log +attempt to index local 'opentracing' (a nil value) diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/zipkin2.t b/CloudronPackages/APISIX/apisix-source/t/plugin/zipkin2.t new file mode 100644 index 0000000..fd34707 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/zipkin2.t @@ -0,0 +1,260 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /echo"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + my $extra_init_by_lua = <<_EOC_; + local new = require("opentracing.tracer").new + local tracer_mt = getmetatable(new()).__index + local orig_func = tracer_mt.start_span + tracer_mt.start_span = function (...) + local orig = orig_func(...) + local mt = getmetatable(orig).__index + local old_start_child_span = mt.start_child_span + mt.start_child_span = function(self, name, time) + ngx.log(ngx.WARN, "zipkin start_child_span ", name, " time: ", time) + return old_start_child_span(self, name, time) + end + return orig + end +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + +}); + +run_tests; + +__DATA__ + +=== TEST 1: b3 single header +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9999/mock_zipkin", + "sample_ratio": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: sanity +--- more_headers +b3: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-1-05e3ac9a4f6e3b90 +--- response_headers +x-b3-sampled: 1 +x-b3-traceid: 80f198ee56343ba864fe8b2a57d3eff7 +--- raw_response_headers_unlike +b3: +--- error_log +new span context: trace id: 80f198ee56343ba864fe8b2a57d3eff7, span id: e457b5a2e4d86bd1, parent span id: 05e3ac9a4f6e3b90 +--- grep_error_log eval +qr/zipkin start_child_span apisix.response_span time: nil/ +--- 
grep_error_log_out + + + +=== TEST 3: invalid header +--- more_headers +b3: 80f198ee56343ba864fe8b2a57d3eff7 +--- response_headers +x-b3-sampled: +--- error_code: 400 +--- error_log +invalid b3 header + + + +=== TEST 4: disable via b3 +--- more_headers +b3: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-0-05e3ac9a4f6e3b90 +--- response_headers_like +x-b3-sampled: 0 +x-b3-traceid: 80f198ee56343ba864fe8b2a57d3eff7 +x-b3-parentspanid: e457b5a2e4d86bd1 +x-b3-spanid: \w+ + + + +=== TEST 5: disable via b3 (abbr) +--- more_headers +b3: 0 +--- response_headers_like +x-b3-sampled: 0 +x-b3-spanid: \w+ +x-b3-traceid: \w+ + + + +=== TEST 6: debug via b3 +--- more_headers +b3: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-d-05e3ac9a4f6e3b90 +--- response_headers +x-b3-sampled: 1 +x-b3-flags: 1 + + + +=== TEST 7: b3 without parent span id +--- more_headers +b3: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-d +--- response_headers +x-b3-sampled: 1 +x-b3-flags: 1 +--- error_log +new span context: trace id: 80f198ee56343ba864fe8b2a57d3eff7, span id: e457b5a2e4d86bd1, parent span id: nil + + + +=== TEST 8: b3 without sampled & parent span id +--- more_headers +b3: 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1 +--- response_headers +x-b3-sampled: 1 +--- error_log +new span context: trace id: 80f198ee56343ba864fe8b2a57d3eff7, span id: e457b5a2e4d86bd1, parent span id: nil + + + +=== TEST 9: set plugin with span version 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:1980/mock_zipkin?span_version=1", + "sample_ratio": 1, + "service_name": "apisix", + "span_version": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t + + + 
+=== TEST 10: tiger zipkin +--- request +GET /opentracing +--- wait: 10 +--- grep_error_log eval +qr/zipkin start_child_span apisix.response_span time: nil/ +--- grep_error_log_out + + + +=== TEST 11: check not error with limit count +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9999/mock_zipkin", + "sample_ratio": 1, + "service_name": "APISIX" + }, + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 403, + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- pipelined_requests eval +["GET /t", "GET /opentracing", "GET /opentracing", "GET /opentracing"] +--- error_code eval +[200, 200, 200, 403] diff --git a/CloudronPackages/APISIX/apisix-source/t/plugin/zipkin3.t b/CloudronPackages/APISIX/apisix-source/t/plugin/zipkin3.t new file mode 100644 index 0000000..2d743ff --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/plugin/zipkin3.t @@ -0,0 +1,131 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - zipkin +plugin_attr: + zipkin: + set_ngx_var: true +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $upstream_server_config = $block->upstream_server_config // <<_EOC_; + set \$zipkin_context_traceparent ""; + set \$zipkin_trace_id ""; + set \$zipkin_span_id ""; +_EOC_ + + $block->set_value("upstream_server_config", $upstream_server_config); + + my $extra_init_by_lua = <<_EOC_; + local zipkin = require("apisix.plugins.zipkin") + local orig_func = zipkin.access + zipkin.access = function (...) + local traceparent = ngx.var.zipkin_context_traceparent + if traceparent == nil or traceparent == '' then + ngx.log(ngx.ERR,"ngx_var.zipkin_context_traceparent is empty") + else + ngx.log(ngx.ERR,"ngx_var.zipkin_context_traceparent:",ngx.var.zipkin_context_traceparent) + end + + local orig = orig_func(...) 
+ return orig + end +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: add plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "zipkin": { + "endpoint": "http://127.0.0.1:9999/mock_zipkin", + "sample_ratio": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: trigger zipkin with open set variables +--- request +GET /echo +--- error_log eval +qr/ngx_var.zipkin_context_traceparent:00-\w{32}-\w{16}-01*/ + + + +=== TEST 3: trigger zipkin with disable set variables +--- extra_yaml_config +plugins: + - zipkin +plugin_attr: + zipkin: + set_ngx_var: false +--- request +GET /echo +--- error_log +ngx_var.zipkin_context_traceparent is empty diff --git a/CloudronPackages/APISIX/apisix-source/t/pnpm-lock.yaml b/CloudronPackages/APISIX/apisix-source/t/pnpm-lock.yaml new file mode 100644 index 0000000..295f89d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/pnpm-lock.yaml @@ -0,0 +1,3016 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + devDependencies: + '@jest/globals': + specifier: ^29.7.0 + version: 29.7.0 + '@trivago/prettier-plugin-sort-imports': + specifier: ^5.2.2 + version: 5.2.2(prettier@3.5.3) + '@types/jest': + specifier: 29.5.14 + version: 29.5.14 + '@types/node': + specifier: 22.14.1 + version: 22.14.1 + axios: + specifier: ^1.9.0 + version: 1.9.0 + docker-compose: + specifier: ^1.2.0 + version: 1.2.0 + graphql: + specifier: ^16.11.0 + version: 16.11.0 + graphql-request: + 
specifier: ^7.1.2 + version: 7.1.2(graphql@16.11.0) + jest: + specifier: 29.7.0 + version: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + lago-javascript-client: + specifier: ^1.26.0 + version: 1.27.1 + simple-git: + specifier: ^3.27.0 + version: 3.27.0 + ts-jest: + specifier: 29.3.2 + version: 29.3.2(@babel/core@7.26.10)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.10))(jest@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)))(typescript@5.8.3) + ts-node: + specifier: 10.9.2 + version: 10.9.2(@types/node@22.14.1)(typescript@5.8.3) + yaml: + specifier: ^2.7.1 + version: 2.7.1 + +packages: + + '@ampproject/remapping@2.3.0': + resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} + engines: {node: '>=6.0.0'} + + '@babel/code-frame@7.26.2': + resolution: {integrity: sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==} + engines: {node: '>=6.9.0'} + + '@babel/compat-data@7.26.8': + resolution: {integrity: sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ==} + engines: {node: '>=6.9.0'} + + '@babel/core@7.26.10': + resolution: {integrity: sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ==} + engines: {node: '>=6.9.0'} + + '@babel/generator@7.27.0': + resolution: {integrity: sha512-VybsKvpiN1gU1sdMZIp7FcqphVVKEwcuj02x73uvcHE0PTihx1nlBcowYWhDwjpoAXRv43+gDzyggGnn1XZhVw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-compilation-targets@7.27.0': + resolution: {integrity: sha512-LVk7fbXml0H2xH34dFzKQ7TDZ2G4/rVTOrq9V+icbbadjbVxxeFeDsNHv2SrZeWoA+6ZiTyWYWtScEIW07EAcA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-imports@7.25.9': + resolution: {integrity: sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==} + engines: 
{node: '>=6.9.0'} + + '@babel/helper-module-transforms@7.26.0': + resolution: {integrity: sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-plugin-utils@7.26.5': + resolution: {integrity: sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg==} + engines: {node: '>=6.9.0'} + + '@babel/helper-string-parser@7.25.9': + resolution: {integrity: sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.25.9': + resolution: {integrity: sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-option@7.25.9': + resolution: {integrity: sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==} + engines: {node: '>=6.9.0'} + + '@babel/helpers@7.27.0': + resolution: {integrity: sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.27.0': + resolution: {integrity: sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/plugin-syntax-async-generators@7.8.4': + resolution: {integrity: sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-bigint@7.8.3': + resolution: {integrity: sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-class-properties@7.12.13': + resolution: {integrity: 
sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-class-static-block@7.14.5': + resolution: {integrity: sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-import-attributes@7.26.0': + resolution: {integrity: sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-import-meta@7.10.4': + resolution: {integrity: sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-json-strings@7.8.3': + resolution: {integrity: sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-jsx@7.25.9': + resolution: {integrity: sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-logical-assignment-operators@7.10.4': + resolution: {integrity: sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-nullish-coalescing-operator@7.8.3': + resolution: {integrity: sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-numeric-separator@7.10.4': + resolution: {integrity: sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==} + peerDependencies: + '@babel/core': 
^7.0.0-0 + + '@babel/plugin-syntax-object-rest-spread@7.8.3': + resolution: {integrity: sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-optional-catch-binding@7.8.3': + resolution: {integrity: sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-optional-chaining@7.8.3': + resolution: {integrity: sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-private-property-in-object@7.14.5': + resolution: {integrity: sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-top-level-await@7.14.5': + resolution: {integrity: sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-typescript@7.25.9': + resolution: {integrity: sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/template@7.27.0': + resolution: {integrity: sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==} + engines: {node: '>=6.9.0'} + + '@babel/traverse@7.27.0': + resolution: {integrity: sha512-19lYZFzYVQkkHkl4Cy4WrAVcqBkgvV2YM2TU3xG6DIwO7O3ecbDPfW3yM3bjAGcqcQHi+CCtjMR3dIEHxsd6bA==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.27.0': + resolution: {integrity: sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==} + engines: {node: '>=6.9.0'} + + 
'@bcoe/v8-coverage@0.2.3': + resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==} + + '@cspotcode/source-map-support@0.8.1': + resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} + engines: {node: '>=12'} + + '@graphql-typed-document-node/core@3.2.0': + resolution: {integrity: sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ==} + peerDependencies: + graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 + + '@istanbuljs/load-nyc-config@1.1.0': + resolution: {integrity: sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ==} + engines: {node: '>=8'} + + '@istanbuljs/schema@0.1.3': + resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==} + engines: {node: '>=8'} + + '@jest/console@29.7.0': + resolution: {integrity: sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/core@29.7.0': + resolution: {integrity: sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + '@jest/environment@29.7.0': + resolution: {integrity: sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/expect-utils@29.7.0': + resolution: {integrity: sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/expect@29.7.0': + resolution: 
{integrity: sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/fake-timers@29.7.0': + resolution: {integrity: sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/globals@29.7.0': + resolution: {integrity: sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/reporters@29.7.0': + resolution: {integrity: sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + '@jest/schemas@29.6.3': + resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/source-map@29.6.3': + resolution: {integrity: sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/test-result@29.7.0': + resolution: {integrity: sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/test-sequencer@29.7.0': + resolution: {integrity: sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/transform@29.7.0': + resolution: {integrity: sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jest/types@29.6.3': + resolution: {integrity: 
sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + '@jridgewell/gen-mapping@0.3.8': + resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==} + engines: {node: '>=6.0.0'} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/set-array@1.2.1': + resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.0': + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} + + '@jridgewell/trace-mapping@0.3.25': + resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + + '@jridgewell/trace-mapping@0.3.9': + resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + + '@kwsites/file-exists@1.1.1': + resolution: {integrity: sha512-m9/5YGR18lIwxSFDwfE3oA7bWuq9kdau6ugN4H2rJeyhFQZcG9AgSHkQtSD15a8WvTgfz9aikZMrKPHvbpqFiw==} + + '@kwsites/promise-deferred@1.1.1': + resolution: {integrity: sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw==} + + '@sinclair/typebox@0.27.8': + resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} + + '@sinonjs/commons@3.0.1': + resolution: {integrity: sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ==} + + '@sinonjs/fake-timers@10.3.0': + resolution: {integrity: sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA==} + + 
'@trivago/prettier-plugin-sort-imports@5.2.2': + resolution: {integrity: sha512-fYDQA9e6yTNmA13TLVSA+WMQRc5Bn/c0EUBditUHNfMMxN7M82c38b1kEggVE3pLpZ0FwkwJkUEKMiOi52JXFA==} + engines: {node: '>18.12'} + peerDependencies: + '@vue/compiler-sfc': 3.x + prettier: 2.x - 3.x + prettier-plugin-svelte: 3.x + svelte: 4.x || 5.x + peerDependenciesMeta: + '@vue/compiler-sfc': + optional: true + prettier-plugin-svelte: + optional: true + svelte: + optional: true + + '@tsconfig/node10@1.0.11': + resolution: {integrity: sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==} + + '@tsconfig/node12@1.0.11': + resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} + + '@tsconfig/node14@1.0.3': + resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} + + '@tsconfig/node16@1.0.4': + resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} + + '@types/babel__core@7.20.5': + resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} + + '@types/babel__generator@7.27.0': + resolution: {integrity: sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==} + + '@types/babel__template@7.4.4': + resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} + + '@types/babel__traverse@7.20.7': + resolution: {integrity: sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng==} + + '@types/graceful-fs@4.1.9': + resolution: {integrity: sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==} + + '@types/istanbul-lib-coverage@2.0.6': + resolution: {integrity: 
sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==} + + '@types/istanbul-lib-report@3.0.3': + resolution: {integrity: sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA==} + + '@types/istanbul-reports@3.0.4': + resolution: {integrity: sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ==} + + '@types/jest@29.5.14': + resolution: {integrity: sha512-ZN+4sdnLUbo8EVvVc2ao0GFW6oVrQRPn4K2lglySj7APvSrgzxHiNNK99us4WDMi57xxA2yggblIAMNhXOotLQ==} + + '@types/node@22.14.1': + resolution: {integrity: sha512-u0HuPQwe/dHrItgHHpmw3N2fYCR6x4ivMNbPHRkBVP4CvN+kiRrKHWk3i8tXiO/joPwXLMYvF9TTF0eqgHIuOw==} + + '@types/stack-utils@2.0.3': + resolution: {integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==} + + '@types/yargs-parser@21.0.3': + resolution: {integrity: sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ==} + + '@types/yargs@17.0.33': + resolution: {integrity: sha512-WpxBCKWPLr4xSsHgz511rFJAM+wS28w2zEO1QDNY5zM/S8ok70NNfztH0xwhqKyaK0OHCbN98LDAZuy1ctxDkA==} + + acorn-walk@8.3.4: + resolution: {integrity: sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==} + engines: {node: '>=0.4.0'} + + acorn@8.14.1: + resolution: {integrity: sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==} + engines: {node: '>=0.4.0'} + hasBin: true + + ansi-escapes@4.3.2: + resolution: {integrity: sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==} + engines: {node: '>=8'} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-styles@4.3.0: + resolution: {integrity: 
sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + ansi-styles@5.2.0: + resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} + engines: {node: '>=10'} + + anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + arg@4.1.3: + resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} + + argparse@1.0.10: + resolution: {integrity: sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==} + + async@3.2.6: + resolution: {integrity: sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==} + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + axios@1.9.0: + resolution: {integrity: sha512-re4CqKTJaURpzbLHtIi6XpDv20/CnpXOtjRY5/CU32L8gU8ek9UIivcfvSWvmKEngmVbrUtPpdDwWDWL7DNHvg==} + + babel-jest@29.7.0: + resolution: {integrity: sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@babel/core': ^7.8.0 + + babel-plugin-istanbul@6.1.1: + resolution: {integrity: sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA==} + engines: {node: '>=8'} + + babel-plugin-jest-hoist@29.6.3: + resolution: {integrity: sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + babel-preset-current-node-syntax@1.1.0: + resolution: {integrity: sha512-ldYss8SbBlWva1bs28q78Ju5Zq1F+8BrqBZZ0VFhLBvhh6lCpC2o3gDJi/5DRLs9FgYZCnmPYIVFU4lRXCkyUw==} + peerDependencies: + 
'@babel/core': ^7.0.0 + + babel-preset-jest@29.6.3: + resolution: {integrity: sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@babel/core': ^7.0.0 + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + + brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + + braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + + browserslist@4.24.4: + resolution: {integrity: sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + bs-logger@0.2.6: + resolution: {integrity: sha512-pd8DCoxmbgc7hyPKOvxtqNcjYoOsABPQdcCUjGp3d42VR2CX1ORhk2A87oqqu5R1kk+76nsxZupkmyd+MVtCog==} + engines: {node: '>= 6'} + + bser@2.1.1: + resolution: {integrity: sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ==} + + buffer-from@1.1.2: + resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} + + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + camelcase@5.3.1: + resolution: {integrity: 
sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} + engines: {node: '>=6'} + + camelcase@6.3.0: + resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + + caniuse-lite@1.0.30001715: + resolution: {integrity: sha512-7ptkFGMm2OAOgvZpwgA4yjQ5SQbrNVGdRjzH0pBdy1Fasvcr+KAeECmbCAECzTuDuoX0FCY8KzUxjf9+9kfZEw==} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + char-regex@1.0.2: + resolution: {integrity: sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw==} + engines: {node: '>=10'} + + ci-info@3.9.0: + resolution: {integrity: sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==} + engines: {node: '>=8'} + + cjs-module-lexer@1.4.3: + resolution: {integrity: sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==} + + cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: '>=12'} + + co@4.6.0: + resolution: {integrity: sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==} + engines: {iojs: '>= 1.0.0', node: '>= 0.12.0'} + + collect-v8-coverage@1.0.2: + resolution: {integrity: sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q==} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + combined-stream@1.0.8: + resolution: {integrity: 
sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + + create-jest@29.7.0: + resolution: {integrity: sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + + create-require@1.1.1: + resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + debug@4.4.0: + resolution: {integrity: sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + dedent@1.5.3: + resolution: {integrity: sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ==} + peerDependencies: + babel-plugin-macros: ^3.1.0 + peerDependenciesMeta: + babel-plugin-macros: + optional: true + + deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + engines: {node: '>=0.10.0'} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + detect-newline@3.1.0: + resolution: {integrity: sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==} + engines: {node: 
'>=8'} + + diff-sequences@29.6.3: + resolution: {integrity: sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + diff@4.0.2: + resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} + engines: {node: '>=0.3.1'} + + docker-compose@1.2.0: + resolution: {integrity: sha512-wIU1eHk3Op7dFgELRdmOYlPYS4gP8HhH1ZmZa13QZF59y0fblzFDFmKPhyc05phCy2hze9OEvNZAsoljrs+72w==} + engines: {node: '>= 6.0.0'} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + + ejs@3.1.10: + resolution: {integrity: sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==} + engines: {node: '>=0.10.0'} + hasBin: true + + electron-to-chromium@1.5.143: + resolution: {integrity: sha512-QqklJMOFBMqe46k8iIOwA9l2hz57V2OKMmP5eSWcUvwx+mASAsbU+wkF1pHjn9ZVSBPrsYWr4/W/95y5SwYg2g==} + + emittery@0.13.1: + resolution: {integrity: sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ==} + engines: {node: '>=12'} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + error-ex@1.3.2: + resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} + + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-object-atoms@1.1.1: + resolution: {integrity: 
sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + es-set-tostringtag@2.1.0: + resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} + engines: {node: '>= 0.4'} + + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-string-regexp@2.0.0: + resolution: {integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==} + engines: {node: '>=8'} + + esprima@4.0.1: + resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} + engines: {node: '>=4'} + hasBin: true + + execa@5.1.1: + resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} + engines: {node: '>=10'} + + exit@0.1.2: + resolution: {integrity: sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ==} + engines: {node: '>= 0.8.0'} + + expect@29.7.0: + resolution: {integrity: sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fb-watchman@2.0.2: + resolution: {integrity: sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA==} + + filelist@1.0.4: + resolution: {integrity: sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==} + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + 
find-up@4.1.0: + resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} + engines: {node: '>=8'} + + follow-redirects@1.15.9: + resolution: {integrity: sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + + form-data@4.0.2: + resolution: {integrity: sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==} + engines: {node: '>= 6'} + + fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + gensync@1.0.0-beta.2: + resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + get-package-type@0.1.0: + resolution: {integrity: sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q==} + engines: {node: '>=8.0.0'} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + 
get-stream@6.0.1: + resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} + engines: {node: '>=10'} + + glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + deprecated: Glob versions prior to v9 are no longer supported + + globals@11.12.0: + resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} + engines: {node: '>=4'} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + graphql-request@7.1.2: + resolution: {integrity: sha512-+XE3iuC55C2di5ZUrB4pjgwe+nIQBuXVIK9J98wrVwojzDW3GMdSBZfxUk8l4j9TieIpjpggclxhNEU9ebGF8w==} + peerDependencies: + graphql: 14 - 16 + + graphql@16.11.0: + resolution: {integrity: sha512-mS1lbMsxgQj6hge1XZ6p7GPhbrtFwUFYi3wRzXAC/FmYnyXMTvvI3td3rjmQ2u8ewXueaSvRPWaEcgVVOT9Jnw==} + engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + html-escaper@2.0.2: + resolution: {integrity: 
sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} + + human-signals@2.1.0: + resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} + engines: {node: '>=10.17.0'} + + import-local@3.2.0: + resolution: {integrity: sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==} + engines: {node: '>=8'} + hasBin: true + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. 
+ + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + is-arrayish@0.2.1: + resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + + is-core-module@2.16.1: + resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} + engines: {node: '>= 0.4'} + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-generator-fn@2.1.0: + resolution: {integrity: sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==} + engines: {node: '>=6'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + istanbul-lib-coverage@3.2.2: + resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==} + engines: {node: '>=8'} + + istanbul-lib-instrument@5.2.1: + resolution: {integrity: sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==} + engines: {node: '>=8'} + + istanbul-lib-instrument@6.0.3: + resolution: {integrity: sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q==} + engines: {node: '>=10'} + + istanbul-lib-report@3.0.1: + resolution: {integrity: 
sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==} + engines: {node: '>=10'} + + istanbul-lib-source-maps@4.0.1: + resolution: {integrity: sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw==} + engines: {node: '>=10'} + + istanbul-reports@3.1.7: + resolution: {integrity: sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g==} + engines: {node: '>=8'} + + jake@10.9.2: + resolution: {integrity: sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA==} + engines: {node: '>=10'} + hasBin: true + + javascript-natural-sort@0.7.1: + resolution: {integrity: sha512-nO6jcEfZWQXDhOiBtG2KvKyEptz7RVbpGP4vTD2hLBdmNQSsCiicO2Ioinv6UI4y9ukqnBpy+XZ9H6uLNgJTlw==} + + jest-changed-files@29.7.0: + resolution: {integrity: sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-circus@29.7.0: + resolution: {integrity: sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-cli@29.7.0: + resolution: {integrity: sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + jest-config@29.7.0: + resolution: {integrity: sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + peerDependencies: + '@types/node': '*' + ts-node: '>=9.0.0' + peerDependenciesMeta: + '@types/node': + optional: true + ts-node: + optional: true + + jest-diff@29.7.0: + resolution: {integrity: 
sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-docblock@29.7.0: + resolution: {integrity: sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-each@29.7.0: + resolution: {integrity: sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-environment-node@29.7.0: + resolution: {integrity: sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-get-type@29.6.3: + resolution: {integrity: sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-haste-map@29.7.0: + resolution: {integrity: sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-leak-detector@29.7.0: + resolution: {integrity: sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-matcher-utils@29.7.0: + resolution: {integrity: sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-message-util@29.7.0: + resolution: {integrity: sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-mock@29.7.0: + resolution: {integrity: sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + 
jest-pnp-resolver@1.2.3: + resolution: {integrity: sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w==} + engines: {node: '>=6'} + peerDependencies: + jest-resolve: '*' + peerDependenciesMeta: + jest-resolve: + optional: true + + jest-regex-util@29.6.3: + resolution: {integrity: sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-resolve-dependencies@29.7.0: + resolution: {integrity: sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-resolve@29.7.0: + resolution: {integrity: sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-runner@29.7.0: + resolution: {integrity: sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-runtime@29.7.0: + resolution: {integrity: sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-snapshot@29.7.0: + resolution: {integrity: sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-util@29.7.0: + resolution: {integrity: sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-validate@29.7.0: + resolution: {integrity: sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-watcher@29.7.0: + resolution: {integrity: 
sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest-worker@29.7.0: + resolution: {integrity: sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + jest@29.7.0: + resolution: {integrity: sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + hasBin: true + peerDependencies: + node-notifier: ^8.0.1 || ^9.0.0 || ^10.0.0 + peerDependenciesMeta: + node-notifier: + optional: true + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-yaml@3.14.1: + resolution: {integrity: sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==} + hasBin: true + + jsesc@3.1.0: + resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} + engines: {node: '>=6'} + hasBin: true + + json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + + json5@2.2.3: + resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} + engines: {node: '>=6'} + hasBin: true + + kleur@3.0.3: + resolution: {integrity: sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==} + engines: {node: '>=6'} + + lago-javascript-client@1.27.1: + resolution: {integrity: sha512-0TCRgONHbKJwVOFi9VMh7u4XjG7QBu7x13iJT66vSJGLOsIbcmFLEhaofpnh5x7s1qJROGTzamNmVxvQ8afpdA==} + + leven@3.1.0: + resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==} + engines: {node: '>=6'} + + 
lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + + locate-path@5.0.0: + resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} + engines: {node: '>=8'} + + lodash.memoize@4.1.2: + resolution: {integrity: sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==} + + lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + + lru-cache@5.1.1: + resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + + make-dir@4.0.0: + resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==} + engines: {node: '>=10'} + + make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + + makeerror@1.0.12: + resolution: {integrity: sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg==} + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + merge-stream@2.0.0: + resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} + + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: 
sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + mimic-fn@2.1.0: + resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} + engines: {node: '>=6'} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@5.1.6: + resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} + engines: {node: '>=10'} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + node-int64@0.4.0: + resolution: {integrity: sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==} + + node-releases@2.0.19: + resolution: {integrity: sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==} + + normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + npm-run-path@4.0.1: + resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==} + engines: {node: '>=8'} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + onetime@5.1.2: + resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} + engines: {node: '>=6'} + + p-limit@2.3.0: + resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} + engines: 
{node: '>=6'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@4.1.0: + resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} + engines: {node: '>=8'} + + p-try@2.2.0: + resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} + engines: {node: '>=6'} + + parse-json@5.2.0: + resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: '>=8'} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + pirates@4.0.7: + resolution: {integrity: sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==} + engines: {node: '>= 6'} + + pkg-dir@4.2.0: + resolution: {integrity: sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==} + engines: {node: 
'>=8'} + + prettier@3.5.3: + resolution: {integrity: sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==} + engines: {node: '>=14'} + hasBin: true + + pretty-format@29.7.0: + resolution: {integrity: sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ==} + engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} + + prompts@2.4.2: + resolution: {integrity: sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==} + engines: {node: '>= 6'} + + proxy-from-env@1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + + pure-rand@6.1.0: + resolution: {integrity: sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==} + + react-is@18.3.1: + resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} + + require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + resolve-cwd@3.0.0: + resolution: {integrity: sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==} + engines: {node: '>=8'} + + resolve-from@5.0.0: + resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} + engines: {node: '>=8'} + + resolve.exports@2.0.3: + resolution: {integrity: sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A==} + engines: {node: '>=10'} + + resolve@1.22.10: + resolution: {integrity: sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==} + engines: {node: '>= 0.4'} + hasBin: true + + semver@6.3.1: + resolution: {integrity: 
sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} + hasBin: true + + semver@7.7.1: + resolution: {integrity: sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==} + engines: {node: '>=10'} + hasBin: true + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + + simple-git@3.27.0: + resolution: {integrity: sha512-ivHoFS9Yi9GY49ogc6/YAi3Fl9ROnF4VyubNylgCkA+RVqLaKWnDSzXOVzya8csELIaWaYNutsEuAhZrtOjozA==} + + sisteransi@1.0.5: + resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} + + slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + + source-map-support@0.5.13: + resolution: {integrity: sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w==} + + source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + + sprintf-js@1.0.3: + resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} + + stack-utils@2.0.6: + resolution: {integrity: sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==} + engines: {node: '>=10'} + + string-length@4.0.2: + resolution: {integrity: 
sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==} + engines: {node: '>=10'} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-bom@4.0.0: + resolution: {integrity: sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w==} + engines: {node: '>=8'} + + strip-final-newline@2.0.0: + resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==} + engines: {node: '>=6'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + supports-color@8.1.1: + resolution: {integrity: sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==} + engines: {node: '>=10'} + + supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + test-exclude@6.0.0: + resolution: {integrity: sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w==} + engines: {node: '>=8'} + + tmpl@1.0.5: + resolution: {integrity: sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + 
engines: {node: '>=8.0'} + + ts-jest@29.3.2: + resolution: {integrity: sha512-bJJkrWc6PjFVz5g2DGCNUo8z7oFEYaz1xP1NpeDU7KNLMWPpEyV8Chbpkn8xjzgRDpQhnGMyvyldoL7h8JXyug==} + engines: {node: ^14.15.0 || ^16.10.0 || ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@babel/core': '>=7.0.0-beta.0 <8' + '@jest/transform': ^29.0.0 + '@jest/types': ^29.0.0 + babel-jest: ^29.0.0 + esbuild: '*' + jest: ^29.0.0 + typescript: '>=4.3 <6' + peerDependenciesMeta: + '@babel/core': + optional: true + '@jest/transform': + optional: true + '@jest/types': + optional: true + babel-jest: + optional: true + esbuild: + optional: true + + ts-node@10.9.2: + resolution: {integrity: sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==} + hasBin: true + peerDependencies: + '@swc/core': '>=1.2.50' + '@swc/wasm': '>=1.2.50' + '@types/node': '*' + typescript: '>=2.7' + peerDependenciesMeta: + '@swc/core': + optional: true + '@swc/wasm': + optional: true + + type-detect@4.0.8: + resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + engines: {node: '>=4'} + + type-fest@0.21.3: + resolution: {integrity: sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==} + engines: {node: '>=10'} + + type-fest@4.40.1: + resolution: {integrity: sha512-9YvLNnORDpI+vghLU/Nf+zSv0kL47KbVJ1o3sKgoTefl6i+zebxbiDQWoe/oWWqPhIgQdRZRT1KA9sCPL810SA==} + engines: {node: '>=16'} + + typescript@5.8.3: + resolution: {integrity: sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==} + engines: {node: '>=14.17'} + hasBin: true + + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + + update-browserslist-db@1.1.3: + resolution: {integrity: sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==} + 
hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + v8-compile-cache-lib@3.0.1: + resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} + + v8-to-istanbul@9.3.0: + resolution: {integrity: sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==} + engines: {node: '>=10.12.0'} + + walker@1.0.8: + resolution: {integrity: sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ==} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + write-file-atomic@4.0.2: + resolution: {integrity: sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + + y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + + yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + + yaml@2.7.1: + resolution: {integrity: sha512-10ULxpnOCQXxJvBgxsn9ptjq6uviG/htZKk9veJGhlqn3w/DxQ631zFF+nlQXLwmImeS5amR2dl2U8sg6U9jsQ==} + engines: {node: '>= 14'} + hasBin: true + + yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + + yargs@17.7.2: + resolution: {integrity: 
sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} + engines: {node: '>=12'} + + yn@3.1.1: + resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} + engines: {node: '>=6'} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + +snapshots: + + '@ampproject/remapping@2.3.0': + dependencies: + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + + '@babel/code-frame@7.26.2': + dependencies: + '@babel/helper-validator-identifier': 7.25.9 + js-tokens: 4.0.0 + picocolors: 1.1.1 + + '@babel/compat-data@7.26.8': {} + + '@babel/core@7.26.10': + dependencies: + '@ampproject/remapping': 2.3.0 + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.27.0 + '@babel/helper-compilation-targets': 7.27.0 + '@babel/helper-module-transforms': 7.26.0(@babel/core@7.26.10) + '@babel/helpers': 7.27.0 + '@babel/parser': 7.27.0 + '@babel/template': 7.27.0 + '@babel/traverse': 7.27.0 + '@babel/types': 7.27.0 + convert-source-map: 2.0.0 + debug: 4.4.0 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + '@babel/generator@7.27.0': + dependencies: + '@babel/parser': 7.27.0 + '@babel/types': 7.27.0 + '@jridgewell/gen-mapping': 0.3.8 + '@jridgewell/trace-mapping': 0.3.25 + jsesc: 3.1.0 + + '@babel/helper-compilation-targets@7.27.0': + dependencies: + '@babel/compat-data': 7.26.8 + '@babel/helper-validator-option': 7.25.9 + browserslist: 4.24.4 + lru-cache: 5.1.1 + semver: 6.3.1 + + '@babel/helper-module-imports@7.25.9': + dependencies: + '@babel/traverse': 7.27.0 + '@babel/types': 7.27.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-module-transforms@7.26.0(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-module-imports': 7.25.9 + 
'@babel/helper-validator-identifier': 7.25.9 + '@babel/traverse': 7.27.0 + transitivePeerDependencies: + - supports-color + + '@babel/helper-plugin-utils@7.26.5': {} + + '@babel/helper-string-parser@7.25.9': {} + + '@babel/helper-validator-identifier@7.25.9': {} + + '@babel/helper-validator-option@7.25.9': {} + + '@babel/helpers@7.27.0': + dependencies: + '@babel/template': 7.27.0 + '@babel/types': 7.27.0 + + '@babel/parser@7.27.0': + dependencies: + '@babel/types': 7.27.0 + + '@babel/plugin-syntax-async-generators@7.8.4(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-bigint@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-class-properties@7.12.13(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-class-static-block@7.14.5(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-import-attributes@7.26.0(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-import-meta@7.10.4(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-json-strings@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-jsx@7.25.9(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-logical-assignment-operators@7.10.4(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-nullish-coalescing-operator@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 
+ + '@babel/plugin-syntax-numeric-separator@7.10.4(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-object-rest-spread@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-optional-catch-binding@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-optional-chaining@7.8.3(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-private-property-in-object@7.14.5(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-top-level-await@7.14.5(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/plugin-syntax-typescript@7.25.9(@babel/core@7.26.10)': + dependencies: + '@babel/core': 7.26.10 + '@babel/helper-plugin-utils': 7.26.5 + + '@babel/template@7.27.0': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/parser': 7.27.0 + '@babel/types': 7.27.0 + + '@babel/traverse@7.27.0': + dependencies: + '@babel/code-frame': 7.26.2 + '@babel/generator': 7.27.0 + '@babel/parser': 7.27.0 + '@babel/template': 7.27.0 + '@babel/types': 7.27.0 + debug: 4.4.0 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + + '@babel/types@7.27.0': + dependencies: + '@babel/helper-string-parser': 7.25.9 + '@babel/helper-validator-identifier': 7.25.9 + + '@bcoe/v8-coverage@0.2.3': {} + + '@cspotcode/source-map-support@0.8.1': + dependencies: + '@jridgewell/trace-mapping': 0.3.9 + + '@graphql-typed-document-node/core@3.2.0(graphql@16.11.0)': + dependencies: + graphql: 16.11.0 + + '@istanbuljs/load-nyc-config@1.1.0': + dependencies: + camelcase: 5.3.1 + find-up: 4.1.0 + get-package-type: 0.1.0 + js-yaml: 3.14.1 + resolve-from: 5.0.0 + + 
'@istanbuljs/schema@0.1.3': {} + + '@jest/console@29.7.0': + dependencies: + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + slash: 3.0.0 + + '@jest/core@29.7.0(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3))': + dependencies: + '@jest/console': 29.7.0 + '@jest/reporters': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + ci-info: 3.9.0 + exit: 0.1.2 + graceful-fs: 4.2.11 + jest-changed-files: 29.7.0 + jest-config: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + jest-haste-map: 29.7.0 + jest-message-util: 29.7.0 + jest-regex-util: 29.6.3 + jest-resolve: 29.7.0 + jest-resolve-dependencies: 29.7.0 + jest-runner: 29.7.0 + jest-runtime: 29.7.0 + jest-snapshot: 29.7.0 + jest-util: 29.7.0 + jest-validate: 29.7.0 + jest-watcher: 29.7.0 + micromatch: 4.0.8 + pretty-format: 29.7.0 + slash: 3.0.0 + strip-ansi: 6.0.1 + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + - ts-node + + '@jest/environment@29.7.0': + dependencies: + '@jest/fake-timers': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + jest-mock: 29.7.0 + + '@jest/expect-utils@29.7.0': + dependencies: + jest-get-type: 29.6.3 + + '@jest/expect@29.7.0': + dependencies: + expect: 29.7.0 + jest-snapshot: 29.7.0 + transitivePeerDependencies: + - supports-color + + '@jest/fake-timers@29.7.0': + dependencies: + '@jest/types': 29.6.3 + '@sinonjs/fake-timers': 10.3.0 + '@types/node': 22.14.1 + jest-message-util: 29.7.0 + jest-mock: 29.7.0 + jest-util: 29.7.0 + + '@jest/globals@29.7.0': + dependencies: + '@jest/environment': 29.7.0 + '@jest/expect': 29.7.0 + '@jest/types': 29.6.3 + jest-mock: 29.7.0 + transitivePeerDependencies: + - supports-color + + '@jest/reporters@29.7.0': + dependencies: + '@bcoe/v8-coverage': 0.2.3 + '@jest/console': 29.7.0 + '@jest/test-result': 29.7.0 + 
'@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@jridgewell/trace-mapping': 0.3.25 + '@types/node': 22.14.1 + chalk: 4.1.2 + collect-v8-coverage: 1.0.2 + exit: 0.1.2 + glob: 7.2.3 + graceful-fs: 4.2.11 + istanbul-lib-coverage: 3.2.2 + istanbul-lib-instrument: 6.0.3 + istanbul-lib-report: 3.0.1 + istanbul-lib-source-maps: 4.0.1 + istanbul-reports: 3.1.7 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + jest-worker: 29.7.0 + slash: 3.0.0 + string-length: 4.0.2 + strip-ansi: 6.0.1 + v8-to-istanbul: 9.3.0 + transitivePeerDependencies: + - supports-color + + '@jest/schemas@29.6.3': + dependencies: + '@sinclair/typebox': 0.27.8 + + '@jest/source-map@29.6.3': + dependencies: + '@jridgewell/trace-mapping': 0.3.25 + callsites: 3.1.0 + graceful-fs: 4.2.11 + + '@jest/test-result@29.7.0': + dependencies: + '@jest/console': 29.7.0 + '@jest/types': 29.6.3 + '@types/istanbul-lib-coverage': 2.0.6 + collect-v8-coverage: 1.0.2 + + '@jest/test-sequencer@29.7.0': + dependencies: + '@jest/test-result': 29.7.0 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + slash: 3.0.0 + + '@jest/transform@29.7.0': + dependencies: + '@babel/core': 7.26.10 + '@jest/types': 29.6.3 + '@jridgewell/trace-mapping': 0.3.25 + babel-plugin-istanbul: 6.1.1 + chalk: 4.1.2 + convert-source-map: 2.0.0 + fast-json-stable-stringify: 2.1.0 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + jest-regex-util: 29.6.3 + jest-util: 29.7.0 + micromatch: 4.0.8 + pirates: 4.0.7 + slash: 3.0.0 + write-file-atomic: 4.0.2 + transitivePeerDependencies: + - supports-color + + '@jest/types@29.6.3': + dependencies: + '@jest/schemas': 29.6.3 + '@types/istanbul-lib-coverage': 2.0.6 + '@types/istanbul-reports': 3.0.4 + '@types/node': 22.14.1 + '@types/yargs': 17.0.33 + chalk: 4.1.2 + + '@jridgewell/gen-mapping@0.3.8': + dependencies: + '@jridgewell/set-array': 1.2.1 + '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/trace-mapping': 0.3.25 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/set-array@1.2.1': {} + + 
'@jridgewell/sourcemap-codec@1.5.0': {} + + '@jridgewell/trace-mapping@0.3.25': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@jridgewell/trace-mapping@0.3.9': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@kwsites/file-exists@1.1.1': + dependencies: + debug: 4.4.0 + transitivePeerDependencies: + - supports-color + + '@kwsites/promise-deferred@1.1.1': {} + + '@sinclair/typebox@0.27.8': {} + + '@sinonjs/commons@3.0.1': + dependencies: + type-detect: 4.0.8 + + '@sinonjs/fake-timers@10.3.0': + dependencies: + '@sinonjs/commons': 3.0.1 + + '@trivago/prettier-plugin-sort-imports@5.2.2(prettier@3.5.3)': + dependencies: + '@babel/generator': 7.27.0 + '@babel/parser': 7.27.0 + '@babel/traverse': 7.27.0 + '@babel/types': 7.27.0 + javascript-natural-sort: 0.7.1 + lodash: 4.17.21 + prettier: 3.5.3 + transitivePeerDependencies: + - supports-color + + '@tsconfig/node10@1.0.11': {} + + '@tsconfig/node12@1.0.11': {} + + '@tsconfig/node14@1.0.3': {} + + '@tsconfig/node16@1.0.4': {} + + '@types/babel__core@7.20.5': + dependencies: + '@babel/parser': 7.27.0 + '@babel/types': 7.27.0 + '@types/babel__generator': 7.27.0 + '@types/babel__template': 7.4.4 + '@types/babel__traverse': 7.20.7 + + '@types/babel__generator@7.27.0': + dependencies: + '@babel/types': 7.27.0 + + '@types/babel__template@7.4.4': + dependencies: + '@babel/parser': 7.27.0 + '@babel/types': 7.27.0 + + '@types/babel__traverse@7.20.7': + dependencies: + '@babel/types': 7.27.0 + + '@types/graceful-fs@4.1.9': + dependencies: + '@types/node': 22.14.1 + + '@types/istanbul-lib-coverage@2.0.6': {} + + '@types/istanbul-lib-report@3.0.3': + dependencies: + '@types/istanbul-lib-coverage': 2.0.6 + + '@types/istanbul-reports@3.0.4': + dependencies: + '@types/istanbul-lib-report': 3.0.3 + + '@types/jest@29.5.14': + dependencies: + expect: 29.7.0 + pretty-format: 29.7.0 + + '@types/node@22.14.1': + dependencies: + undici-types: 6.21.0 
+ + '@types/stack-utils@2.0.3': {} + + '@types/yargs-parser@21.0.3': {} + + '@types/yargs@17.0.33': + dependencies: + '@types/yargs-parser': 21.0.3 + + acorn-walk@8.3.4: + dependencies: + acorn: 8.14.1 + + acorn@8.14.1: {} + + ansi-escapes@4.3.2: + dependencies: + type-fest: 0.21.3 + + ansi-regex@5.0.1: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + ansi-styles@5.2.0: {} + + anymatch@3.1.3: + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + arg@4.1.3: {} + + argparse@1.0.10: + dependencies: + sprintf-js: 1.0.3 + + async@3.2.6: {} + + asynckit@0.4.0: {} + + axios@1.9.0: + dependencies: + follow-redirects: 1.15.9 + form-data: 4.0.2 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + + babel-jest@29.7.0(@babel/core@7.26.10): + dependencies: + '@babel/core': 7.26.10 + '@jest/transform': 29.7.0 + '@types/babel__core': 7.20.5 + babel-plugin-istanbul: 6.1.1 + babel-preset-jest: 29.6.3(@babel/core@7.26.10) + chalk: 4.1.2 + graceful-fs: 4.2.11 + slash: 3.0.0 + transitivePeerDependencies: + - supports-color + + babel-plugin-istanbul@6.1.1: + dependencies: + '@babel/helper-plugin-utils': 7.26.5 + '@istanbuljs/load-nyc-config': 1.1.0 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-instrument: 5.2.1 + test-exclude: 6.0.0 + transitivePeerDependencies: + - supports-color + + babel-plugin-jest-hoist@29.6.3: + dependencies: + '@babel/template': 7.27.0 + '@babel/types': 7.27.0 + '@types/babel__core': 7.20.5 + '@types/babel__traverse': 7.20.7 + + babel-preset-current-node-syntax@1.1.0(@babel/core@7.26.10): + dependencies: + '@babel/core': 7.26.10 + '@babel/plugin-syntax-async-generators': 7.8.4(@babel/core@7.26.10) + '@babel/plugin-syntax-bigint': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-class-properties': 7.12.13(@babel/core@7.26.10) + '@babel/plugin-syntax-class-static-block': 7.14.5(@babel/core@7.26.10) + '@babel/plugin-syntax-import-attributes': 7.26.0(@babel/core@7.26.10) + '@babel/plugin-syntax-import-meta': 
7.10.4(@babel/core@7.26.10) + '@babel/plugin-syntax-json-strings': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-logical-assignment-operators': 7.10.4(@babel/core@7.26.10) + '@babel/plugin-syntax-nullish-coalescing-operator': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-numeric-separator': 7.10.4(@babel/core@7.26.10) + '@babel/plugin-syntax-object-rest-spread': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-optional-catch-binding': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-optional-chaining': 7.8.3(@babel/core@7.26.10) + '@babel/plugin-syntax-private-property-in-object': 7.14.5(@babel/core@7.26.10) + '@babel/plugin-syntax-top-level-await': 7.14.5(@babel/core@7.26.10) + + babel-preset-jest@29.6.3(@babel/core@7.26.10): + dependencies: + '@babel/core': 7.26.10 + babel-plugin-jest-hoist: 29.6.3 + babel-preset-current-node-syntax: 1.1.0(@babel/core@7.26.10) + + balanced-match@1.0.2: {} + + brace-expansion@1.1.11: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.1: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + browserslist@4.24.4: + dependencies: + caniuse-lite: 1.0.30001715 + electron-to-chromium: 1.5.143 + node-releases: 2.0.19 + update-browserslist-db: 1.1.3(browserslist@4.24.4) + + bs-logger@0.2.6: + dependencies: + fast-json-stable-stringify: 2.1.0 + + bser@2.1.1: + dependencies: + node-int64: 0.4.0 + + buffer-from@1.1.2: {} + + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + callsites@3.1.0: {} + + camelcase@5.3.1: {} + + camelcase@6.3.0: {} + + caniuse-lite@1.0.30001715: {} + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + char-regex@1.0.2: {} + + ci-info@3.9.0: {} + + cjs-module-lexer@1.4.3: {} + + cliui@8.0.1: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + + co@4.6.0: {} + + collect-v8-coverage@1.0.2: {} + + color-convert@2.0.1: + dependencies: + 
color-name: 1.1.4 + + color-name@1.1.4: {} + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + concat-map@0.0.1: {} + + convert-source-map@2.0.0: {} + + create-jest@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)): + dependencies: + '@jest/types': 29.6.3 + chalk: 4.1.2 + exit: 0.1.2 + graceful-fs: 4.2.11 + jest-config: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + jest-util: 29.7.0 + prompts: 2.4.2 + transitivePeerDependencies: + - '@types/node' + - babel-plugin-macros + - supports-color + - ts-node + + create-require@1.1.1: {} + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + debug@4.4.0: + dependencies: + ms: 2.1.3 + + dedent@1.5.3: {} + + deepmerge@4.3.1: {} + + delayed-stream@1.0.0: {} + + detect-newline@3.1.0: {} + + diff-sequences@29.6.3: {} + + diff@4.0.2: {} + + docker-compose@1.2.0: + dependencies: + yaml: 2.7.1 + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + ejs@3.1.10: + dependencies: + jake: 10.9.2 + + electron-to-chromium@1.5.143: {} + + emittery@0.13.1: {} + + emoji-regex@8.0.0: {} + + error-ex@1.3.2: + dependencies: + is-arrayish: 0.2.1 + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.1.0: + dependencies: + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + has-tostringtag: 1.0.2 + hasown: 2.0.2 + + escalade@3.2.0: {} + + escape-string-regexp@2.0.0: {} + + esprima@4.0.1: {} + + execa@5.1.1: + dependencies: + cross-spawn: 7.0.6 + get-stream: 6.0.1 + human-signals: 2.1.0 + is-stream: 2.0.1 + merge-stream: 2.0.0 + npm-run-path: 4.0.1 + onetime: 5.1.2 + signal-exit: 3.0.7 + strip-final-newline: 2.0.0 + + exit@0.1.2: {} + + expect@29.7.0: + dependencies: + '@jest/expect-utils': 29.7.0 + jest-get-type: 29.6.3 + jest-matcher-utils: 29.7.0 + jest-message-util: 29.7.0 + 
jest-util: 29.7.0 + + fast-json-stable-stringify@2.1.0: {} + + fb-watchman@2.0.2: + dependencies: + bser: 2.1.1 + + filelist@1.0.4: + dependencies: + minimatch: 5.1.6 + + fill-range@7.1.1: + dependencies: + to-regex-range: 5.0.1 + + find-up@4.1.0: + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + + follow-redirects@1.15.9: {} + + form-data@4.0.2: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + mime-types: 2.1.35 + + fs.realpath@1.0.0: {} + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + gensync@1.0.0-beta.2: {} + + get-caller-file@2.0.5: {} + + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-package-type@0.1.0: {} + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 + + get-stream@6.0.1: {} + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + globals@11.12.0: {} + + gopd@1.2.0: {} + + graceful-fs@4.2.11: {} + + graphql-request@7.1.2(graphql@16.11.0): + dependencies: + '@graphql-typed-document-node/core': 3.2.0(graphql@16.11.0) + graphql: 16.11.0 + + graphql@16.11.0: {} + + has-flag@4.0.0: {} + + has-symbols@1.1.0: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + html-escaper@2.0.2: {} + + human-signals@2.1.0: {} + + import-local@3.2.0: + dependencies: + pkg-dir: 4.2.0 + resolve-cwd: 3.0.0 + + imurmurhash@0.1.4: {} + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + is-arrayish@0.2.1: {} + + is-core-module@2.16.1: + dependencies: + hasown: 2.0.2 + + is-fullwidth-code-point@3.0.0: {} + + is-generator-fn@2.1.0: {} + + is-number@7.0.0: {} 
+ + is-stream@2.0.1: {} + + isexe@2.0.0: {} + + istanbul-lib-coverage@3.2.2: {} + + istanbul-lib-instrument@5.2.1: + dependencies: + '@babel/core': 7.26.10 + '@babel/parser': 7.27.0 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-coverage: 3.2.2 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + istanbul-lib-instrument@6.0.3: + dependencies: + '@babel/core': 7.26.10 + '@babel/parser': 7.27.0 + '@istanbuljs/schema': 0.1.3 + istanbul-lib-coverage: 3.2.2 + semver: 7.7.1 + transitivePeerDependencies: + - supports-color + + istanbul-lib-report@3.0.1: + dependencies: + istanbul-lib-coverage: 3.2.2 + make-dir: 4.0.0 + supports-color: 7.2.0 + + istanbul-lib-source-maps@4.0.1: + dependencies: + debug: 4.4.0 + istanbul-lib-coverage: 3.2.2 + source-map: 0.6.1 + transitivePeerDependencies: + - supports-color + + istanbul-reports@3.1.7: + dependencies: + html-escaper: 2.0.2 + istanbul-lib-report: 3.0.1 + + jake@10.9.2: + dependencies: + async: 3.2.6 + chalk: 4.1.2 + filelist: 1.0.4 + minimatch: 3.1.2 + + javascript-natural-sort@0.7.1: {} + + jest-changed-files@29.7.0: + dependencies: + execa: 5.1.1 + jest-util: 29.7.0 + p-limit: 3.1.0 + + jest-circus@29.7.0: + dependencies: + '@jest/environment': 29.7.0 + '@jest/expect': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + co: 4.6.0 + dedent: 1.5.3 + is-generator-fn: 2.1.0 + jest-each: 29.7.0 + jest-matcher-utils: 29.7.0 + jest-message-util: 29.7.0 + jest-runtime: 29.7.0 + jest-snapshot: 29.7.0 + jest-util: 29.7.0 + p-limit: 3.1.0 + pretty-format: 29.7.0 + pure-rand: 6.1.0 + slash: 3.0.0 + stack-utils: 2.0.6 + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + jest-cli@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)): + dependencies: + '@jest/core': 29.7.0(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + '@jest/test-result': 29.7.0 + '@jest/types': 29.6.3 + chalk: 4.1.2 + create-jest: 
29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + exit: 0.1.2 + import-local: 3.2.0 + jest-config: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + jest-util: 29.7.0 + jest-validate: 29.7.0 + yargs: 17.7.2 + transitivePeerDependencies: + - '@types/node' + - babel-plugin-macros + - supports-color + - ts-node + + jest-config@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)): + dependencies: + '@babel/core': 7.26.10 + '@jest/test-sequencer': 29.7.0 + '@jest/types': 29.6.3 + babel-jest: 29.7.0(@babel/core@7.26.10) + chalk: 4.1.2 + ci-info: 3.9.0 + deepmerge: 4.3.1 + glob: 7.2.3 + graceful-fs: 4.2.11 + jest-circus: 29.7.0 + jest-environment-node: 29.7.0 + jest-get-type: 29.6.3 + jest-regex-util: 29.6.3 + jest-resolve: 29.7.0 + jest-runner: 29.7.0 + jest-util: 29.7.0 + jest-validate: 29.7.0 + micromatch: 4.0.8 + parse-json: 5.2.0 + pretty-format: 29.7.0 + slash: 3.0.0 + strip-json-comments: 3.1.1 + optionalDependencies: + '@types/node': 22.14.1 + ts-node: 10.9.2(@types/node@22.14.1)(typescript@5.8.3) + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + jest-diff@29.7.0: + dependencies: + chalk: 4.1.2 + diff-sequences: 29.6.3 + jest-get-type: 29.6.3 + pretty-format: 29.7.0 + + jest-docblock@29.7.0: + dependencies: + detect-newline: 3.1.0 + + jest-each@29.7.0: + dependencies: + '@jest/types': 29.6.3 + chalk: 4.1.2 + jest-get-type: 29.6.3 + jest-util: 29.7.0 + pretty-format: 29.7.0 + + jest-environment-node@29.7.0: + dependencies: + '@jest/environment': 29.7.0 + '@jest/fake-timers': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + jest-mock: 29.7.0 + jest-util: 29.7.0 + + jest-get-type@29.6.3: {} + + jest-haste-map@29.7.0: + dependencies: + '@jest/types': 29.6.3 + '@types/graceful-fs': 4.1.9 + '@types/node': 22.14.1 + anymatch: 3.1.3 + fb-watchman: 2.0.2 + graceful-fs: 4.2.11 + jest-regex-util: 29.6.3 + jest-util: 29.7.0 + jest-worker: 
29.7.0 + micromatch: 4.0.8 + walker: 1.0.8 + optionalDependencies: + fsevents: 2.3.3 + + jest-leak-detector@29.7.0: + dependencies: + jest-get-type: 29.6.3 + pretty-format: 29.7.0 + + jest-matcher-utils@29.7.0: + dependencies: + chalk: 4.1.2 + jest-diff: 29.7.0 + jest-get-type: 29.6.3 + pretty-format: 29.7.0 + + jest-message-util@29.7.0: + dependencies: + '@babel/code-frame': 7.26.2 + '@jest/types': 29.6.3 + '@types/stack-utils': 2.0.3 + chalk: 4.1.2 + graceful-fs: 4.2.11 + micromatch: 4.0.8 + pretty-format: 29.7.0 + slash: 3.0.0 + stack-utils: 2.0.6 + + jest-mock@29.7.0: + dependencies: + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + jest-util: 29.7.0 + + jest-pnp-resolver@1.2.3(jest-resolve@29.7.0): + optionalDependencies: + jest-resolve: 29.7.0 + + jest-regex-util@29.6.3: {} + + jest-resolve-dependencies@29.7.0: + dependencies: + jest-regex-util: 29.6.3 + jest-snapshot: 29.7.0 + transitivePeerDependencies: + - supports-color + + jest-resolve@29.7.0: + dependencies: + chalk: 4.1.2 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + jest-pnp-resolver: 1.2.3(jest-resolve@29.7.0) + jest-util: 29.7.0 + jest-validate: 29.7.0 + resolve: 1.22.10 + resolve.exports: 2.0.3 + slash: 3.0.0 + + jest-runner@29.7.0: + dependencies: + '@jest/console': 29.7.0 + '@jest/environment': 29.7.0 + '@jest/test-result': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + emittery: 0.13.1 + graceful-fs: 4.2.11 + jest-docblock: 29.7.0 + jest-environment-node: 29.7.0 + jest-haste-map: 29.7.0 + jest-leak-detector: 29.7.0 + jest-message-util: 29.7.0 + jest-resolve: 29.7.0 + jest-runtime: 29.7.0 + jest-util: 29.7.0 + jest-watcher: 29.7.0 + jest-worker: 29.7.0 + p-limit: 3.1.0 + source-map-support: 0.5.13 + transitivePeerDependencies: + - supports-color + + jest-runtime@29.7.0: + dependencies: + '@jest/environment': 29.7.0 + '@jest/fake-timers': 29.7.0 + '@jest/globals': 29.7.0 + '@jest/source-map': 29.6.3 + '@jest/test-result': 29.7.0 + 
'@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + cjs-module-lexer: 1.4.3 + collect-v8-coverage: 1.0.2 + glob: 7.2.3 + graceful-fs: 4.2.11 + jest-haste-map: 29.7.0 + jest-message-util: 29.7.0 + jest-mock: 29.7.0 + jest-regex-util: 29.6.3 + jest-resolve: 29.7.0 + jest-snapshot: 29.7.0 + jest-util: 29.7.0 + slash: 3.0.0 + strip-bom: 4.0.0 + transitivePeerDependencies: + - supports-color + + jest-snapshot@29.7.0: + dependencies: + '@babel/core': 7.26.10 + '@babel/generator': 7.27.0 + '@babel/plugin-syntax-jsx': 7.25.9(@babel/core@7.26.10) + '@babel/plugin-syntax-typescript': 7.25.9(@babel/core@7.26.10) + '@babel/types': 7.27.0 + '@jest/expect-utils': 29.7.0 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + babel-preset-current-node-syntax: 1.1.0(@babel/core@7.26.10) + chalk: 4.1.2 + expect: 29.7.0 + graceful-fs: 4.2.11 + jest-diff: 29.7.0 + jest-get-type: 29.6.3 + jest-matcher-utils: 29.7.0 + jest-message-util: 29.7.0 + jest-util: 29.7.0 + natural-compare: 1.4.0 + pretty-format: 29.7.0 + semver: 7.7.1 + transitivePeerDependencies: + - supports-color + + jest-util@29.7.0: + dependencies: + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + chalk: 4.1.2 + ci-info: 3.9.0 + graceful-fs: 4.2.11 + picomatch: 2.3.1 + + jest-validate@29.7.0: + dependencies: + '@jest/types': 29.6.3 + camelcase: 6.3.0 + chalk: 4.1.2 + jest-get-type: 29.6.3 + leven: 3.1.0 + pretty-format: 29.7.0 + + jest-watcher@29.7.0: + dependencies: + '@jest/test-result': 29.7.0 + '@jest/types': 29.6.3 + '@types/node': 22.14.1 + ansi-escapes: 4.3.2 + chalk: 4.1.2 + emittery: 0.13.1 + jest-util: 29.7.0 + string-length: 4.0.2 + + jest-worker@29.7.0: + dependencies: + '@types/node': 22.14.1 + jest-util: 29.7.0 + merge-stream: 2.0.0 + supports-color: 8.1.1 + + jest@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)): + dependencies: + '@jest/core': 29.7.0(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + '@jest/types': 29.6.3 + 
import-local: 3.2.0 + jest-cli: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + transitivePeerDependencies: + - '@types/node' + - babel-plugin-macros + - supports-color + - ts-node + + js-tokens@4.0.0: {} + + js-yaml@3.14.1: + dependencies: + argparse: 1.0.10 + esprima: 4.0.1 + + jsesc@3.1.0: {} + + json-parse-even-better-errors@2.3.1: {} + + json5@2.2.3: {} + + kleur@3.0.3: {} + + lago-javascript-client@1.27.1: {} + + leven@3.1.0: {} + + lines-and-columns@1.2.4: {} + + locate-path@5.0.0: + dependencies: + p-locate: 4.1.0 + + lodash.memoize@4.1.2: {} + + lodash@4.17.21: {} + + lru-cache@5.1.1: + dependencies: + yallist: 3.1.1 + + make-dir@4.0.0: + dependencies: + semver: 7.7.1 + + make-error@1.3.6: {} + + makeerror@1.0.12: + dependencies: + tmpl: 1.0.5 + + math-intrinsics@1.1.0: {} + + merge-stream@2.0.0: {} + + micromatch@4.0.8: + dependencies: + braces: 3.0.3 + picomatch: 2.3.1 + + mime-db@1.52.0: {} + + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + + mimic-fn@2.1.0: {} + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.11 + + minimatch@5.1.6: + dependencies: + brace-expansion: 2.0.1 + + ms@2.1.3: {} + + natural-compare@1.4.0: {} + + node-int64@0.4.0: {} + + node-releases@2.0.19: {} + + normalize-path@3.0.0: {} + + npm-run-path@4.0.1: + dependencies: + path-key: 3.1.1 + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + onetime@5.1.2: + dependencies: + mimic-fn: 2.1.0 + + p-limit@2.3.0: + dependencies: + p-try: 2.2.0 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@4.1.0: + dependencies: + p-limit: 2.3.0 + + p-try@2.2.0: {} + + parse-json@5.2.0: + dependencies: + '@babel/code-frame': 7.26.2 + error-ex: 1.3.2 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + + path-exists@4.0.0: {} + + path-is-absolute@1.0.1: {} + + path-key@3.1.1: {} + + path-parse@1.0.7: {} + + picocolors@1.1.1: {} + + picomatch@2.3.1: {} + + pirates@4.0.7: {} + + pkg-dir@4.2.0: + dependencies: + find-up: 
4.1.0 + + prettier@3.5.3: {} + + pretty-format@29.7.0: + dependencies: + '@jest/schemas': 29.6.3 + ansi-styles: 5.2.0 + react-is: 18.3.1 + + prompts@2.4.2: + dependencies: + kleur: 3.0.3 + sisteransi: 1.0.5 + + proxy-from-env@1.1.0: {} + + pure-rand@6.1.0: {} + + react-is@18.3.1: {} + + require-directory@2.1.1: {} + + resolve-cwd@3.0.0: + dependencies: + resolve-from: 5.0.0 + + resolve-from@5.0.0: {} + + resolve.exports@2.0.3: {} + + resolve@1.22.10: + dependencies: + is-core-module: 2.16.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + semver@6.3.1: {} + + semver@7.7.1: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + signal-exit@3.0.7: {} + + simple-git@3.27.0: + dependencies: + '@kwsites/file-exists': 1.1.1 + '@kwsites/promise-deferred': 1.1.1 + debug: 4.4.0 + transitivePeerDependencies: + - supports-color + + sisteransi@1.0.5: {} + + slash@3.0.0: {} + + source-map-support@0.5.13: + dependencies: + buffer-from: 1.1.2 + source-map: 0.6.1 + + source-map@0.6.1: {} + + sprintf-js@1.0.3: {} + + stack-utils@2.0.6: + dependencies: + escape-string-regexp: 2.0.0 + + string-length@4.0.2: + dependencies: + char-regex: 1.0.2 + strip-ansi: 6.0.1 + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-bom@4.0.0: {} + + strip-final-newline@2.0.0: {} + + strip-json-comments@3.1.1: {} + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + supports-color@8.1.1: + dependencies: + has-flag: 4.0.0 + + supports-preserve-symlinks-flag@1.0.0: {} + + test-exclude@6.0.0: + dependencies: + '@istanbuljs/schema': 0.1.3 + glob: 7.2.3 + minimatch: 3.1.2 + + tmpl@1.0.5: {} + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + 
ts-jest@29.3.2(@babel/core@7.26.10)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.26.10))(jest@29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)))(typescript@5.8.3): + dependencies: + bs-logger: 0.2.6 + ejs: 3.1.10 + fast-json-stable-stringify: 2.1.0 + jest: 29.7.0(@types/node@22.14.1)(ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3)) + jest-util: 29.7.0 + json5: 2.2.3 + lodash.memoize: 4.1.2 + make-error: 1.3.6 + semver: 7.7.1 + type-fest: 4.40.1 + typescript: 5.8.3 + yargs-parser: 21.1.1 + optionalDependencies: + '@babel/core': 7.26.10 + '@jest/transform': 29.7.0 + '@jest/types': 29.6.3 + babel-jest: 29.7.0(@babel/core@7.26.10) + + ts-node@10.9.2(@types/node@22.14.1)(typescript@5.8.3): + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.11 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.4 + '@types/node': 22.14.1 + acorn: 8.14.1 + acorn-walk: 8.3.4 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.8.3 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + + type-detect@4.0.8: {} + + type-fest@0.21.3: {} + + type-fest@4.40.1: {} + + typescript@5.8.3: {} + + undici-types@6.21.0: {} + + update-browserslist-db@1.1.3(browserslist@4.24.4): + dependencies: + browserslist: 4.24.4 + escalade: 3.2.0 + picocolors: 1.1.1 + + v8-compile-cache-lib@3.0.1: {} + + v8-to-istanbul@9.3.0: + dependencies: + '@jridgewell/trace-mapping': 0.3.25 + '@types/istanbul-lib-coverage': 2.0.6 + convert-source-map: 2.0.0 + + walker@1.0.8: + dependencies: + makeerror: 1.0.12 + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + wrap-ansi@7.0.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + + wrappy@1.0.2: {} + + write-file-atomic@4.0.2: + dependencies: + imurmurhash: 0.1.4 + signal-exit: 3.0.7 + + y18n@5.0.8: {} + + yallist@3.1.1: {} + + yaml@2.7.1: {} + + yargs-parser@21.1.1: {} + + yargs@17.7.2: + 
dependencies: + cliui: 8.0.1 + escalade: 3.2.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + + yn@3.1.1: {} + + yocto-queue@0.1.0: {} diff --git a/CloudronPackages/APISIX/apisix-source/t/pubsub/kafka.t b/CloudronPackages/APISIX/apisix-source/t/pubsub/kafka.t new file mode 100644 index 0000000..b779a48 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/pubsub/kafka.t @@ -0,0 +1,372 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use Cwd qw(cwd); +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +my $apisix_home = $ENV{APISIX_HOME} // cwd(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $block_init = <<_EOC_; + `ln -sf $apisix_home/apisix $apisix_home/t/servroot/apisix`; +_EOC_ + + $block->set_value("init", $block_init); + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +add_test_cleanup_handler(sub { + `rm -f $apisix_home/t/servroot/apisix`; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: setup all-in-one test +--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/routes/kafka", + data = [[{ + "upstream": { + "nodes": { + "127.0.0.1:9092": 1 + }, + "type": "none", + "scheme": "kafka" + }, + "uri": "/kafka" + }]], + }, + { + url = "/apisix/admin/routes/kafka-invalid", + data = [[{ + "upstream": { + "nodes": { + "127.0.0.1:59092": 1 + }, + "type": "none", + "scheme": "kafka" + }, + "uri": "/kafka-invalid" + }]], + }, + { + url = "/apisix/admin/routes/kafka-tlsv", + data = [[{ + "upstream": { + "nodes": { + "127.0.0.1:9093": 1 + }, + "type": "none", + "scheme": "kafka", + "tls": { + "verify": true + } + }, + "uri": "/kafka-tlsv" + }]], + }, + { + url = "/apisix/admin/routes/kafka-tls", + data = [[{ + "upstream": { + "nodes": { + "127.0.0.1:9093": 1 + }, + "type": "none", + "scheme": "kafka", + "tls": { + "verify": false + } + }, + "uri": "/kafka-tls" + }]], + }, + { + url = "/apisix/admin/routes/kafka-sasl", + data = [[{ + "upstream": { + "nodes": { + "127.0.0.1:9094": 1 + }, + "type": "none", + "scheme": "kafka" + }, + "uri": "/kafka-sasl", + "plugins": { + "kafka-proxy": { + "sasl": { + "username": "admin", + "password": "admin-secret" + } + } + } + }]], + }, + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + ngx.say(body) + end + } + } +--- response_body eval 
+"passed\n"x5 + + + +=== TEST 2: hit route (with HTTP request) +--- request +GET /kafka +--- error_code: 400 +--- error_log +failed to initialize pubsub module, err: bad "upgrade" request header: nil + + + +=== TEST 3: hit route (Kafka) +--- config + # The messages used in this test are produced in the linux-ci-init-service.sh + # script that prepares the CI environment + location /t { + content_by_lua_block { + local pb = require("pb") + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/kafka") + local data = { + { + sequence = 0, + cmd_kafka_list_offset = { + topic = "not-exist", + partition = 0, + timestamp = -1, + }, + }, + { + sequence = 1, + cmd_kafka_fetch = { + topic = "not-exist", + partition = 0, + offset = 0, + }, + }, + { + -- Query first message offset + sequence = 2, + cmd_kafka_list_offset = { + topic = "test-consumer", + partition = 0, + timestamp = -2, + }, + }, + { + -- Query last message offset + sequence = 3, + cmd_kafka_list_offset = { + topic = "test-consumer", + partition = 0, + timestamp = -1, + }, + }, + { + -- Query by timestamp, 9999999999999 later than the + -- production time of any message + sequence = 4, + cmd_kafka_list_offset = { + topic = "test-consumer", + partition = 0, + timestamp = "9999999999999", + }, + }, + { + -- Query by timestamp, 1500000000000 ms earlier than the + -- production time of any message + sequence = 5, + cmd_kafka_list_offset = { + topic = "test-consumer", + partition = 0, + timestamp = "1500000000000", + }, + }, + { + sequence = 6, + cmd_kafka_fetch = { + topic = "test-consumer", + partition = 0, + offset = 14, + }, + }, + { + sequence = 7, + cmd_kafka_fetch = { + topic = "test-consumer", + partition = 0, + offset = 999, + }, + }, + } + + for i = 1, #data do + -- force clear state + pb.state(nil) + local data = test_pubsub:send_recv_ws_binary(data[i]) + if data.error_resp then + ngx.say(data.sequence..data.error_resp.message) + end + if 
data.kafka_list_offset_resp then + ngx.say(data.sequence.."offset: "..data.kafka_list_offset_resp.offset) + end + if data.kafka_fetch_resp then + ngx.say(data.sequence.."offset: "..data.kafka_fetch_resp.messages[1].offset.. + " msg: "..data.kafka_fetch_resp.messages[1].value) + end + end + test_pubsub:close_ws() + } + } +--- response_body +0failed to list offset, topic: not-exist, partition: 0, err: not found topic +1failed to fetch message, topic: not-exist, partition: 0, err: not found topic +2offset: 0 +3offset: 30 +4offset: -1 +5offset: 0 +6offset: 14 msg: testmsg15 +7failed to fetch message, topic: test-consumer, partition: 0, err: OFFSET_OUT_OF_RANGE + + + +=== TEST 4: hit route (Kafka with invalid node ip) +--- config + # The messages used in this test are produced in the linux-ci-init-service.sh + # script that prepares the CI environment + location /t { + content_by_lua_block { + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/kafka-invalid") + + local data = test_pubsub:send_recv_ws_binary({ + sequence = 0, + cmd_kafka_list_offset = { + topic = "test-consumer", + partition = 0, + timestamp = -2, + }, + }) + if data.error_resp then + ngx.say(data.sequence..data.error_resp.message) + end + test_pubsub:close_ws() + } + } +--- response_body +0failed to list offset, topic: test-consumer, partition: 0, err: not found topic +--- error_log +all brokers failed in fetch topic metadata + + + +=== TEST 5: hit route (Kafka with TLS) +--- config + location /t { + content_by_lua_block { + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/kafka-tls") + + local data = test_pubsub:send_recv_ws_binary({ + sequence = 0, + cmd_kafka_list_offset = { + topic = "test-consumer", + partition = 0, + timestamp = -1, + }, + }) + if data.kafka_list_offset_resp then + ngx.say(data.sequence.."offset: "..data.kafka_list_offset_resp.offset) + end + test_pubsub:close_ws() + } + } +--- 
response_body +0offset: 30 + + + +=== TEST 6: hit route (Kafka with TLS + ssl verify) +--- config + location /t { + content_by_lua_block { + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/kafka-tlsv") + + local data = test_pubsub:send_recv_ws_binary({ + sequence = 0, + cmd_kafka_list_offset = { + topic = "test-consumer", + partition = 0, + timestamp = -1, + }, + }) + if data.kafka_list_offset_resp then + ngx.say(data.sequence.."offset: "..data.kafka_list_offset_resp.offset) + end + test_pubsub:close_ws() + } + } +--- error_log eval +qr/self[- ]signed certificate/ + + + +=== TEST 7: hit route (Kafka with SASL) +--- config + location /t { + content_by_lua_block { + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/kafka-sasl") + + local data = test_pubsub:send_recv_ws_binary({ + sequence = 0, + cmd_kafka_list_offset = { + topic = "test-consumer", + partition = 0, + timestamp = -1, + }, + }) + if data.kafka_list_offset_resp then + ngx.say(data.sequence.."offset: "..data.kafka_list_offset_resp.offset) + end + test_pubsub:close_ws() + } + } +--- response_body +0offset: 30 diff --git a/CloudronPackages/APISIX/apisix-source/t/pubsub/pubsub.t b/CloudronPackages/APISIX/apisix-source/t/pubsub/pubsub.t new file mode 100644 index 0000000..3e0dbae --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/pubsub/pubsub.t @@ -0,0 +1,237 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use Cwd qw(cwd); +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +my $apisix_home = $ENV{APISIX_HOME} // cwd(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $block_init = <<_EOC_; + `ln -sf $apisix_home/apisix $apisix_home/t/servroot/apisix`; +_EOC_ + + $block->set_value("init", $block_init); + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +add_test_cleanup_handler(sub { + `rm -f $apisix_home/t/servroot/apisix`; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: setup route by serverless +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/routes/pubsub", ngx.HTTP_PUT, { + plugins = { + ["serverless-pre-function"] = { + phase = "access", + functions = { + [[return function(conf, ctx) + local core = require("apisix.core"); + local pubsub, err = core.pubsub.new() + if not pubsub then + core.log.error("failed to initialize pubsub module, err: ", err) + core.response.exit(400) + return + end + pubsub:on("cmd_ping", function (params) + if params.state == "test" then + return {pong_resp = {state = "test"}} + end + return nil, "error" + end) + pubsub:wait() + ngx.exit(0) + end]], + } + } + }, + uri = "/pubsub" + }) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit route (with HTTP request) +--- request +GET /pubsub +--- error_code: 400 +--- error_log +failed to initialize pubsub module, err: bad "upgrade" request header: 
nil + + + +=== TEST 3: connect websocket service +--- config + location /t { + content_by_lua_block { + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/pubsub") + local data = test_pubsub:send_recv_ws_binary({ + sequence = 0, + cmd_ping = { + state = "test" + }, + }) + if data and data.pong_resp then + ngx.say("ret: ", data.pong_resp.state) + end + test_pubsub:close_ws() + } + } +--- response_body +ret: test + + + +=== TEST 4: connect websocket service (return error) +--- config + location /t { + content_by_lua_block { + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/pubsub") + local data = test_pubsub:send_recv_ws_binary({ + sequence = 0, + cmd_ping = { + state = "non-test" + }, + }) + if data and data.error_resp then + ngx.say("ret: ", data.error_resp.message) + end + test_pubsub:close_ws() + } + } +--- response_body +ret: error + + + +=== TEST 5: send unregistered command +--- config + location /t { + content_by_lua_block { + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/pubsub") + local data = test_pubsub:send_recv_ws_binary({ + sequence = 0, + cmd_empty = {}, + }) + if data and data.error_resp then + ngx.say(data.error_resp.message) + end + test_pubsub:close_ws() + } + } +--- response_body +unknown command +--- error_log +pubsub callback handler not registered for the command, command: cmd_empty + + + +=== TEST 6: send text command (server skip command, keep connection) +--- config + location /t { + lua_check_client_abort on; + content_by_lua_block { + ngx.on_abort(function () + ngx.log(ngx.ERR, "text command is skipped, and close connection") + ngx.exit(444) + end) + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/pubsub") + test_pubsub:send_recv_ws_text("test") + test_pubsub:close_ws() + } + } +--- abort +--- ignore_response +--- error_log 
+pubsub server receive non-binary data, type: text, data: test +text command is skipped, and close connection +fatal error in pubsub websocket server, err: failed to receive the first 2 bytes: closed + + + +=== TEST 7: send wrong command: empty (server skip command, keep connection) +--- config + location /t { + lua_check_client_abort on; + content_by_lua_block { + ngx.on_abort(function () + ngx.log(ngx.ERR, "empty command is skipped, and close connection") + ngx.exit(444) + end) + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/pubsub") + test_pubsub:send_recv_ws_binary({}) + test_pubsub:close_ws() + } + } +--- abort +--- ignore_response +--- error_log +pubsub server receives empty command +empty command is skipped, and close connection +fatal error in pubsub websocket server, err: failed to receive the first 2 bytes: closed + + + +=== TEST 8: send wrong command: undecodable (server skip command, keep connection) +--- config + location /t { + lua_check_client_abort on; + content_by_lua_block { + ngx.on_abort(function () + ngx.log(ngx.ERR, "empty command is skipped, and close connection") + ngx.exit(444) + end) + local lib_pubsub = require("lib.pubsub") + local test_pubsub = lib_pubsub.new_ws("ws://127.0.0.1:1984/pubsub") + test_pubsub:send_recv_ws_binary("!@#$%^&*中文", true) + test_pubsub:close_ws() + } + } +--- abort +--- ignore_response +--- error_log +pubsub server receives empty command +empty command is skipped, and close connection +fatal error in pubsub websocket server, err: failed to receive the first 2 bytes: closed diff --git a/CloudronPackages/APISIX/apisix-source/t/router/graphql.t b/CloudronPackages/APISIX/apisix-source/t/router/graphql.t new file mode 100644 index 0000000..b6936fc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/graphql.t @@ -0,0 +1,374 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route by name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["POST"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [["graphql_name", "==", "repo"]] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: route by name +--- request +POST /hello +query repo { + owner { + name + } +} +--- response_body +hello world + + + +=== TEST 3: set route by operation+name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["POST"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + 
"uri": "/hello", + "vars": [ + ["graphql_operation", "==", "mutation"], + ["graphql_name", "==", "repo"] + ] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: route by operation+name +--- request +POST /hello +mutation repo($ep: Episode!, $review: ReviewInput!) { + createReview(episode: $ep, review: $review) { + stars + commentary + } +} +--- response_body +hello world + + + +=== TEST 5: route by operation+name, miss +--- request +POST /hello +query repo { + owner { + name + } +} +--- error_code: 404 + + + +=== TEST 6: multiple operations +--- request +POST /hello +mutation repo($ep: Episode!, $review: ReviewInput!) { + createReview(episode: $ep, review: $review) { + stars + commentary + } +} +query repo { + owner { + name + } +} +--- response_body +hello world +--- error_log +Multiple operations are not supported + + + +=== TEST 7: bad graphql +--- request +POST /hello +AA +--- error_code: 404 +--- error_log +failed to parse graphql: Syntax error near line 1 body: AA + + + +=== TEST 8: set anonymous operation name +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["POST"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [ + ["graphql_operation", "==", "query"], + ["graphql_name", "==", ""] + ] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: route by anonymous name +--- request +POST /hello +query { + owner { + name + } +} +--- response_body +hello world + + + +=== TEST 10: limit the max size +--- yaml_config +graphql: + max_size: 5 +--- request +POST /hello +query { + owner { + name + } +} +--- error_code: 404 +--- error_log +failed to read graphql data + + + +=== TEST 11: set graphql_root_fields +--- config + 
location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["POST", "GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [ + ["graphql_operation", "==", "query"], + ["graphql_root_fields", "has", "owner"] + ] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: single root field +--- request +POST /hello +query { + owner { + name + } +} +--- response_body +hello world + + + +=== TEST 13: test send http post json data +--- request +POST /hello +{"query":"query{owner{name}}"} +--- more_headers +Content-Type: application/json +--- response_body +hello world + + + +=== TEST 14: test send http get query data +--- request +GET /hello?query=query{owner{name}} +--- response_body +hello world + + + +=== TEST 15: test send http get multiple query data success +--- request +GET /hello?query=query{owner{name}}&query=query{repo{name}} +--- response_body +hello world + + + +=== TEST 16: test send http get multiple query data failure +--- request +GET /hello?query=query{repo{name}}&query=query{owner{name}} +--- error_code: 404 + + + +=== TEST 17: no body (HTTP GET) +--- request +GET /hello +--- error_code: 404 +--- error_log +failed to read graphql data, args[query] is nil + + + +=== TEST 18: no body (HTTP POST JSON) +--- request +POST /hello +{} +--- more_headers +Content-Type: application/json +--- error_code: 404 +--- error_log +failed to read graphql data, json body[query] is nil + + + +=== TEST 19: multiple root fields +--- request +POST /hello +query { + repo { + stars + } + owner { + name + } +} +--- response_body +hello world + + + +=== TEST 20: root fields mismatch +--- request +POST /hello +query { + repo { + name + } +} +--- error_code: 404 + + + +=== TEST 21: no body +--- request +POST /hello +--- error_code: 404 +--- 
error_log +failed to read graphql data, request body has zero size diff --git a/CloudronPackages/APISIX/apisix-source/t/router/multi-ssl-certs.t b/CloudronPackages/APISIX/apisix-source/t/router/multi-ssl-certs.t new file mode 100644 index 0000000..4ae7ec2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/multi-ssl-certs.t @@ -0,0 +1,342 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +no_root_location(); + +$ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + +run_tests; + +__DATA__ + +=== TEST 1: set ssl(sni: www.test.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: client request +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + + local req = "GET /hello HTTP/1.0\r\nHost: www.test.com\r\nConnection: close\r\n\r\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + 
return + end + + ngx.say("sent http request: ", bytes, " bytes.") + + while true do + local line, err = sock:receive() + if not line then + -- ngx.say("failed to receive response status line: ", err) + break + end + + ngx.say("received: ", line) + end + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? +received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 4: set second ssl(sni: *.test2.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "*.test2.com"} + + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "*.test2.com" + }, + "key": "/apisix/ssls/2" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: client request: www.test2.com +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", 
sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body_like +connected: 1 +failed to do SSL handshake: 18: self[- ]signed certificate +--- error_log +server name: "www.test2.com" +we have more than 1 ssl certs now +--- no_error_log +[error] +[alert] + + + +=== TEST 6: set third ssl(sni: apisix.dev) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix_admin_ssl.crt") + local ssl_key = t.read_file("t/certs/apisix_admin_ssl.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "apisix.dev"} + + local code, body = t.test('/apisix/admin/ssls/3', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "apisix.dev" + }, + "key": "/apisix/ssls/3" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: client request: apisix.dev +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "apisix.dev", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body_like +connected: 1 +failed to do SSL handshake: 18: self[- ]signed certificate +--- error_log +server name: "apisix.dev" +we have more than 1 ssl certs now +--- no_error_log +[error] +[alert] + + + +=== TEST 8: remove test ssl certs +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + 
t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) + t.test('/apisix/admin/ssls/2', ngx.HTTP_DELETE) + t.test('/apisix/admin/ssls/3', ngx.HTTP_DELETE) + + } +} +--- request +GET /t diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri-priority.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri-priority.t new file mode 100644 index 0000000..a2bb56c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri-priority.t @@ -0,0 +1,171 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_host_uri' + admin_key: null +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: hit routes(priority: 1 + priority: 2) +--- apisix_yaml +routes: + - + uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin + priority: 1 + - + uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + priority: 2 +#END + +--- request +GET /server_port +--- more_headers +Host: test.com +--- response_body eval +qr/1980/ +--- error_log +use config_provider: yaml + + + +=== TEST 2: hit routes(priority: 2 + priority: 1) +--- apisix_yaml +routes: + - + uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin + priority: 2 + - + uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + priority: 1 +#END + +--- request +GET /server_port +--- more_headers +Host: test.com +--- response_body eval +qr/1981/ +--- error_log +use config_provider: yaml + + + +=== TEST 3: hit routes(priority: default_value + priority: 1) +--- apisix_yaml +routes: + - + uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin + - + uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin + priority: 1 +#END + +--- request +GET /server_port +--- more_headers +Host: test.com +--- response_body eval +qr/1980/ +--- error_log +use config_provider: yaml + + + +=== TEST 4: hit routes(priority: 1 + priority: default_value) +--- apisix_yaml +routes: + - 
+ uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin + priority: 1 + - + uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END + +--- request +GET /server_port +--- more_headers +Host: test.com +--- response_body eval +qr/1981/ +--- error_log +use config_provider: yaml diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri.t new file mode 100644 index 0000000..be29464 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri.t @@ -0,0 +1,285 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_host_uri' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(host + uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "foo.com", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: /not_found +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: /not_found +--- request +GET /hello +--- more_headers +Host: not_found.com +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 5: hit routes +--- request +GET /hello +--- more_headers +Host: foo.com +--- response_body +hello world + + + +=== TEST 6: hit routes +--- request +GET /hello +--- more_headers +Host: foo.com +--- response_body +hello world + + + +=== TEST 7: set route(only uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port", + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed 
+ + + +=== TEST 8: /not_found +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 9: hit routes +--- request +GET /server_port +--- more_headers +Host: anydomain.com +--- response_body_like eval +qr/1981/ + + + +=== TEST 10: set route(only uri + id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: /not_found +--- request +GET /hello2 +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 12: hit routes +--- request +GET /hello +--- more_headers +Host: anydomain.com +--- response_body +hello world + + + +=== TEST 13: delete route(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: set route(wildcard host + uri) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "host": "*.foo.com", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: hit routes +--- request +GET /hello +--- more_headers +Host: www.foo.com +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri2.t 
b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri2.t new file mode 100644 index 0000000..40936f7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri2.t @@ -0,0 +1,388 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_host_uri' + admin_key: null +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: test.com +--- apisix_yaml +routes: + - + uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin + - + uri: /server_port + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END + +--- request +GET /server_port +--- more_headers +Host: test.com +--- response_body eval +qr/1981/ +--- error_log +use config_provider: yaml + + + +=== TEST 2: *.test.com + uri +--- apisix_yaml +routes: + - + uri: /server_port + host: "*.test.com" + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin + - + uri: /server_port + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END + +--- request +GET /server_port +--- more_headers +Host: www.test.com +--- response_body eval +qr/1981/ +--- error_log +use config_provider: yaml + + + +=== TEST 3: *.test.com + /* +--- apisix_yaml +routes: + - + uri: /* + host: "*.test.com" + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin + - + uri: /server_port + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END + +--- request +GET /server_port +--- more_headers +Host: www.test.com +--- response_body eval +qr/1981/ +--- error_log +use config_provider: yaml + + + +=== TEST 4: filter_func(not match) +--- apisix_yaml +routes: + - + uri: /* + host: "*.test.com" + filter_func: "function(vars) return vars.arg_name == 'json' end" + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin + - + uri: /server_port + upstream: + nodes: + "127.0.0.1:1980": 1 
+ type: roundrobin +#END + +--- request +GET /server_port?name=unknown +--- more_headers +Host: www.test.com +--- response_body eval +qr/1980/ +--- error_log +use config_provider: yaml + + + +=== TEST 5: filter_func(match) +--- apisix_yaml +routes: + - + uri: /* + host: "*.test.com" + filter_func: "function(vars) return vars.arg_name == 'json' end" + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin + - + uri: /server_port + upstream: + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +#END + +--- request +GET /server_port?name=json +--- more_headers +Host: www.test.com +--- response_body eval +qr/1981/ +--- error_log +use config_provider: yaml + + + +=== TEST 6: set route with ':' +--- yaml_config +apisix: + node_listen: 1984 + router: + http: 'radixtree_host_uri' + admin_key: null +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/file:listReputationHistories", + "plugins":{"proxy-rewrite":{"uri":"/hello"}}, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit routes +--- yaml_config +apisix: + router: + http: 'radixtree_host_uri' +--- request +GET /file:listReputationHistories +--- response_body +hello world + + + +=== TEST 8: not hit +--- yaml_config +apisix: + router: + http: 'radixtree_host_uri' +--- request +GET /file:xx +--- error_code: 404 + + + +=== TEST 9: set route with ':' & host +--- yaml_config +apisix: + node_listen: 1984 + router: + http: 'radixtree_host_uri' + admin_key: null +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/do:listReputationHistories", + "hosts": ["t.com"], + 
"plugins":{"proxy-rewrite":{"uri":"/hello"}}, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: hit routes +--- yaml_config +apisix: + router: + http: 'radixtree_host_uri' +--- request +GET /do:listReputationHistories +--- more_headers +Host: t.com +--- response_body +hello world + + + +=== TEST 11: not hit +--- yaml_config +apisix: + router: + http: 'radixtree_host_uri' +--- request +GET /do:xx +--- more_headers +Host: t.com +--- error_code: 404 + + + +=== TEST 12: request host with uppercase +--- apisix_yaml +routes: + - + uri: /server_port + host: test.com + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin +#END +--- request +GET /server_port +--- more_headers +Host: tEst.com + + + +=== TEST 13: configure host with uppercase +--- apisix_yaml +routes: + - + uri: /server_port + host: test.coM + upstream: + nodes: + "127.0.0.1:1981": 1 + type: roundrobin +#END +--- request +GET /server_port +--- more_headers +Host: test.com + + + +=== TEST 14: inherit hosts from services +--- apisix_yaml +services: + - id: 1 + hosts: + - bar.com +upstreams: + - id: 1 + nodes: + "127.0.0.1:1980": 1 + type: roundrobin +routes: + - + service_id: 1 + upstream_id: 1 + uri: /hello + plugins: + proxy-rewrite: + uri: /hello1 + - + upstream_id: 1 + uri: /hello + priority: -1 +#END +--- more_headers +Host: www.foo.com +--- request +GET /hello +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri3.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri3.t new file mode 100644 index 0000000..0d57788 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-host-uri3.t @@ -0,0 +1,355 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_host_uri' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } + + if (!$block->error_log && !$block->no_error_log && + (defined $block->error_code && $block->error_code != 502)) + { + $block->set_value("no_error_log", "[error]"); + } + + $block; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: change hosts in services +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["foo.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "service_id": "1", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + for _, h in ipairs({"foo.com", "bar.com"}) do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {Host = h}}) + if not res then + ngx.say(err) + return + end + if res.status == 404 then + ngx.say(res.status) + else + ngx.print(res.body) + end + end + + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["bar.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + for _, h in ipairs({"foo.com", "bar.com"}) do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {Host = h}}) + if not res then + ngx.say(err) + return + end + if res.status == 404 then + ngx.say(res.status) + else + ngx.print(res.body) + end + end + } + } +--- response_body +hello world +404 +404 +hello world + + + +=== TEST 2: check matched._path +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["foo.com"], + "plugins": { + "serverless-post-function": { + "functions" : ["return function(conf, ctx) + ngx.log(ngx.WARN, 'matched uri: ', ctx.curr_req_matched._path); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: hit, plain path +--- request +GET /hello +--- more_headers +Host: foo.com +--- grep_error_log eval +qr/matched uri: \/\w+/ +--- grep_error_log_out +matched uri: /hello + + + +=== TEST 4: check matched._path, wildcard +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["foo.com"], + "plugins": { + "serverless-post-function": { + "functions" : ["return function(conf, ctx) + ngx.log(ngx.WARN, 'matched uri: ', ctx.curr_req_matched._path); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit +--- request +GET /hello +--- more_headers +Host: foo.com +--- grep_error_log eval +qr/matched uri: \/\S+,/ +--- grep_error_log_out +matched uri: /*, + + + +=== TEST 6: check matched._host +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["foo.com"], + "plugins": { + "serverless-post-function": { + "functions" : ["return function(conf, ctx) + ngx.log(ngx.WARN, 'matched host: ', ctx.curr_req_matched._host); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit +--- request +GET /hello +--- more_headers +Host: foo.com +--- grep_error_log eval +qr/func\(\): matched host: [^,]+/ +--- grep_error_log_out +func(): matched host: foo.com + + + +=== TEST 8: check matched._host, wildcard +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["*.com"], + "plugins": { + "serverless-post-function": { + "functions" : ["return function(conf, ctx) + ngx.log(ngx.WARN, 'matched host: ', ctx.curr_req_matched._host); + end"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit +--- request +GET /hello +--- more_headers +Host: foo.com +--- grep_error_log eval +qr/func\(\): matched host: [^,]+/ +--- grep_error_log_out +func(): matched host: *.com diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-method.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-method.t new file mode 100644 index 0000000..f17826e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-method.t @@ -0,0 
+1,113 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route without PURGE method +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: route mismatch +--- request +PURGE /hello +--- error_code: 404 + + + +=== TEST 3: set route with PURGE method +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "PURGE"], + "upstream": { + "nodes": { + 
"127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: route match PURGE method +--- request +PURGE /hello +--- error_code: 200 diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-sni.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-sni.t new file mode 100644 index 0000000..781bf28 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-sni.t @@ -0,0 +1,826 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +no_root_location(); + +$ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: set ssl(sni: www.test.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: client request +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + + local req = "GET /hello HTTP/1.0\r\nHost: www.test.com\r\nConnection: close\r\n\r\n" + local bytes, 
err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + ngx.say("sent http request: ", bytes, " bytes.") + + while true do + local line, err = sock:receive() + if not line then + -- ngx.say("failed to receive response status line: ", err) + break + end + + ngx.say("received: ", line) + end + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? +received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 4: client request(no cert domain) +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "no-cert.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + end + } +} +--- response_body +connected: 1 +failed to do SSL handshake: handshake failed +--- error_log +failed to match any SSL certificate by SNI + + + +=== TEST 5: set ssl(sni: wildcard) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "*.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + 
core.json.encode(data), + [[{ + "value": { + "sni": "*.test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 6: client request +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + + local req = "GET /hello HTTP/1.0\r\nHost: www.test.com\r\nConnection: close\r\n\r\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + ngx.say("sent http request: ", bytes, " bytes.") + + while true do + local line, err = sock:receive() + if not line then + -- ngx.say("failed to receive response status line: ", err) + break + end + + ngx.say("received: ", line) + end + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? 
+received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 7: set ssl(sni: test.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 8: client request: test.com +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "test.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + + local req = "GET /hello HTTP/1.0\r\nHost: test.com\r\nConnection: close\r\n\r\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + + ngx.say("sent http request: ", bytes, " bytes.") + + while true do + local line, err = sock:receive() + if not line then + -- ngx.say("failed to receive response status line: ", err) + break + end + + ngx.say("received: ", line) + end + + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 58 bytes. 
+received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? +received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "test.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 9: set ssl(sni: *.test2.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "*.test2.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "*.test2.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 10: client request: www.test2.com +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body_like +connected: 1 +failed to do SSL handshake: 18: self[- ]signed certificate +--- error_log +server name: "www.test2.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 11: client request: aa.bb.test2.com +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = 
sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "aa.bb.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +connected: 1 +failed to do SSL handshake: handshake failed +--- error_log +server name: "aa.bb.test2.com" +failed to find any SSL certificate by SNI: aa.bb.test2.com matched SNI: *.test2.com +--- no_error_log +[alert] + + + +=== TEST 12: disable ssl(sni: *.test2.com) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = {status = 0} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PATCH, + core.json.encode(data), + [[{ + "value": { + "status": 0 + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 13: client request: www.test2.com -- failed by disable +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +connected: 1 +failed to do SSL handshake: handshake failed +--- error_log +server name: "www.test2.com" +--- no_error_log +[alert] + + + +=== TEST 14: enable ssl(sni: *.test2.com) +--- config +location /t { + 
content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = {status = 1} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PATCH, + core.json.encode(data), + [[{ + "value": { + "status": 1 + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 15: client request: www.test2.com again +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "www.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body_like +connected: 1 +failed to do SSL handshake: 18: self[- ]signed certificate +--- error_log +server name: "www.test2.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 16: set ssl(snis: {test2.com, *.test2.com}) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, snis = {"test2.com", "*.test2.com"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "snis": ["test2.com", "*.test2.com"] + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 17: client request: test2.com +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + 
content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body_like +connected: 1 +failed to do SSL handshake: 18: self[- ]signed certificate +--- error_log +server name: "test2.com" +--- no_error_log +[error] +[alert] + + + +=== TEST 18: client request: aa.bb.test2.com -- snis un-include +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "aa.bb.test2.com", true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +connected: 1 +failed to do SSL handshake: handshake failed +--- error_log +server name: "aa.bb.test2.com" +failed to find any SSL certificate by SNI: aa.bb.test2.com matched SNIs: ["*.test2.com","test2.com"] +--- no_error_log +[alert] + + + +=== TEST 19: set ssl(encrypt ssl key with another iv) +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.aes_encrypt(t.read_file("t/certs/test2.key")) + local data = {cert = ssl_cert, key = ssl_key, snis = 
{"test2.com", "*.test2.com"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + ngx.status = code + ngx.print(body) + } +} +--- response_body +{"error_msg":"failed to decrypt previous encrypted key"} +--- error_code: 400 +--- error_log +decrypt ssl key failed diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-sni2.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-sni2.t new file mode 100644 index 0000000..c761c90 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-sni2.t @@ -0,0 +1,822 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('debug'); +no_root_location(); + +BEGIN { + $ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + $ENV{TEST_ENV_SSL_CRT} = "-----BEGIN CERTIFICATE----- +MIIEsTCCAxmgAwIBAgIUMbgUUCYHkuKDaPy0bzZowlK0JG4wDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxEjAQBgNVBAMMCXRlc3QyLmNvbTAgFw0y +MDA0MDQyMjE3NTJaGA8yMTIwMDMxMTIyMTc1MlowVzELMAkGA1UEBhMCQ04xEjAQ +BgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVz +dHkxEjAQBgNVBAMMCXRlc3QyLmNvbTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCC +AYoCggGBAMQGBk35V3zaNVDWzEzVGd+EkZnUOrRpXQg5mmcnoKnrQ5rQQMsQCbMO +gFvLt/9OEZQmbE2HuEKsPzL79Yjdu8rGjSoQdbJZ9ccO32uvln1gn68iK79o7Tvm +TCi+BayyNA+lo9IxrBm1wGBkOU1ZPasGYzgBAbMLTSDps1EYxNR8t4l9PrTTRsh6 +NZyTYoDeVIsKZ9SckpjWVnxHOkF+AzZzIJJSe2pj572TDLYA/Xw9I4X3L+SHzwTl +iGWNXb2tU367LHERHvensQzdle7mQN2kE5GpB7QPWB+t9V4mn30jc/LyDvOaei6L ++pbl5CriGBTjaR80oXhK765K720BQeKUezri15bQlMaUGQRnzr53ZsqA4PEh6WCX +hUT2ibO32+uZFXzVQw8y/JUkPf76pZagi8DoLV+sfSbUtnpbQ8wyV2qqTM2eCuPi +RgUwXQi2WssKKzrqcgKil3vksHZozLtOmyZiNE4qfNxv+UGoIybJtZmB+9spY0Rw +5zBRuULycQIDAQABo3MwcTAdBgNVHQ4EFgQUCmZefzpizPrb3VbiIDhrA48ypB8w +HwYDVR0jBBgwFoAUCmZefzpizPrb3VbiIDhrA48ypB8wDAYDVR0TBAUwAwEB/zAh +BgNVHREEGjAYggl0ZXN0Mi5jb22CCyoudGVzdDIuY29tMA0GCSqGSIb3DQEBCwUA +A4IBgQA0nRTv1zm1ACugJFfYZfxZ0mLJfRUCFMmFfhy+vGiIu6QtnOFVw/tEOyMa +m78lBiqac15n3YWYiHiC5NFffTZ7XVlOjN2i4x2z2IJsHNa8tU80AX0Q/pizGK/d ++dzlcsGBb9MGT18h/B3/EYQFKLjUsr0zvDb1T0YDlRUsN3Bq6CvZmvfe9F7Yh4Z/ +XO5R+rX8w9c9A2jzM5isBw2qp/Ggn5RQodMwApEYkJdu80MuxaY6s3dssS4Ay8wP +VNFEeLcdauJ00ES1OnbnuNiYSiSMOgWBsnR+c8AaSRB/OZLYQQKGGYbq0tspwRjM +MGJRrI/jdKnvJQ8p02abdvA9ZuFChoD3Wg03qQ6bna68ZKPd9peBPpMrDDGDLkGI +NzZ6bLJKILnQkV6b1OHVnPDsKXfXjUTTNK/QLJejTXu9RpMBakYZMzs/SOSDtFlS +A+q25t6+46nvA8msUSBKyOGBX42mJcKvR4OgG44PfDjYfmjn2l+Dz/jNXDclpb+Q +XAzBnfM= +-----END CERTIFICATE-----"; + $ENV{TEST_ENV_SSL_KEY} = "-----BEGIN RSA PRIVATE KEY----- +MIIG5QIBAAKCAYEAxAYGTflXfNo1UNbMTNUZ34SRmdQ6tGldCDmaZyegqetDmtBA 
+yxAJsw6AW8u3/04RlCZsTYe4Qqw/Mvv1iN27ysaNKhB1sln1xw7fa6+WfWCfryIr +v2jtO+ZMKL4FrLI0D6Wj0jGsGbXAYGQ5TVk9qwZjOAEBswtNIOmzURjE1Hy3iX0+ +tNNGyHo1nJNigN5Uiwpn1JySmNZWfEc6QX4DNnMgklJ7amPnvZMMtgD9fD0jhfcv +5IfPBOWIZY1dva1TfrsscREe96exDN2V7uZA3aQTkakHtA9YH631XiaffSNz8vIO +85p6Lov6luXkKuIYFONpHzSheErvrkrvbQFB4pR7OuLXltCUxpQZBGfOvndmyoDg +8SHpYJeFRPaJs7fb65kVfNVDDzL8lSQ9/vqllqCLwOgtX6x9JtS2eltDzDJXaqpM +zZ4K4+JGBTBdCLZayworOupyAqKXe+SwdmjMu06bJmI0Tip83G/5QagjJsm1mYH7 +2yljRHDnMFG5QvJxAgMBAAECggGBAIELlkruwvGmlULKpWRPReEn3NJwLNVoJ56q +jUMri1FRWAgq4PzNahU+jrHfwxmHw3rMcK/5kQwTaOefh1y63E35uCThARqQroSE +/gBeb6vKWFVrIXG5GbQ9QBXyQroV9r/2Q4q0uJ+UTzklwbNx9G8KnXbY8s1zuyrX +rvzMWYepMwqIMSfJjuebzH9vZ4F+3BlMmF4XVUrYj8bw/SDwXB0UXXT2Z9j6PC1J +CS0oKbgIZ8JhoF3KKjcHBGwWTIf5+byRxeG+z99PBEBafm1Puw1vLfOjD3DN/fso +8xCEtD9pBPBJ+W97x/U+10oKetmP1VVEr2Ph8+s2VH1zsRF5jo5d0GtvJqOwIQJ7 +z3OHJ7lLODw0KAjB1NRXW4dTTUDm6EUuUMWFkGAV6YTyhNLAT0DyrUFJck9RiY48 +3QN8vSf3n/+3wwg1gzcJ9w3W4DUbvGqu86CaUQ4UegfYJlusY/3YGp5bGNQdxmws +lgIoSRrHp6UJKsP8Yl08MIvT/oNLgQKBwQD75SuDeyE0ukhEp0t6v+22d18hfSef +q3lLWMI1SQR9Kiem9Z1KdRkIVY8ZAHANm6D8wgjOODT4QZtiqJd2BJn3Xf+aLfCd +CW0hPvmGTcp/E4sDZ2u0HbIrUStz7ZcgXpjD2JJAJGEKY2Z7J65gnTqbqoBDrw1q +1+FqtikkHRte1UqxjwnWBpSdoRQFgNPHxPWffhML1xsD9Pk1B1b7JoakYcKsNoQM +oXUKPLxSZEtd0hIydqmhGYTa9QWBPNDlA5UCgcEAxzfGbOrPBAOOYZd3jORXQI6p +H7SddTHMQyG04i+OWUd0HZFkK7/k6r26GFmImNIsQMB26H+5XoKRFKn+sUl14xHY +FwB140j0XSav2XzT38UpJ9CptbgK1eKGQVp41xwRYjHVScE5hJuA3a1TKM0l26rp +hny/KaP+tXuqt9QbxcUN6efubNYyFP+m6nq2/XdX74bJuGpXLq8W0oFdiocO6tmF +4/Hsc4dCVrcwULqXQa0lJ57zZpfIPARqWM2847xtAoHBANVUNbDpg6rTJMc34722 +dAy3NhL3mqooH9aG+hsEls+l9uT4WFipqSScyU8ERuHPbt0BO1Hi2kFx1rYMUBG8 +PeT4b7NUutVUGV8xpUNv+FH87Bta6CUnjTAQUzuf+QCJ/NjIPrwh0yloG2+roIvk +PLF/CZfI1hUpdZfZZChYmkiLXPHZURw4gH6q33j1rOYf0WFc9aZua0vDmZame6zB +6P+oZ6VPmi/UQXoFC/y/QfDYK18fjfOI2DJTlnDoX4XErQKBwGc3M5xMz/MRcJyJ +oIwj5jzxbRibOJV2tpD1jsU9xG/nQHbtVEwCgTVKFXf2M3qSMhFeZn0xZ7ZayZY+ +OVJbcDO0lBPezjVzIAB/Qc7aCOBAQ4F4b+VRtHN6iPqlSESTK0KH9Szgas+UzeCM 
+o7BZEctNMu7WBSkq6ZXXu+zAfZ8q6HmPDA3hsFMG3dFQwSxzv+C/IhZlKkRqvNVV +50QVk5oEF4WxW0PECY/qG6NH+YQylDSB+zPlYf4Of5cBCWOoxQKBwQCeo37JpEAR +kYtqSjXkC5GpPTz8KR9lCY4SDuC1XoSVCP0Tk23GX6GGyEf4JWE+fb/gPEFx4Riu +7pvxRwq+F3LaAa/FFTNUpY1+8UuiMO7J0B1RkVXkyJjFUF/aQxAnOoZPmzrdZhWy +bpe2Ka+JS/aXSd1WRN1nmo/DarpWFvdLWZFwUt6zMziH40o1gyPHEuXOqVtf2QCe +Q6WC9xnEz4lbb/fR2TF9QRA4FtoRpDe/f3ZGIpWE0RdwyZZ6uA7T1+Q= +-----END RSA PRIVATE KEY-----"; +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1 set ssl with multiple certificates. +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local ssl_ecc_cert = t.read_file("t/certs/apisix_ecc.crt") + local ssl_ecc_key = t.read_file("t/certs/apisix_ecc.key") + + local data = { + cert = ssl_cert, + key = ssl_key, + certs = { ssl_ecc_cert }, + keys = { ssl_ecc_key }, + sni = "test.com", + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 2: client request using ECC certificate +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; +location /t { + lua_ssl_ciphers ECDHE-ECDSA-AES256-GCM-SHA384; + content_by_lua_block { + -- etcd sync + + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "test.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + 
ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +connected: 1 +ssl handshake: true + + + +=== TEST 3: client request using RSA certificate +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + lua_ssl_ciphers ECDHE-RSA-AES256-SHA384; + content_by_lua_block { + -- etcd sync + + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, "test.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +connected: 1 +ssl handshake: true + + + +=== TEST 4: set ssl(sni: *.test2.com) once again +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "*.test2.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "*.test2.com" + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 5: caching of parsed certs and pkeys +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + local work = function() + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + ngx.say("connected: ", ok) + + local sess, err = sock:sslhandshake(nil, 
"www.test2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + + work() + work() + + -- collectgarbage() + } +} +--- response_body eval +qr{connected: 1 +ssl handshake: true +close: 1 nil +connected: 1 +ssl handshake: true +close: 1 nil} +--- grep_error_log eval +qr/parsing (cert|(priv key)) for sni: www.test2.com/ +--- grep_error_log_out +parsing cert for sni: www.test2.com +parsing priv key for sni: www.test2.com + + + +=== TEST 6: set ssl(encrypt ssl keys with another iv) +--- config +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local raw_ssl_key = t.read_file("t/certs/test2.key") + local ssl_key = t.aes_encrypt(raw_ssl_key) + local data = { + certs = { ssl_cert }, + keys = { ssl_key }, + snis = {"test2.com", "*.test2.com"}, + cert = ssl_cert, + key = raw_ssl_key, + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + ngx.status = code + ngx.print(body) + } +} +--- error_code: 400 +--- response_body +{"error_msg":"failed to handle cert-key pair[1]: failed to decrypt previous encrypted key"} +--- error_log +decrypt ssl key failed + + + +=== TEST 7: set miss_head ssl certificate +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/incorrect.crt") + local ssl_key = t.read_file("t/certs/incorrect.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + ngx.status = code + ngx.print(body) + } +} +--- response_body +{"error_msg":"failed to parse cert: 
PEM_read_bio_X509_AUX() failed"} +--- error_code: 400 +--- no_error_log +[alert] + + + +=== TEST 8: client request without sni +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, nil, true) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + end -- do + -- collectgarbage() + } +} +--- response_body +failed to do SSL handshake: handshake failed +--- error_log +failed to find SNI: please check if the client requests via IP or uses an outdated protocol +--- no_error_log +[alert] + + + +=== TEST 9: client request without sni, but fallback_sni is set +--- yaml_config +apisix: + node_listen: 1984 + ssl: + fallback_sni: "a.test2.com" +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, nil, false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +ssl handshake: true + + + +=== TEST 10: set sni with uppercase +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "*.TesT2.com"} + + local code, body = 
t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 11: match case insensitive +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "a.test2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +ssl handshake: true + + + +=== TEST 12: set snis with uppercase +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, snis = {"TesT2.com", "a.com"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 13: match case insensitive +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "TEST2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +ssl handshake: true + + + +=== TEST 14: ensure table is reused in TLS handshake +--- config 
+listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "TEST2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- extra_init_by_lua + local tablepool = require("apisix.core").tablepool + local old_fetch = tablepool.fetch + tablepool.fetch = function(name, ...) + ngx.log(ngx.WARN, "fetch table ", name) + return old_fetch(name, ...) + end + + local old_release = tablepool.release + tablepool.release = function(name, ...) + ngx.log(ngx.WARN, "release table ", name) + return old_release(name, ...) + end +--- response_body +ssl handshake: true +--- grep_error_log eval +qr/(fetch|release) table \w+/ +--- grep_error_log_out +fetch table api_ctx +release table api_ctx + + + +=== TEST 15: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/ssl test2.com.crt=@t/certs/test2.crt test2.com.key=@t/certs/test2.key +--- response_body +Success! 
Data written to: kv/apisix/ssl + + + +=== TEST 16: set ssl conf with secret ref: vault +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix": "kv/apisix", + "token" : "root" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- set ssl + local code, body = t('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + [[{ + "cert": "$secret://vault/test1/ssl/test2.com.crt", + "key": "$secret://vault/test1/ssl/test2.com.key", + "sni": "test2.com" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 17: get cert and key from vault +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "test2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +ssl handshake: true + + + +=== TEST 18: set ssl conf with secret ref: env +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- set ssl + local code, body = t('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + [[{ + "cert": "$env://TEST_ENV_SSL_CRT", + "key": "$env://TEST_ENV_SSL_KEY", + "sni": "test2.com" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 19: get cert and key from env +--- config +listen 
unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "test2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +ssl handshake: true + + + +=== TEST 20: set ssl conf with secret ref: only cert use env +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + -- set ssl + local ssl_key = t.read_file("t/certs/test2.key") + local data = { + cert = "$env://TEST_ENV_SSL_CRT", + key = ssl_key, + sni = "TesT2.com" + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 21: get cert from env +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "test2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +ssl handshake: true diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-sni3.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-sni3.t new file mode 100644 index 0000000..ff18bda --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-sni3.t @@ -0,0 +1,283 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +no_root_location(); + +BEGIN { + $ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + +}); + + +run_tests; + +__DATA__ + +=== TEST 1: set sni with trailing period +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "*.test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed +--- error_code: 201 + + + +=== TEST 2: match against sni with no trailing period +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = 
sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "a.test.com.", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body +ssl handshake: true + + + +=== TEST 3: set snis with trailing period +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, snis = {"test2.com", "a.com"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: match against sni with no trailing period +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "test2.com.", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body +ssl handshake: true + + + +=== TEST 5: set ssl(sni: www.test.com.) 
+--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + local ssl_cert = t.read_file("t/certs/test-dot.crt") + local ssl_key = t.read_file("t/certs/test-dot.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com."} + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com." + }, + "key": "/apisix/ssls/1" + }]] + ) + ngx.status = code + ngx.say(body) + } +} +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: client request +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; +location /t { + content_by_lua_block { + -- etcd sync + ngx.sleep(0.2) + do + local sock = ngx.socket.tcp() + sock:settimeout(2000) + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + ngx.say("connected: ", ok) + local sess, err = sock:sslhandshake(nil, "www.test.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + local req = "GET /hello HTTP/1.0\r\nHost: www.test.com\r\nConnection: close\r\n\r\n" + local bytes, err = sock:send(req) + if not bytes then + ngx.say("failed to send http request: ", err) + return + end + ngx.say("sent http request: ", bytes, " bytes.") + while true do + local line, err = sock:receive() + if not line then + -- ngx.say("failed to receive response status line: ", err) + break + 
end + ngx.say("received: ", line) + end + local ok, err = sock:close() + ngx.say("close: ", ok, " ", err) + end -- do + -- collectgarbage() + } +} +--- request +GET /t +--- response_body eval +qr{connected: 1 +ssl handshake: true +sent http request: 62 bytes. +received: HTTP/1.1 200 OK +received: Content-Type: text/plain +received: Content-Length: 12 +received: Connection: close +received: Server: APISIX/\d\.\d+(\.\d+)? +received: \nreceived: hello world +close: 1 nil} +--- error_log +server name: "www.test.com" +--- no_error_log +[error] +[alert] diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-host.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-host.t new file mode 100644 index 0000000..b9f2901 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-host.t @@ -0,0 +1,561 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "host": "*.foo.com", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: /not_found +--- request +GET /hello +--- error_code: 404 + + + +=== TEST 4: /not_found +--- request +GET /hello +--- more_headers +Host: not_found.com +--- error_code: 404 + + + +=== TEST 5: hit routes (www.foo.com) +--- request +GET /hello +--- more_headers +Host: www.foo.com +--- response_body +hello world + + + +=== TEST 6: hit routes (user.foo.com) +--- request +GET /hello +--- more_headers +Host: user.foo.com +--- response_body +hello world + + + +=== TEST 7: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: 
/not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 9: /not_found +--- request +GET /hello +--- error_code: 404 + + + +=== TEST 10: /not_found +--- request +GET /hello +--- more_headers +Host: www.foo.com +--- error_code: 404 + + + +=== TEST 11: hit routes (foo.com) +--- request +GET /hello +--- more_headers +Host: foo.com +--- response_body +hello world + + + +=== TEST 12: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "filter_func": "function(vars) return vars.arg_name == 'json' end", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: not hit: name=unknown +--- request +GET /hello?name=unknown +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 14: hit routes +--- request +GET /hello?name=json +--- response_body +hello world + + + +=== TEST 15: set route with ':' +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/file:listReputationHistories", + "plugins":{"proxy-rewrite":{"uri":"/hello"}}, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit routes +--- request +GET /file:listReputationHistories +--- response_body +hello world + + + +=== TEST 17: not hit +--- request +GET /file:xx +--- error_code: 404 + + + +=== TEST 18: inherit hosts from services +--- config + location /t { + content_by_lua_block { 
+ local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["bar.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "proxy-rewrite":{"uri":"/hello1"} + }, + "service_id": "1", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "priority": -1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 19: hit +--- more_headers +Host: www.foo.com +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 20: change hosts in services +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["foo.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {Host = "foo.com"}}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["bar.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(0.1) + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {Host = "foo.com"}}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +hello1 world +hello world + + + +=== TEST 21: unbind services +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "proxy-rewrite":{"uri":"/hello1"} + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + ngx.sleep(0.1) + + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {Host = "foo.com"}}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +hello1 world + + + +=== TEST 22: host from route is preferred +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + .. 
"/hello" + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "hosts": ["foo.com"], + "plugins": { + "proxy-rewrite":{"uri":"/hello1"} + }, + "service_id": "1", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + ngx.sleep(0.1) + + for _, h in ipairs({"foo.com", "bar.com"}) do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {Host = h}}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "plugins": { + "proxy-rewrite":{"uri":"/hello1"} + }, + "service_id": "1", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.say(body) + return + end + ngx.sleep(0.1) + + for _, h in ipairs({"foo.com", "bar.com"}) do + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {Host = h}}) + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + end + } + } +--- request +GET /t +--- response_body +hello1 world +hello world +hello1 world +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-keep-end-slash.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-keep-end-slash.t new file mode 100644 index 0000000..910ef99 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-keep-end-slash.t @@ -0,0 +1,87 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri' + delete_uri_tail_slash: true +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 3: hit route +--- request +GET /hello/ +--- error_code: 404 diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-multiple.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-multiple.t new file mode 100644 index 0000000..8f124fe --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-multiple.t @@ -0,0 +1,210 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# 
contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_port" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: set route(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_port/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 3: set route(id: 3) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/3', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_port/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 5: hit route 1 +--- request +GET /server_port +--- response_body eval +qr/1980/ + + + +=== TEST 6: hit route 2 +--- request +GET /server_port/route2 +--- response_body eval +qr/1981/ + + + +=== TEST 7: hit route 3 +--- request +GET /server_port/hello +--- response_body eval +qr/1982/ + + + +=== TEST 8: delete route(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: delete route(id: 3) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/3', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-priority.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-priority.t new file mode 100644 index 0000000..93c2464 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-priority.t @@ -0,0 +1,178 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1 + priority: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port*", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "priority": 2 + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit routes +--- request +GET /server_port/aa +--- response_body eval +1980 + + + +=== TEST 3: set route(id: 2 + priority: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port*", + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "priority": 1 
+ }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit routes +--- request +GET /server_port/aa +--- response_body eval +1980 + + + +=== TEST 5: set route(id: 2 + priority: 3) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/server_port*", + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "priority": 3 + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit routes +--- request +GET /server_port/aa +--- response_body eval +1981 + + + +=== TEST 7: set route(id: 2 + priority: 3) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-sanity.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-sanity.t new file mode 100644 index 0000000..5d36a3d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-sanity.t @@ -0,0 +1,381 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $servlet_yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri' + normalize_uri_like_servlet: true +_EOC_ + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "host": "foo.com", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: /not_found +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: /not_found +--- request +GET /hello +--- more_headers +Host: not_found.com +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 5: hit routes +--- request +GET /hello +--- more_headers +Host: foo.com +--- response_body +hello 
world + + + +=== TEST 6: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "uri": "/server_port" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: /not_found +--- request +GET /hello +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 8: hit routes +--- request +GET /server_port +--- more_headers +Host: anydomain.com +--- response_body_like eval +qr/1981/ + + + +=== TEST 9: set route(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1981": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: /not_found +--- request +GET /hello2 +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 11: hit routes +--- request +GET /hello +--- more_headers +Host: anydomain.com +--- response_body +hello world + + + +=== TEST 12: delete route(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: set route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + 
"upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: hit route with /hello +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 15: miss route +--- request +GET /hello/ +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 16: match route like servlet +--- yaml_config eval: $::servlet_yaml_config +--- request +GET /hello;world +--- response_body eval +qr/404 Not Found/ +--- error_code: 404 + + + +=== TEST 17: plugin should work on the normalized url +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*", + "plugins": { + "uri-blocker": { + "block_rules": ["/hello/world"] + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit +--- yaml_config eval: $::servlet_yaml_config +--- request +GET /hello;a=b/world;a/; +--- error_code: 403 + + + +=== TEST 19: reject bad uri +--- yaml_config eval: $::servlet_yaml_config +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port + for _, path in ipairs({ + "/;/a", "/%2e;", "/%2E%2E;", "/.;", "/..;", + "/%2E%2e;", "/b/;/c" + }) do + local httpc = http.new() + local res, err = httpc:request_uri(uri .. 
path) + if not res then + ngx.say(err) + return + end + + if res.status ~= 400 then + ngx.say(path, " ", res.status) + end + end + + ngx.say("ok") + } + } +--- request +GET /t +--- response_body +ok +--- error_log +failed to normalize diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-vars.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-vars.t new file mode 100644 index 0000000..6e4b787 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-vars.t @@ -0,0 +1,439 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route(id: 1) with vars(user_agent ~* android) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [["http_user_agent", "~*", "android"]] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: not found because user_agent=ios +--- request +GET /hello +--- more_headers +User-Agent: ios +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: hit routes with user_agent=android +--- request +GET /hello +--- more_headers +User-Agent: android +--- response_body +hello world + + + +=== TEST 4: hit routes with user_agent=Android +--- request +GET /hello +--- more_headers +User-Agent: Android +--- response_body +hello world + + + +=== TEST 5: set route(id: 1) with vars(user_agent ! 
~* android) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [["http_user_agent", "!", "~*", "android"]] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: not found because user_agent=android +--- request +GET /hello +--- more_headers +User-Agent: android +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 7: hit routes with user_agent=ios +--- request +GET /hello +--- more_headers +User-Agent: ios +--- response_body +hello world + + + +=== TEST 8: set route(id: 1) with vars(in table) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [["http_user_agent", "IN", ["android", "ios"]]] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit routes with user_agent=ios +--- request +GET /hello +--- more_headers +User-Agent: ios +--- response_body +hello world + + + +=== TEST 10: hit routes with user_agent=android +--- request +GET /hello +--- more_headers +User-Agent: android +--- response_body +hello world + + + +=== TEST 11: set route(id: 1) with vars(null) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": 
"/hello", + "vars": [["http_user_agent", "==", null]] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: not found because user_agent=android +--- request +GET /hello +--- more_headers +User-Agent: android +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 13: hit route +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 14: set route(id: 1) with vars(items are two) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- deprecated, will be removed soon + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [["http_user_agent", "ios"]] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: hit routes with user_agent=ios +--- request +GET /hello +--- more_headers +User-Agent: ios +--- response_body +hello world + + + +=== TEST 16: vars rule with logical operator (set) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [ + "!OR", + ["http_user_agent", "==", "ios"], + ["http_demo", "==", "test"] + ] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: vars rule with logical operator (hit) +--- request +GET /hello +--- more_headers +User-Agent: android +demo: prod +--- response_body +hello world + + + +=== TEST 18: vars rule with logical operator (miss) +--- request +GET /hello +--- 
more_headers +User-Agent: ios +demo: prod +--- error_code: 404 + + + +=== TEST 19: be compatible with empty vars +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": [] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: hit +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 21: bad vars rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello", + "vars": ["http_user_agent", "~*", "android"] + }]=] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to validate the 'vars' expression: rule should be wrapped inside brackets"} diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-with-parameter.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-with-parameter.t new file mode 100644 index 0000000..269caa4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-with-parameter.t @@ -0,0 +1,272 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri_with_parameter' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/name/:name/bar" + }]], + [[{ + "value": { + "uri": "/name/:name/bar", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: /not_found +--- request +GET /not_found +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 3: /name/json/foo +--- request +GET /name/json2/foo +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 4: /name/json/ +--- request +GET /name/json/ +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 5: /name//bar +--- request +GET /name//bar +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + 
+=== TEST 6: hit route: /name/json/bar +--- request +GET /name/json/bar +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ + + + +=== TEST 7: set route,uri=/:name/foo +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/:name/foo" + }]], + [[{ + "value": { + "uri": "/:name/foo", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }, + "key": "/apisix/routes/1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: /json/foo +--- request +GET /json/foo +--- error_code: 404 +--- response_body eval +qr/404 Not Found/ + + + +=== TEST 9: /json/bbb/foo +--- request +GET /json/bbb/foo +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 10: inherit hosts from services +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "hosts": ["bar.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "proxy-rewrite":{"uri":"/hello1"} + }, + "service_id": "1", + "uri": "/:name/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "proxy-rewrite":{"uri":"/hello"} + }, + "uri": "/:name/hello", + "priority": -1 + }]] + ) + + if 
code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: hit +--- more_headers +Host: www.foo.com +--- request +GET /john/hello +--- response_body +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-with-parameter2.t b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-with-parameter2.t new file mode 100644 index 0000000..8659a8b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/router/radixtree-uri-with-parameter2.t @@ -0,0 +1,108 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +worker_connections(256); +no_root_location(); +no_shuffle(); + +our $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + router: + http: 'radixtree_uri_with_parameter' +_EOC_ + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->yaml_config) { + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route with :name as a uri parameter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "proxy-rewrite":{"uri":"/hello"} + }, + "uri": "/name/:name/bar" + }]], + [[{ + "value": { + "uri": "/name/:name/bar", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "proxy-rewrite":{"uri":"/hello"} + }, + }, + "key": "/apisix/routes/1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: parameters with special characters should pass +--- request +GET /name/with%20space/bar +--- error_code: 200 +--- response_body +hello world + + + +=== TEST 3: failing case for the above test +--- request +GET /name/with%20space/foo +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} diff --git a/CloudronPackages/APISIX/apisix-source/t/script/script.t b/CloudronPackages/APISIX/apisix-source/t/script/script.t new file mode 100644 index 0000000..0a0d90e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/script/script.t @@ -0,0 +1,99 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_long_string(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: add service which has plugins +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "name": "script_test", + "plugins": { + "example-plugin": { + "i": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: add route which has scripts and binding service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": 1, + "script": "local _M = {} \n function _M.access(api_ctx) \n ngx.log(ngx.INFO,\"hit access phase\") \n end \nreturn _M", + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: hit route, execute the scripts but don't execute the plugins +--- request +GET /hello +--- response_body +hello world +--- error_log eval +qr/loaded script_obj: \{"access":"function: 0x[\w]+"\}/ +--- no_error_log eval +qr/plugin rewrite 
phase, conf: \{"i":1\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/script/script_distribute.t b/CloudronPackages/APISIX/apisix-source/t/script/script_distribute.t new file mode 100644 index 0000000..212cf51 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/script/script_distribute.t @@ -0,0 +1,150 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_root_location(); +no_shuffle(); + + +run_tests; + +__DATA__ + +=== TEST 1: set route(host + uri) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local script = t.read_file("t/script/script_test.lua") + local data = { + script = script, + uri = "/hello", + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + core.json.encode(data)) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- yaml_config eval: $::yaml_config +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit routes +--- request +GET /hello +--- yaml_config eval: $::yaml_config +--- response_body +hello world +--- error_log +string "route#1" +phase_func(): hit access phase +phase_func(): hit header_filter phase +phase_func(): hit body_filter phase +phase_func(): hit body_filter phase +phase_func(): hit log phase while + + + +=== TEST 3: invalid script in route +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = { + script = "invalid script", + uri = "/hello", + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + core.json.encode(data)) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- yaml_config eval: $::yaml_config +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to load 'script' string: [string \"invalid script\"]:1: '=' expected near 'script'"} + + + +=== TEST 4: invalid script in service +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local data = { + script = "invalid 
script", + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + + local code, body = t.test('/apisix/admin/services/1', + ngx.HTTP_PUT, + core.json.encode(data)) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- yaml_config eval: $::yaml_config +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"failed to load 'script' string: [string \"invalid script\"]:1: '=' expected near 'script'"} diff --git a/CloudronPackages/APISIX/apisix-source/t/script/script_test.lua b/CloudronPackages/APISIX/apisix-source/t/script/script_test.lua new file mode 100644 index 0000000..e2590ab --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/script/script_test.lua @@ -0,0 +1,43 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") + + +local _M = {} + + +function _M.access(api_ctx) + core.log.warn("hit access phase") +end + + +function _M.header_filter(ctx) + core.log.warn("hit header_filter phase") +end + + +function _M.body_filter(ctx) + core.log.warn("hit body_filter phase") +end + + +function _M.log(ctx) + core.log.warn("hit log phase") +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/secret/aws.t b/CloudronPackages/APISIX/apisix-source/t/secret/aws.t new file mode 100644 index 0000000..ae0e09b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/secret/aws.t @@ -0,0 +1,316 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +BEGIN { + $ENV{AWS_REGION} = "us-east-1"; + $ENV{AWS_ACCESS_KEY_ID} = "access"; + $ENV{AWS_SECRET_ACCESS_KEY} = "secret"; + $ENV{AWS_SESSION_TOKEN} = "token"; +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: sanity +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local test_case = { + {access_key_id = "access"}, + {secret_access_key = "secret"}, + {access_key_id = "access", secret_access_key = "secret"}, + {access_key_id = "access", secret_access_key = 1234}, + {access_key_id = 1234, secret_access_key = "secret"}, + {access_key_id = "access", secret_access_key = "secret", session_token = "token"}, + {access_key_id = "access", secret_access_key = "secret", session_token = 1234}, + {access_key_id = "access", secret_access_key = "secret", region = "us-east-1"}, + {access_key_id = "access", secret_access_key = "secret", region = 1234}, + {access_key_id = "access", secret_access_key = "secret", endpoint_url = "http://127.0.0.1:4566"}, + {access_key_id = "access", secret_access_key = "secret", endpoint_url = 1234}, + {access_key_id = "access", secret_access_key = "secret", session_token = "token", endpoint_url = "http://127.0.0.1:4566", region = "us-east-1"}, + } + local aws = require("apisix.secret.aws") + local core = require("apisix.core") + local metadata_schema = aws.schema + + for _, conf in ipairs(test_case) do + local ok, err = core.schema.check(metadata_schema, conf) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +property "secret_access_key" is required +property "access_key_id" is required +done +property "secret_access_key" validation failed: wrong type: expected string, got number +property "access_key_id" validation failed: wrong type: expected string, got number +done +property "session_token" validation failed: wrong type: expected string, got number +done +property "region" validation failed: wrong type: 
expected string, got number +done +property "endpoint_url" validation failed: wrong type: expected string, got number +done + + + +=== TEST 2: check key: no main key +--- config + location /t { + content_by_lua_block { + local aws = require("apisix.secret.aws") + local conf = { + endpoint_url = "http://127.0.0.1:4566", + region = "us-east-1", + access_key_id = "access", + secret_access_key = "secret", + session_token = "token", + } + local data, err = aws.get(conf, "/apisix") + if err then + return ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +can't find main key, key: /apisix + + + +=== TEST 3: error aws endpoint_url +--- config + location /t { + content_by_lua_block { + local aws = require("apisix.secret.aws") + local conf = { + endpoint_url = "http://127.0.0.1:8080", + region = "us-east-1", + access_key_id = "access", + secret_access_key = "secret", + session_token = "token", + } + local data, err = aws.get(conf, "apisix-key/jack") + if err then + return ngx.say(err) + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +failed to retrtive data from aws secret manager: SecretsManager:getSecretValue() failed to connect to 'http://127.0.0.1:8080': connection refused +--- timeout: 6 + + + +=== TEST 4: get value from aws (status ~= 200) +--- config + location /t { + content_by_lua_block { + local aws = require("apisix.secret.aws") + local conf = { + endpoint_url = "http://127.0.0.1:4566", + region = "us-east-1", + access_key_id = "access", + secret_access_key = "secret", + session_token = "token", + } + local data, err = aws.get(conf, "apisix-error-key/jack") + if err then + return ngx.say("err") + end + ngx.say("value") + } + } +--- request +GET /t +--- response_body +err + + + +=== TEST 5: get json value from aws +--- config + location /t { + content_by_lua_block { + local aws = require("apisix.secret.aws") + local conf = { + endpoint_url = "http://127.0.0.1:4566", + region = "us-east-1", + access_key_id 
= "access", + secret_access_key = "secret", + session_token = "token", + } + local data, err = aws.get(conf, "apisix-key/jack") + if err then + return ngx.say(err) + end + ngx.say("value") + } + } +--- request +GET /t +--- response_body +value + + + +=== TEST 6: get json value from aws using env var +--- config + location /t { + content_by_lua_block { + local aws = require("apisix.secret.aws") + local conf = { + endpoint_url = "http://127.0.0.1:4566", + region = "us-east-1", + access_key_id = "$ENV://AWS_ACCESS_KEY_ID", + secret_access_key = "$ENV://AWS_SECRET_ACCESS_KEY", + session_token = "$ENV://AWS_SESSION_TOKEN", + } + local data, err = aws.get(conf, "apisix-key/jack") + if err then + return ngx.say(err) + end + ngx.say("value") + } + } +--- request +GET /t +--- response_body +value + + + +=== TEST 7: get string value from aws +--- config + location /t { + content_by_lua_block { + local aws = require("apisix.secret.aws") + local conf = { + endpoint_url = "http://127.0.0.1:4566", + region = "us-east-1", + access_key_id = "$ENV://AWS_ACCESS_KEY_ID", + secret_access_key = "$ENV://AWS_SECRET_ACCESS_KEY", + session_token = "$ENV://AWS_SESSION_TOKEN", + } + local data, err = aws.get(conf, "apisix-mysql") + if err then + return ngx.say(err) + end + ngx.say(data) + } + } +--- request +GET /t +--- response_body +secret + + + +=== TEST 8: add secret && consumer && check +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret aws config + local code, body = t('/apisix/admin/secrets/aws/mysecret', + ngx.HTTP_PUT, + [[{ + "endpoint_url": "http://127.0.0.1:4566", + "region": "us-east-1", + "access_key_id": "access", + "secret_access_key": "secret", + "session_token": "token" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- change consumer with secrets ref: aws + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": 
{ + "key-auth": { + "key": "$secret://aws/mysecret/jack/key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://aws/mysecret/jack/key") + + + local code, body = t('/apisix/admin/secrets/aws/mysecret', ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$secret://aws/mysecret/jack/key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://aws/mysecret/jack/key") + if value then + ngx.say("secret value: ", value) + end + ngx.say("all done") + } + } +--- response_body +all done diff --git a/CloudronPackages/APISIX/apisix-source/t/secret/conf/error.json b/CloudronPackages/APISIX/apisix-source/t/secret/conf/error.json new file mode 100644 index 0000000..3d0bb62 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/secret/conf/error.json @@ -0,0 +1,9 @@ +{ + "private_key": "-----BEGIN PRIVATE 
KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR\naeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC\nUuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF\n2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4\nv5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep\nAB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw\nIu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P\nPR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic\nDcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49\nsxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC\nafOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC\nl85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz\nlw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC\nrCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g\ntdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16\nUyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1\nUjqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI\n1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh\nGfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46\nxn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4\nupppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF\nFzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo\ny4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W\nvjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK\nYp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S\nkEJQcmfVew5mFXyxuEn3zA==\n-----END PRIVATE KEY-----", + "project_id": "apisix", + "token_uri": "http://127.0.0.1:1980/google/logging/token", + "scope": [ + "https://apisix.apache.org/logs:admin" + ], + "entries_uri": "http://127.0.0.1:1980/google/logging/entries" +} diff --git 
a/CloudronPackages/APISIX/apisix-source/t/secret/conf/success.json b/CloudronPackages/APISIX/apisix-source/t/secret/conf/success.json new file mode 100644 index 0000000..d9cfbc3 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/secret/conf/success.json @@ -0,0 +1,10 @@ +{ + "private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR\naeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC\nUuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF\n2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4\nv5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep\nAB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw\nIu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P\nPR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic\nDcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49\nsxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC\nafOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC\nl85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz\nlw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC\nrCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g\ntdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16\nUyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1\nUjqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI\n1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh\nGfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46\nxn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4\nupppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF\nFzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo\ny4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W\nvjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK\nYp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S\nkEJQcmfVew5mFXyxuEn3
zA==\n-----END PRIVATE KEY-----", + "project_id": "apisix", + "token_uri": "http://127.0.0.1:1980/google/secret/token", + "scope": [ + "https://www.googleapis.com/auth/cloud" + ], + "entries_uri": "http://127.0.0.1:1984", + "client_email": "email@apisix.iam.gserviceaccount.com" +} diff --git a/CloudronPackages/APISIX/apisix-source/t/secret/gcp.t b/CloudronPackages/APISIX/apisix-source/t/secret/gcp.t new file mode 100644 index 0000000..b7fc533 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/secret/gcp.t @@ -0,0 +1,737 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: validate different schema situation +--- config + location /t { + content_by_lua_block { + local test_case = { + {}, + {auth_file = "123"}, + {auth_file = 123}, + {auth_config = {client_email = "client", private_key = "private_key"}}, + {auth_config = {private_key = "private_key", project_id = "project_id"}}, + {auth_config = {client_email = "client", project_id = "project_id"}}, + {auth_config = {client_email = "client", private_key = "private_key", project_id = "project_id"}}, + {auth_config = {client_email = 1234, private_key = "private_key", project_id = "project_id"}}, + {auth_config = {client_email = "client", private_key = 1234, project_id = "project_id"}}, + {auth_config = {client_email = "client", private_key = "private_key", project_id = 1234}}, + {auth_config = {client_email = "client", private_key = "private_key", project_id = "project_id"}, ssl_verify = 1234}, + {auth_config = {client_email = "client", private_key = "private_key", project_id = "project_id", token_uri = 1234}}, + {auth_config = {client_email = "client", private_key = "private_key", project_id = "project_id", scope = 1234}}, + {auth_config = {client_email = "client", private_key = "private_key", project_id = "project_id", entries_uri = 1234}}, + {auth_config = {client_email = "client", private_key = "private_key", project_id = "project_id", token_uri = "token_uri", + scope = {"scope"}, entries_uri = "entries_uri"}, ssl_verify = true}, + } + local gcp = require("apisix.secret.gcp") + local core = require("apisix.core") + local metadata_schema = gcp.schema + + for _, conf in ipairs(test_case) do + local ok, err = core.schema.check(metadata_schema, conf) + ngx.say(ok and "done" or err) + end + } + } +--- request +GET /t +--- response_body +value should match only one schema, but matches none +done +property "auth_file" validation failed: 
wrong type: expected string, got number +property "auth_config" validation failed: property "project_id" is required +property "auth_config" validation failed: property "client_email" is required +property "auth_config" validation failed: property "private_key" is required +done +property "auth_config" validation failed: property "client_email" validation failed: wrong type: expected string, got number +property "auth_config" validation failed: property "private_key" validation failed: wrong type: expected string, got number +property "auth_config" validation failed: property "project_id" validation failed: wrong type: expected string, got number +property "ssl_verify" validation failed: wrong type: expected boolean, got number +property "auth_config" validation failed: property "token_uri" validation failed: wrong type: expected string, got number +property "auth_config" validation failed: property "scope" validation failed: wrong type: expected array, got number +property "auth_config" validation failed: property "entries_uri" validation failed: wrong type: expected string, got number +done + + + +=== TEST 2: check key: no main key +--- config + location /t { + content_by_lua_block { + local gcp = require("apisix.secret.gcp") + local conf = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 
+sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/token", + scope = { + "https://www.googleapis.com/auth/cloud-platform" + }, + }, + } + local data, err = gcp.get(conf, "/apisix") + if err then + return ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +can't find main key, key: /apisix + + + +=== TEST 3: add secret && consumer && check +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local conf = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 
+v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/secret/token", + scope = { + "https://www.googleapis.com/auth/cloud-platform" + }, + entries_uri = "http://127.0.0.1:1984" + }, + } + + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/secrets/gcp/mysecret', ngx.HTTP_PUT, conf) + + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- change consumer with secrets ref: gcp + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + 
"key-auth": { + "key": "$secret://gcp/mysecret/jack/key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://gcp/mysecret/jack/key") + + + local code, body = t('/apisix/admin/secrets/gcp/mysecret', ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$secret://gcp/mysecret/jack/key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://gcp/mysecret/jack/key") + if value then + ngx.say("secret value: ", value) + end + ngx.say("all done") + } + } +--- response_body +all done + + + +=== TEST 4: setup route (/projects/apisix/secrets/jack/versions/latest:access) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [ + "return function(conf, ctx) + require('lib.server').google_secret_apisix_jack() + end" + ] + } + }, + "uri": "/projects/apisix/secrets/jack/versions/latest:access", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: setup route (/projects/apisix_error/secrets/jack/versions/latest:access) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [ + "return function(conf, ctx) + 
require('lib.server').google_secret_apisix_error_jack() + end" + ] + } + }, + "uri": "/projects/apisix_error/secrets/jack/versions/latest:access", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: setup route (/projects/apisix/secrets/mysql/versions/latest:access) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/3', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [ + "return function(conf, ctx) + require('lib.server').google_secret_apisix_mysql() + end" + ] + } + }, + "uri": "/projects/apisix/secrets/mysql/versions/latest:access", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: get value from gcp by auth_file(fetch_oatuh_conf failed, read failed) +--- config + location /t { + content_by_lua_block { + local conf = { + auth_file = "t/secret/conf/nofind.json", + } + local gcp = require("apisix.secret.gcp") + local value, err = gcp.get(conf, "jack/key") + if not value then + return ngx.say(err) + end + ngx.say(value) + } + } +--- request +GET /t +--- response_body +failed to retrtive data from gcp secret manager: failed to read configuration, file: t/secret/conf/nofind.json, err: t/secret/conf/nofind.json: No such file or directory + + + +=== TEST 8: get value from gcp by auth_file(fetch_oatuh_conf success) +--- config + location /t { + content_by_lua_block { + local conf = { + auth_file = "t/secret/conf/success.json", + } + local gcp = require("apisix.secret.gcp") + local value, err = gcp.get(conf, "jack/key") + if not value then + return ngx.say(err) + end + 
ngx.say(value) + } + } +--- request +GET /t +--- response_body +value + + + +=== TEST 9: get value from gcp by auth_file(fetch_oatuh_conf failed, undefined) +--- config + location /t { + content_by_lua_block { + local conf = { + auth_file = "t/secret/conf/error.json", + } + local gcp = require("apisix.secret.gcp") + local value, err = gcp.get(conf, "jack/key") + if not value then + return ngx.say(err) + end + ngx.say(value) + } + } +--- request +GET /t +--- response_body +failed to retrtive data from gcp secret manager: config parse failure, file: t/secret/conf/error.json, err: property "auth_config" validation failed: property "client_email" is required + + + +=== TEST 10: get json value from gcp +--- config + location /t { + content_by_lua_block { + local conf = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 
+Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/secret/token", + scope = { + "https://www.googleapis.com/auth/cloud-platform" + }, + entries_uri = "http://127.0.0.1:1984" + }, + } + local gcp = require("apisix.secret.gcp") + local value, err = gcp.get(conf, "jack/key") + if not value then + return ngx.say(err) + end + ngx.say(value) + } + } +--- request +GET /t +--- response_body +value + + + +=== TEST 11: get string value from gcp +--- config + location /t { + content_by_lua_block { + local conf = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC 
+afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/secret/token", + scope = { + "https://www.googleapis.com/auth/cloud-platform" + }, + entries_uri = "http://127.0.0.1:1984" + }, + } + local gcp = require("apisix.secret.gcp") + local value, err = gcp.get(conf, "mysql") + if not value then + return ngx.say(err) + end + ngx.say(value) + } + } +--- request +GET /t +--- response_body +secret + + + +=== TEST 12: get value from gcp(failed to get google oauth token) +--- config + location /t { + content_by_lua_block { + local conf = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 
+v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix", + token_uri = "http://127.0.0.1:1980/google/secret/token", + scope = { + "https://www.googleapis.com/auth/root/cloud-platform" + }, + entries_uri = "http://127.0.0.1:1984" + }, + } + local gcp = require("apisix.secret.gcp") + local value, err = gcp.get(conf, "jack/key") + if not value then + return ngx.say(err) + end + ngx.say(value) + } + } +--- request +GET /t +--- response_body +failed to retrtive data from gcp secret manager: failed to get google oauth token +--- grep_error_log eval +qr/\{\"error\"\:\"[\w+\s+]*\"\}/ +--- 
grep_error_log_out +{"error":"no access to this scope"} + + + +=== TEST 13: get value from gcp (not res) +--- config + location /t { + content_by_lua_block { + local conf = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 +xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE 
KEY-----]], + project_id = "apisix_error", + token_uri = "http://127.0.0.1:1980/google/secret/token", + scope = { + "https://www.googleapis.com/auth/cloud-platform" + }, + entries_uri = "http://127.0.0.1:1984" + }, + } + local gcp = require("apisix.secret.gcp") + local value, err = gcp.get(conf, "jack/key") + if not value then + return ngx.say("err") + end + ngx.say(value) + } + } +--- request +GET /t +--- response_body +err + + + +=== TEST 14: get value from gcp (res status ~= 200) +--- config + location /t { + content_by_lua_block { + local conf = { + auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", + private_key = [[ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR +aeMgaLD3hBjvxKrz10uox1X8q7YYhf2ViRtLRUMa2bEMYksE5hbhwpNf6mKAnLOC +UuAT6cPPdUl/agKpJXviBPIR2LuzD17WsLJHp1HxUDssSkgfCaGcOGGNfLUhhIpF +2JUctLmxiZoAZySlSjcwupSuDJ0aPm0XO8r9H8Qu5kF2Vkz5e5bFivLTmvzrQTe4 +v5V1UI6hThElCSeUmdNF3uG3wopxlvq4zXgLTnuLbrNf/Gc4mlpV+UDgTISj32Ep +AB2vxKEbvQw4ti8YJnGXWjxLerhfrszFw+V8lpeduiDYA44ZFoVqvzxeIsVZNtcw +Iu7PvEPNAgMBAAECggEAVpyN9m7A1F631/aLheFpLgMbeKt4puV7zQtnaJ2XrZ9P +PR7pmNDpTu4uF3k/D8qrIm+L+uhVa+hkquf3wDct6w1JVnfQ93riImbnoKdK13ic +DcEZCwLjByfjFMNCxZ/gAZca55fbExlqhFy6EHmMjhB8s2LsXcTHRuGxNI/Vyi49 +sxECibe0U53aqdJbVWrphIS67cpwl4TUkN6mrHsNuDYNJ9dgkpapoqp4FTFQsBqC +afOK5qgJ68dWZ47FBUng+AZjdCncqAIuJxxItGVQP6YPsFs+OXcivIVHJr363TpC +l85FfdvqWV5OGBbwSKhNwiTNUVvfSQVmtURGWG/HbQKBgQD4gZ1z9+Lx19kT9WTz +lw93lxso++uhAPDTKviyWSRoEe5aN3LCd4My+/Aj+sk4ON/s2BV3ska5Im93j+vC +rCv3uPn1n2jUhWuJ3bDqipeTW4n/CQA2m/8vd26TMk22yOkkqw2MIA8sjJ//SD7g +tdG7up6DgGMP4hgbO89uGU7DAwKBgQDJtkKd0grh3u52Foeh9YaiAgYRwc65IE16 +UyD1OJxIuX/dYQDLlo5KyyngFa1ZhWIs7qC7r3xXH+10kfJY+Q+5YMjmZjlL8SR1 +Ujqd02R9F2//6OeswyReachJZbZdtiEw3lPa4jVFYfhSe0M2ZPxMwvoXb25eyCNI +1lYjSKq87wKBgHnLTNghjeDp4UKe6rNYPgRm0rDrhziJtX5JeUov1mALKb6dnmkh +GfRK9g8sQqKDfXwfC6Z2gaMK9YaryujGaWYoCpoPXtmJ6oLPXH4XHuLh4mhUiP46 
+xn8FEfSimuQS4/FMxH8A128GHQSI7AhGFFzlwfrBWcvXC+mNDsTvMmLxAoGARc+4 +upppfccETQZ7JsitMgD1TMwA2f2eEwoWTAitvlXFNT9PYSbYVHaAJbga6PLLCbYF +FzAjHpxEOKYSdEyu7n/ayDL0/Z2V+qzc8KarDsg/0RgwppBbU/nUgeKb/U79qcYo +y4ai3UKNCS70Ei1dTMvmdpnwXwlxfNIBufB6dy0CgYBMYq9Lc31GkC6PcGEEbx6W +vjImOadWZbuOVnvEQjb5XCdcOsWsMcg96PtoeuyyHmhnEF1GsMzcIdQv/PHrvYpK +Yp8D0aqsLEgwGrJQER26FPpKmyIwvcL+nm6q5W31PnU9AOC/WEkB6Zs58hsMzD2S +kEJQcmfVew5mFXyxuEn3zA== +-----END PRIVATE KEY-----]], + project_id = "apisix_error", + token_uri = "http://127.0.0.1:1980/google/secret/token", + scope = { + "https://www.googleapis.com/auth/cloud-platform" + }, + entries_uri = "http://127.0.0.1:1984" + }, + } + local gcp = require("apisix.secret.gcp") + local value, err = gcp.get(conf, "jack/key") + if not value then + return ngx.say("err") + end + ngx.say(value) + } + } +--- request +GET /t +--- response_body +err diff --git a/CloudronPackages/APISIX/apisix-source/t/secret/secret_lru.t b/CloudronPackages/APISIX/apisix-source/t/secret/secret_lru.t new file mode 100644 index 0000000..3ff3386 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/secret/secret_lru.t @@ -0,0 +1,98 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); +run_tests; + +__DATA__ + +=== TEST 1: add secret && consumer && check +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/mysecret', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix": "kv-v1/apisix", + "token": "root" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + -- change consumer with secrets ref: vault + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$secret://vault/mysecret/jack/auth-key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://vault/mysecret/jack/auth-key") + + + local code, body = t('/apisix/admin/secrets/vault/mysecret', ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "$secret://vault/mysecret/jack/auth-key" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + local secret = require("apisix.secret") + local value = secret.fetch_by_uri("$secret://vault/mysecret/jack/auth-key") + ngx.say(value) + } + } +--- response_body +nil diff --git a/CloudronPackages/APISIX/apisix-source/t/secret/vault.t b/CloudronPackages/APISIX/apisix-source/t/secret/vault.t new file mode 100644 index 0000000..607604f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/secret/vault.t @@ -0,0 +1,295 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + $ENV{VAULT_TOKEN} = "root"; + $ENV{WRONG_VAULT_TOKEN} = "squareroot" +} + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: check key: error format +--- config + location /t { + content_by_lua_block { + local vault = require("apisix.secret.vault") + local conf = { + prefix = "/kv/prefix", + token = "root", + uri = "http://127.0.0.1:2800" + } + local data, err = vault.get(conf, "apisix") + if err then + return ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +error key format, key: apisix + + + +=== TEST 2: check key: no main key +--- config + location /t { + content_by_lua_block { + local vault = require("apisix.secret.vault") + local conf = { + prefix = "/kv/prefix", + token = "root", + uri = "http://127.0.0.1:2800" + } + local data, err = vault.get(conf, "/apisix") + if err then + return ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +can't find main key, key: /apisix + + + +=== TEST 3: check key: no sub key +--- config + location /t { + content_by_lua_block { + local vault = require("apisix.secret.vault") + local conf = { + prefix = "/kv/prefix", + token = "root", + uri = "http://127.0.0.1:2800" + } + local 
data, err = vault.get(conf, "apisix/") + if err then + return ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +can't find sub key, key: apisix/ + + + +=== TEST 4: error vault uri +--- config + location /t { + content_by_lua_block { + local vault = require("apisix.secret.vault") + local conf = { + prefix = "/kv/prefix", + token = "root", + uri = "http://127.0.0.2:2800" + } + local data, err = vault.get(conf, "/apisix/sub") + if err then + return ngx.say(err) + end + + ngx.say("done") + } + } +--- request +GET /t +--- response_body +failed to retrtive data from vault kv engine: connection refused +--- timeout: 6 + + + +=== TEST 5: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/apisix-key/jack key=value +--- response_body +Success! Data written to: kv/apisix/apisix-key/jack + + + +=== TEST 6: get value from vault +--- config + location /t { + content_by_lua_block { + local vault = require("apisix.secret.vault") + local conf = { + prefix = "kv/apisix", + token = "root", + uri = "http://127.0.0.1:8200" + } + local value, err = vault.get(conf, "/apisix-key/jack/key") + if err then + return ngx.say(err) + end + + ngx.say("value") + } + } +--- request +GET /t +--- response_body +value + + + +=== TEST 7: get value from vault using token in an env var +--- config + location /t { + content_by_lua_block { + local vault = require("apisix.secret.vault") + local conf = { + prefix = "kv/apisix", + token = "$ENV://VAULT_TOKEN", + uri = "http://127.0.0.1:8200" + } + local value, err = vault.get(conf, "/apisix-key/jack/key") + if err then + return ngx.say(err) + end + + ngx.say("value") + } + } +--- request +GET /t +--- response_body +value + + + +=== TEST 8: get value from vault: token env var wrong/missing +--- config + location /t { + content_by_lua_block { + local vault = require("apisix.secret.vault") + local conf = { + prefix = "kv/apisix", + token = "$ENV://VALT_TOKEN", + uri 
= "http://127.0.0.1:8200" + } + local value, err = vault.get(conf, "/apisix-key/jack/key") + if err then + return ngx.say(err) + end + + ngx.print("value") + } + } +--- request +GET /t +--- response_body_like +failed to decode result, res: \{\"errors\":\[\"permission denied\"\]}\n + + + +=== TEST 9: get value from vault: token env var contains wrong token +--- config + location /t { + content_by_lua_block { + local vault = require("apisix.secret.vault") + local conf = { + prefix = "kv/apisix", + token = "$ENV://WRONG_VAULT_TOKEN", + uri = "http://127.0.0.1:8200" + } + local value, err = vault.get(conf, "/apisix-key/jack/key") + if err then + return ngx.say(err) + end + + ngx.print("value") + } + } +--- request +GET /t +--- response_body_like +failed to decode result, res: \{\"errors\":\[\"permission denied\"\]}\n + + + +=== TEST 10: setup route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "serverless-pre-function": { + "phase": "access", + "functions": [ + "return function(conf, ctx) ngx.log(ngx.ERR, 'HCV_NAMESAPCE:'..(ctx.var.http_x_vault_namespace or '_')); require('apisix.core').response.exit(200); end" + ] + } + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: hit route (test namespace header) +--- config + location /t { + content_by_lua_block { + local vault = require("apisix.secret.vault") + local conf = { + prefix = "kv/apisix", + token = "test", + uri = "http://localhost:1984/mock", + namespace = "apisix", + } + local value, err = vault.get(conf, "/apisix-key/jack/key") + if err then + return ngx.say(err) + end + } + } +--- request +GET /t +--- error_log +HCV_NAMESAPCE:apisix diff --git a/CloudronPackages/APISIX/apisix-source/t/sse_server_example/go.mod 
b/CloudronPackages/APISIX/apisix-source/t/sse_server_example/go.mod new file mode 100644 index 0000000..9cc909d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/sse_server_example/go.mod @@ -0,0 +1,3 @@ +module foo.bar/apache/sse_server_example + +go 1.17 diff --git a/CloudronPackages/APISIX/apisix-source/t/sse_server_example/main.go b/CloudronPackages/APISIX/apisix-source/t/sse_server_example/main.go new file mode 100644 index 0000000..ab976c8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/sse_server_example/main.go @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "fmt" + "log" + "net/http" + "os" + "time" +) + +func sseHandler(w http.ResponseWriter, r *http.Request) { + // Set the headers for SSE + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + + f, ok := w.(http.Flusher); + if !ok { + fmt.Fprintf(w, "[ERROR]") + return + } + // A simple loop that sends a message every 500ms + for i := 0; i < 5; i++ { + // Create a message to send to the client + fmt.Fprintf(w, "data: %s\n\n", time.Now().Format(time.RFC3339)) + + // Flush the data immediately to the client + f.Flush() + time.Sleep(500 * time.Millisecond) + } + fmt.Fprintf(w, "data: %s\n\n", "[DONE]") +} + +func main() { + // Create a simple route + http.HandleFunc("/v1/chat/completions", sseHandler) + port := os.Args[1] + // Start the server + log.Println("Starting server on :", port) + log.Fatal(http.ListenAndServe(":" + port, nil)) +} diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/healthcheck-resty-events.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/healthcheck-resty-events.t new file mode 100644 index 0000000..16bd593 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/healthcheck-resty-events.t @@ -0,0 +1,290 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +BEGIN { + if ($ENV{TEST_EVENTS_MODULE} ne "lua-resty-events") { + $SkipReason = "Only for lua-resty-events events module"; + } +} +use Test::Nginx::Socket::Lua $SkipReason ? (skip_all => $SkipReason) : (); +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: create stream route with a upstream that enable active healthcheck only, \ + two upstream nodes: one healthy + one unhealthy, unhealthy node with high priority +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": [ + { "host": "127.0.0.1", "port": 1995, "weight": 100, "priority": 0 }, + { "host": "127.0.0.1", "port": 9995, "weight": 100, "priority": 1 } + ], + "type": "roundrobin", + "retries": 0, + "checks": { + "active": { + "type": "tcp", + "timeout": 1, + "healthy": { + "interval": 1, + "successes": 2 + }, + "unhealthy": { + "interval": 1, + "tcp_failures": 1, + "timeouts": 1 + } + } + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit stream routes +--- stream_conf_enable +--- config + location /t { + content_by_lua_block { + -- send first request to create health checker + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.say("failed to connect: ", err) + return + end + local data, _ = sock:receive() + assert(data == nil, "first request should fail") + sock:close() + + -- wait for health check to take effect + ngx.sleep(2.5) + + for i = 1, 3 do + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local _, err 
= sock:send("mmm")
+                if err then
+                    ngx.say("failed to send: ", err)
+                    return
+                end
+
+                local data, err = sock:receive()
+                if err then
+                    ngx.say("failed to receive: ", err)
+                    return
+                end
+
+                assert(data == "hello world", "response should be 'hello world'")
+
+                sock:close()
+            end
+
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/stream_routes/1',
+                 ngx.HTTP_DELETE
+            )
+
+            if code >= 300 then
+                ngx.status = code
+                ngx.say("failed to delete stream route") -- was `ngs.say`: undefined global, would error instead of reporting
+                return
+            end
+
+            -- wait for checker to release
+            ngx.sleep(1)
+
+            ngx.say("passed")
+        }
+    }
+--- timeout: 10
+--- request
+GET /t
+--- response_body
+passed
+--- error_log
+create new checker
+proxy request to 127.0.0.1:9995 while connecting to upstream
+connect() failed (111: Connection refused) while connecting to upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: "127.0.0.1:9995"
+unhealthy TCP increment (1/1) for '127.0.0.1(127.0.0.1:9995)'
+proxy request to 127.0.0.1:1995 while connecting to upstream
+proxy request to 127.0.0.1:1995 while connecting to upstream
+proxy request to 127.0.0.1:1995 while connecting to upstream
+try to release checker
+
+
+
+=== TEST 3: create stream route with a upstream that enable active and passive healthcheck, \
+    configure active healthcheck with a high unhealthy threshold, \
+    two upstream nodes: one healthy + one unhealthy, unhealthy node with high priority
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/stream_routes/1',
+                ngx.HTTP_PUT,
+                [[{
+                    "remote_addr": "127.0.0.1",
+                    "upstream": {
+                        "nodes": [
+                            { "host": "127.0.0.1", "port": 1995, "weight": 100, "priority": 0 },
+                            { "host": "127.0.0.1", "port": 9995, "weight": 100, "priority": 1 }
+                        ],
+                        "type": "roundrobin",
+                        "retries": 0,
+                        "checks": {
+                            "active": {
+                                "type": "tcp",
+                                "timeout": 1,
+                                "healthy": {
+                                    "interval": 60,
+                                    "successes": 2
+                                },
+                                "unhealthy": {
+                                    "interval": 1,
+                                    
"tcp_failures": 254, + "timeouts": 1 + } + }, + "passive": { + "type": "tcp", + "healthy": { + "successes": 1 + }, + "unhealthy": { + "tcp_failures": 1 + } + } + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit stream routes +--- stream_conf_enable +--- config + location /t { + content_by_lua_block { + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.say("failed to connect: ", err) + return + end + local data, _ = sock:receive() + assert(data == nil, "first request should fail") + sock:close() + + -- Due to the implementation of lua-resty-events, it relies on the kernel and + -- the Nginx event loop to process socket connections. + -- When lua-resty-healthcheck handles passive healthchecks and uses lua-resty-events + -- as the events module, the synchronization of the first event usually occurs + -- before the start of the passive healthcheck. So when the execution finishes and + -- healthchecker tries to record the healthcheck status, it will not be able to find + -- an existing target (because the synchronization event has not finished yet), which + -- will lead to some anomalies that deviate from the original test case, so compatibility + -- operations are performed here. 
+ local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.say("failed to connect: ", err) + return + end + local data, _ = sock:receive() + assert(data == nil, "first request should fail") + sock:close() + + for i = 1, 3 do + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local _, err = sock:send("mmm") + if err then + ngx.say("failed to send: ", err) + return + end + + local data, err = sock:receive() + if err then + ngx.say("failed to receive: ", err) + return + end + + assert(data == "hello world", "response should be 'hello world'") + + sock:close() + end + + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed +--- error_log +proxy request to 127.0.0.1:9995 while connecting to upstream +connect() failed (111: Connection refused) while connecting to upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: "127.0.0.1:9995" +enabled healthcheck passive while connecting to upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: "127.0.0.1:9995", +unhealthy TCP increment (1/1) for '(127.0.0.1:9995)' while connecting to upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: "127.0.0.1:9995", +proxy request to 127.0.0.1:1995 while connecting to upstream +proxy request to 127.0.0.1:1995 while connecting to upstream +proxy request to 127.0.0.1:1995 while connecting to upstream +--- timeout: 10 diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/healthcheck-resty-worker-events.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/healthcheck-resty-worker-events.t new file mode 100644 index 0000000..a841cba --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/healthcheck-resty-worker-events.t @@ -0,0 +1,271 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +BEGIN { + if ($ENV{TEST_EVENTS_MODULE} ne "lua-resty-worker-events") { + $SkipReason = "Only for lua-resty-worker-events events module"; + } +} +use Test::Nginx::Socket::Lua $SkipReason ? (skip_all => $SkipReason) : (); +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: create stream route with a upstream that enable active healthcheck only, \ + two upstream nodes: one healthy + one unhealthy, unhealthy node with high priority +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": [ + { "host": "127.0.0.1", "port": 1995, "weight": 100, "priority": 0 }, + { "host": "127.0.0.1", "port": 9995, "weight": 100, "priority": 1 } + ], + "type": "roundrobin", + "retries": 0, + "checks": { + "active": { + "type": "tcp", + "timeout": 1, + "healthy": { + "interval": 1, + "successes": 2 + }, + "unhealthy": { + "interval": 1, + "tcp_failures": 1, + "timeouts": 1 + } + } + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit stream routes +--- stream_conf_enable +--- 
config
+    location /t {
+        content_by_lua_block {
+            -- send first request to create health checker
+            local sock = ngx.socket.tcp()
+            local ok, err = sock:connect("127.0.0.1", 1985)
+            if not ok then
+                ngx.say("failed to connect: ", err)
+                return
+            end
+            local data, _ = sock:receive()
+            assert(data == nil, "first request should fail")
+            sock:close()
+
+            -- wait for health check to take effect
+            ngx.sleep(2.5)
+
+            for i = 1, 3 do
+                local sock = ngx.socket.tcp()
+                local ok, err = sock:connect("127.0.0.1", 1985)
+                if not ok then
+                    ngx.say("failed to connect: ", err)
+                    return
+                end
+
+                local _, err = sock:send("mmm")
+                if err then
+                    ngx.say("failed to send: ", err)
+                    return
+                end
+
+                local data, err = sock:receive()
+                if err then
+                    ngx.say("failed to receive: ", err)
+                    return
+                end
+
+                assert(data == "hello world", "response should be 'hello world'")
+
+                sock:close()
+            end
+
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/stream_routes/1',
+                 ngx.HTTP_DELETE
+            )
+
+            if code >= 300 then
+                ngx.status = code
+                ngx.say("failed to delete stream route") -- was `ngs.say`: undefined global, would error instead of reporting
+                return
+            end
+
+            -- wait for checker to release
+            ngx.sleep(1)
+
+            ngx.say("passed")
+        }
+    }
+--- timeout: 10
+--- request
+GET /t
+--- response_body
+passed
+--- error_log
+create new checker
+proxy request to 127.0.0.1:9995 while connecting to upstream
+connect() failed (111: Connection refused) while connecting to upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: "127.0.0.1:9995"
+unhealthy TCP increment (1/1) for '127.0.0.1(127.0.0.1:9995)'
+proxy request to 127.0.0.1:1995 while connecting to upstream
+proxy request to 127.0.0.1:1995 while connecting to upstream
+proxy request to 127.0.0.1:1995 while connecting to upstream
+try to release checker
+
+
+
+=== TEST 3: create stream route with a upstream that enable active and passive healthcheck, \
+    configure active healthcheck with a high unhealthy threshold, \
+    two upstream nodes: one healthy + one unhealthy, unhealthy node with 
high priority +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": [ + { "host": "127.0.0.1", "port": 1995, "weight": 100, "priority": 0 }, + { "host": "127.0.0.1", "port": 9995, "weight": 100, "priority": 1 } + ], + "type": "roundrobin", + "retries": 0, + "checks": { + "active": { + "type": "tcp", + "timeout": 1, + "healthy": { + "interval": 60, + "successes": 2 + }, + "unhealthy": { + "interval": 1, + "tcp_failures": 254, + "timeouts": 1 + } + }, + "passive": { + "type": "tcp", + "healthy": { + "successes": 1 + }, + "unhealthy": { + "tcp_failures": 1 + } + } + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit stream routes +--- stream_conf_enable +--- config + location /t { + content_by_lua_block { + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.say("failed to connect: ", err) + return + end + local data, _ = sock:receive() + assert(data == nil, "first request should fail") + sock:close() + + for i = 1, 3 do + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local _, err = sock:send("mmm") + if err then + ngx.say("failed to send: ", err) + return + end + + local data, err = sock:receive() + if err then + ngx.say("failed to receive: ", err) + return + end + + assert(data == "hello world", "response should be 'hello world'") + + sock:close() + end + + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed +--- error_log +proxy request to 127.0.0.1:9995 while connecting to upstream +connect() failed (111: Connection refused) while connecting to upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: 
"127.0.0.1:9995" +enabled healthcheck passive while connecting to upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: "127.0.0.1:9995", +unhealthy TCP increment (1/1) for '(127.0.0.1:9995)' while connecting to upstream, client: 127.0.0.1, server: 0.0.0.0:1985, upstream: "127.0.0.1:9995", +proxy request to 127.0.0.1:1995 while connecting to upstream +proxy request to 127.0.0.1:1995 while connecting to upstream +proxy request to 127.0.0.1:1995 while connecting to upstream +--- timeout: 10 diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/mtls.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/mtls.t new file mode 100644 index 0000000..9e1f2c9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/mtls.t @@ -0,0 +1,335 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); + +add_block_preprocessor(sub { + my ($block) = @_; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set client certificate +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:2005"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + }, + plugins = { + ["proxy-rewrite"] = { + uri = "/hello" + } + }, + uri = "/mtls" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1995"] = 1, + }, + } + } + assert(t.test('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + client = { + ca = ssl_ca_cert, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 2: hit +--- stream_enable +--- request +GET /mtls +--- more_headers +Host: localhost +--- ignore_response +--- error_log +proxy 
request to 127.0.0.1:2005 +proxy request to 127.0.0.1:1995 + + + +=== TEST 3: reject client without cetificate +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:2005"] = 1, + } + }, + plugins = { + ["proxy-rewrite"] = { + uri = "/hello" + } + }, + uri = "/mtls" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 4: hit +--- stream_enable +--- request +GET /mtls +--- more_headers +Host: localhost +--- ignore_response +--- error_log +proxy request to 127.0.0.1:2005 +--- no_error_log +proxy request to 127.0.0.1:1995 + + + +=== TEST 5: reject client with bad cetificate +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = { + upstream = { + scheme = "https", + type = "roundrobin", + nodes = { + ["127.0.0.1:2005"] = 1, + }, + tls = { + client_cert = ssl_cert, + client_key = ssl_key, + } + }, + plugins = { + ["proxy-rewrite"] = { + uri = "/hello" + } + }, + uri = "/mtls" + } + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 6: hit +--- stream_enable +--- request +GET /mtls +--- more_headers +Host: localhost +--- ignore_response +--- error_log +proxy request to 127.0.0.1:2005 +--- no_error_log +proxy request to 127.0.0.1:1995 + + 
+ +=== TEST 7: 2 ssl objects, both have mTLS and with different CA +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local ssl_ca_cert2 = t.read_file("t/certs/apisix.crt") + + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1995"] = 1, + }, + } + } + assert(t.test('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + client = { + ca = ssl_ca_cert, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + return + end + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + client = { + ca = ssl_ca_cert2, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + json.encode(data) + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 8: request localhost and save tls session to reuse +--- stream_enable +--- max_size: 1048576 +--- exec +echo "" | timeout 1 openssl s_client -ign_eof -connect 127.0.0.1:2005 \ + -servername localhost -cert t/certs/mtls_client.crt -key t/certs/mtls_client.key \ + -sess_out session.dat + + + +=== TEST 9: request test.com with saved tls session +--- stream_enable +--- max_size: 1048576 +--- exec +echo "" | openssl s_client -connect 127.0.0.1:2005 -servername test.com \ + -sess_in session.dat +--- error_log +sni in client hello mismatch hostname of ssl session, sni: test.com, hostname: localhost diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/priority-balancer.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/priority-balancer.t new 
file mode 100644 index 0000000..3d0b8a8 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/priority-balancer.t @@ -0,0 +1,153 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); # repeat each test to ensure after_balance is called correctly +log_level('info'); +no_root_location(); +worker_connections(1024); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ($block->apisix_yaml) { + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } + } + + $block->set_value("stream_enable", 1); + + if (!$block->stream_request) { + $block->set_value("stream_request", "mmm"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- apisix_yaml +stream_routes: + - id: 1 + upstream: + type: least_conn + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: 1 + - host: 127.0.0.2 + port: 1979 + weight: 1 + priority: 1 + - host: 127.0.0.3 + port: 1979 + weight: 2 + priority: 0 + - host: 127.0.0.4 + port: 1979 + weight: 1 + priority: 0 + - host: 127.0.0.1 + port: 1995 + weight: 2 + 
priority: -1 +#END +--- stream_response +hello world +--- error_log +connect() failed +failed to get server from current priority 1, try next one +failed to get server from current priority 0, try next one +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 +proxy request to 127.0.0.3:1979 +proxy request to 127.0.0.4:1979 +proxy request to 127.0.0.1:1995 + + + +=== TEST 2: default priority is 0 +--- apisix_yaml +stream_routes: + - id: 1 + upstream: + type: least_conn + nodes: + - host: 127.0.0.1 + port: 1979 + weight: 2 + priority: 1 + - host: 127.0.0.2 + port: 1979 + weight: 1 + priority: 1 + - host: 127.0.0.3 + port: 1979 + weight: 2 + - host: 127.0.0.4 + port: 1979 + weight: 1 + - host: 127.0.0.1 + port: 1995 + weight: 2 + priority: -1 +#END +--- stream_response +hello world +--- error_log +connect() failed +failed to get server from current priority 1, try next one +failed to get server from current priority 0, try next one +--- grep_error_log eval +qr/proxy request to \S+/ +--- grep_error_log_out +proxy request to 127.0.0.1:1979 +proxy request to 127.0.0.2:1979 +proxy request to 127.0.0.3:1979 +proxy request to 127.0.0.4:1979 +proxy request to 127.0.0.1:1995 + + + +=== TEST 3: fix priority for nonarray nodes +--- apisix_yaml +stream_routes: + - id: 1 + upstream: + type: roundrobin + nodes: + "127.0.0.1:1995": 1 + "127.0.0.2:1995": 1 +#END +--- stream_response +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/random.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/random.t new file mode 100644 index 0000000..dfe939c --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/random.t @@ -0,0 +1,79 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +workers(4); +log_level('info'); +worker_connections(256); +repeat_each(1); +no_long_string(); +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: generate different random number in different worker process +--- stream_enable +--- config + location /test { + content_by_lua_block { + ngx.sleep(0.3) + local log_file = ngx.config.prefix() .. "logs/error.log" + local file = io.open(log_file, "r") + local log = file:read("*a") + + local it, err = ngx.re.gmatch(log, [[random stream test in \[1, 10000\]: (\d+)]], "jom") + if not it then + ngx.log(ngx.ERR, "failed to gmatch: ", err) + return + end + + local random_nums = {} + while true do + local m, err = it() + if err then + ngx.log(ngx.ERR, "error: ", err) + return + end + + if not m then + break + end + + -- found a match + table.insert(random_nums, m[1]) + end + + for i = 2, #random_nums do + local pre = random_nums[i - 1] + local cur = random_nums[i] + ngx.say("random[", i - 1, "] == random[", i, "]: ", pre == cur) + if not pre == cur then + ngx.say("random info in log: ", table.concat(random_nums, ", ")) + break + end + end + } + } +--- request +GET /test +--- response_body +random[1] == random[2]: false +random[2] == random[3]: false +random[3] == random[4]: false +random[4] == random[5]: false diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/sanity-repeat.t 
b/CloudronPackages/APISIX/apisix-source/t/stream-node/sanity-repeat.t new file mode 100644 index 0000000..5a12aa4 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/sanity-repeat.t @@ -0,0 +1,134 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); +workers(1); +repeat_each(2); + +run_tests(); + +__DATA__ + +=== TEST 1: set stream route(id: 1) -> service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "service_id": 1 + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 3: set stream / ssl +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = { + cert = ssl_cert, key = ssl_key, + sni = "*.test.com", + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t.test('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "sni": "a.test.com", + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit route +--- stream_tls_request +mmm +--- stream_sni: a.test.com +--- response_body +hello world +--- error_log +proxy request to 127.0.0.1:1995 diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/sanity-with-service.t 
b/CloudronPackages/APISIX/apisix-source/t/stream-node/sanity-with-service.t new file mode 100644 index 0000000..799a96a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/sanity-with-service.t @@ -0,0 +1,294 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: set stream route(id: 1) -> service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + + code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 3: set stream route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.2", + "service_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: not hit route +--- stream_enable +--- stream_response + + + +=== TEST 5: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: set service upstream (id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + + code, body = 
t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: set stream route (id: 1) with service (id: 1) which uses upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "service_id": 1 + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 9: set stream route (id: 1) which uses upstream_id and remote address with IP CIDR +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1/26", + "service_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 11: reject bad CIDR +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": ":/8", + "service_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid remote_addr: :/8"} + + + +=== TEST 12: skip upstream http host check in stream subsystem +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + 
"127.0.0.1:1995": 1, + "127.0.0.2:1995": 1 + }, + "pass_host": "node", + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: hit route +--- stream_request eval +mmm +--- stream_response +hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/sanity.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/sanity.t new file mode 100644 index 0000000..556c4bc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/sanity.t @@ -0,0 +1,403 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); + +run_tests(); + +__DATA__ + +=== TEST 1: set stream route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 3: set stream route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.2", + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: not hit route +--- stream_enable +--- stream_response + + + +=== TEST 5: delete route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_DELETE + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: set stream route(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1995, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body 
+passed + + + +=== TEST 7: set upstream (id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: set stream route (id: 1) which uses upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 10: skip route config tombstone +--- stream_conf_enable +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + t('/apisix/admin/stream_routes/1', ngx.HTTP_DELETE) + t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + assert(sock:send("mmm")) + local data = assert(sock:receive("*a")) + ngx.print(data) + } +} +--- request +GET /t +--- response_body +hello world + + + +=== TEST 11: set stream route (id: 1) which uses upstream_id and remote address with IP CIDR +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1/26", + "upstream_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 13: reject bad CIDR +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": ":/8", + "upstream_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid remote_addr: :/8"} + + + +=== TEST 14: skip upstream http host check in stream subsystem +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1995": 1, + "127.0.0.2:1995": 1 + }, + "pass_host": "node", + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: hit route +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 16: reuse ctx and more +--- stream_extra_init_by_lua + local ctx = require("apisix.core.ctx") + local tablepool = require("apisix.core").tablepool + + local old_set_vars_meta = ctx.set_vars_meta + ctx.set_vars_meta = function(...) + ngx.log(ngx.WARN, "fetch ctx var") + return old_set_vars_meta(...) + end + + local old_release_vars = ctx.release_vars + ctx.release_vars = function(...) + ngx.log(ngx.WARN, "release ctx var") + return old_release_vars(...) + end + + local old_fetch = tablepool.fetch + tablepool.fetch = function(name, ...) + ngx.log(ngx.WARN, "fetch table ", name) + return old_fetch(name, ...) 
+ end + + local old_release = tablepool.release + tablepool.release = function(name, ...) + ngx.log(ngx.WARN, "release table ", name) + return old_release(name, ...) + end +--- stream_request eval +mmm +--- stream_response +hello world +--- grep_error_log eval +qr/(fetch|release) (ctx var|table \w+)/ +--- grep_error_log_out +fetch table api_ctx +fetch ctx var +fetch table ctx_var +fetch table plugins +release ctx var +release table ctx_var +release table plugins +release table api_ctx diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/sni.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/sni.t new file mode 100644 index 0000000..41554ba --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/sni.t @@ -0,0 +1,341 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); +worker_connections(1024); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set stream / ssl +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = { + cert = ssl_cert, key = ssl_key, + sni = "*.test.com", + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t.test('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "sni": "a.test.com", + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t.test('/apisix/admin/stream_routes/2', + ngx.HTTP_PUT, + [[{ + "sni": "*.test.com", + "upstream": { + "nodes": { + "127.0.0.2:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t.test('/apisix/admin/stream_routes/3', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.3:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- stream_tls_request +mmm +--- stream_sni: a.test.com +--- response_body +hello world +--- error_log +proxy request to 127.0.0.1:1995 + + + +=== TEST 3: hit route (session reuse) +--- stream_tls_request +mmm +--- stream_sni: a.test.com +--- stream_session_reuse +--- response_body +hello world +hello world +--- grep_error_log eval +qr/proxy request to 127.0.0.\d:1995/ +--- grep_error_log_out +proxy request to 127.0.0.1:1995 +proxy request to 
127.0.0.1:1995 + + + +=== TEST 4: hit route, wildcard SNI +--- stream_tls_request +mmm +--- stream_sni: b.test.com +--- response_body +hello world +--- error_log +proxy request to 127.0.0.2:1995 + + + +=== TEST 5: hit route, no TLS +--- stream_request +mmm +--- stream_response +hello world +--- error_log +proxy request to 127.0.0.3:1995 + + + +=== TEST 6: set different stream route with the same sni +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local code, body = t.test('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "sni": "a.test.com", + "remote_addr": "127.0.0.2", + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t.test('/apisix/admin/stream_routes/4', + ngx.HTTP_PUT, + [[{ + "sni": "a.test.com", + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "127.0.0.4:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit route +--- stream_tls_request +mmm +--- stream_sni: a.test.com +--- response_body +hello world +--- error_log +proxy request to 127.0.0.4:1995 + + + +=== TEST 8: change a.test.com route to fall back to wildcard route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local code, body = t.test('/apisix/admin/stream_routes/4', + ngx.HTTP_PUT, + [[{ + "sni": "a.test.com", + "remote_addr": "127.0.0.3", + "upstream": { + "nodes": { + "127.0.0.4:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit route +--- stream_tls_request +mmm +--- stream_sni: a.test.com +--- response_body +hello world +--- error_log +proxy request to 
127.0.0.2:1995 + + + +=== TEST 10: use fallback sni to match route +--- yaml_config +apisix: + node_listen: 1984 + proxy_mode: http&stream + stream_proxy: + tcp: + - 9100 + ssl: + fallback_sni: a.test.com +--- stream_tls_request +mmm +--- response_body +hello world +--- error_log +proxy request to 127.0.0.2:1995 + + + +=== TEST 11: no sni matched, fall back to non-sni route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local code, body = t.test('/apisix/admin/stream_routes/2', + ngx.HTTP_DELETE) + + if code >= 300 then + ngx.status = code + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit route +--- stream_tls_request +mmm +--- stream_sni: b.test.com +--- response_body +hello world +--- error_log +proxy request to 127.0.0.3:1995 + + + +=== TEST 13: clean up routes +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + for i = 1, 4 do + t.test('/apisix/admin/stream_routes/' .. i, ngx.HTTP_DELETE) + end + } + } +--- request +GET /t diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/tls.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/tls.t new file mode 100644 index 0000000..13bdcba --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/tls.t @@ -0,0 +1,135 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); +worker_connections(1024); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set stream / ssl +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = { + cert = ssl_cert, key = ssl_key, + sni = "test.com", + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + return + end + + local code, body = t.test('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- stream_tls_request +mmm +--- stream_sni: test.com +--- response_body +hello world + + + +=== TEST 3: wrong sni +--- stream_tls_request +mmm +--- stream_sni: xx.com +--- error_log +failed to match any SSL certificate by SNI: xx.com + + + +=== TEST 4: missing sni +--- stream_tls_request +mmm +--- error_log +failed to find SNI + + + +=== TEST 5: ensure table is reused in TLS handshake +--- stream_extra_init_by_lua + local tablepool = require("apisix.core").tablepool + local old_fetch = tablepool.fetch + tablepool.fetch = function(name, ...) 
+ ngx.log(ngx.WARN, "fetch table ", name) + return old_fetch(name, ...) + end + + local old_release = tablepool.release + tablepool.release = function(name, ...) + ngx.log(ngx.WARN, "release table ", name) + return old_release(name, ...) + end +--- stream_tls_request +mmm +--- stream_sni: test.com +--- response_body +hello world +--- grep_error_log eval +qr/(fetch|release) table \w+/ +--- grep_error_log_out +fetch table api_ctx +release table api_ctx +fetch table api_ctx +fetch table ctx_var +fetch table plugins +release table ctx_var +release table plugins +release table api_ctx diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/upstream-domain.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/upstream-domain.t new file mode 100644 index 0000000..179d645 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/upstream-domain.t @@ -0,0 +1,197 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +log_level('info'); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("stream_enable", 1); + + if (!$block->stream_request) { + $block->set_value("stream_request", "mmm"); + } + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream & stream_routes (id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "localhost:1995": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- stream_response +hello world + + + +=== TEST 3: set stream_routes with upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "localhost:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit route +--- stream_response +hello world + + + +=== TEST 5: bad domain in the upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "local:1995": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream_id": "1" + 
}]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: hit route +--- stream_response +receive stream response error: connection reset by peer +--- error_log + + + +=== TEST 7: bad domain in the stream route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream": { + "nodes": { + "local:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit route +--- stream_response +receive stream response error: connection reset by peer +--- error_log +no valid upstream node diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-node/upstream-tls.t b/CloudronPackages/APISIX/apisix-source/t/stream-node/upstream-tls.t new file mode 100644 index 0000000..be6408f --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-node/upstream-tls.t @@ -0,0 +1,142 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("stream_enable", 1); + + my $stream_config = $block->stream_config // ''; + $stream_config .= <<_EOC_; + server { + listen 8765 ssl; + ssl_certificate cert/apisix.crt; + ssl_certificate_key cert/apisix.key; + + content_by_lua_block { + local sock = ngx.req.socket() + local data = sock:receive("1") + ngx.say("hello ", ngx.var.ssl_server_name) + } + } +_EOC_ + + $block->set_value("extra_stream_config", $stream_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set upstream & stream_routes (id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "scheme": "tls", + "nodes": { + "localhost:8765": 1 + }, + "type": "roundrobin" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "upstream_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- stream_request +mmm +--- stream_response +hello apisix_backend + + + +=== TEST 3: set ssl +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = { + cert = ssl_cert, key = ssl_key, + sni = "test.com", + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 
300 then + ngx.status = code + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit route +--- stream_tls_request +mmm +--- stream_sni: test.com +--- response_body +hello test.com diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-plugin/ip-restriction.t b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/ip-restriction.t new file mode 100644 index 0000000..75d5053 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/ip-restriction.t @@ -0,0 +1,159 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: blacklist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "blacklist": [ + "127.0.0.0/24" + ] + } + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit +--- stream_request eval +mmm +--- error_log +Connection reset by peer + + + +=== TEST 3: whitelist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + "whitelist": [ + "127.0.0.0/24" + ] + } + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit +--- stream_request eval +mmm +--- stream_response +hello world + + + +=== TEST 5: validate schema +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "ip-restriction": { + } + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- 
response_body +{"error_msg":"failed to check the configuration of stream plugin [ip-restriction]: value should match only one schema, but matches none"} diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-plugin/limit-conn.t b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/limit-conn.t new file mode 100644 index 0000000..c6c7c89 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/limit-conn.t @@ -0,0 +1,336 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + my $config = $block->config // <<_EOC_; + location /hit { + content_by_lua_block { + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end + + local bytes, err = sock:send("mmm") + if not bytes then + ngx.log(ngx.ERR, "send stream request error: ", err) + return ngx.exit(503) + end + local data, err = sock:receive("*a") + if not data then + sock:close() + return ngx.exit(503) + end + ngx.print(data) + } + } + + location /test_concurrency { + content_by_lua_block { + local reqs = {} + for i = 1, 5 do + reqs[i] = { "/hit" } + end + local resps = { ngx.location.capture_multi(reqs) } + for i, resp in ipairs(resps) do + ngx.say(resp.status) + end + } + } +_EOC_ + + $block->set_value("config", $config); + + my $stream_upstream_code = $block->stream_upstream_code // <<_EOC_; + local sock = ngx.req.socket() + local data = sock:receive("1") + ngx.sleep(0.2) + ngx.say("hello world") +_EOC_ + $block->set_value("stream_upstream_code", $stream_upstream_code); +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 100, + "burst": 50, + "default_conn_delay": 0.1, + "key": "remote_addr" + } + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + 
ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: not exceeding the burst +--- request +GET /test_concurrency +--- response_body +200 +200 +200 +200 +200 +--- stream_enable + + + +=== TEST 3: update route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key": "remote_addr" + } + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: exceeding the burst +--- request +GET /test_concurrency +--- response_body +200 +200 +200 +503 +503 +--- error_log +Connection reset by peer +--- stream_enable + + + +=== TEST 5: var combination +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key": "$remote_addr $server_addr", + "key_type": "var_combination" + } + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: exceeding the burst +--- request +GET /test_concurrency +--- response_body +200 +200 +200 +503 +503 +--- error_log +Connection reset by peer +--- stream_enable + + + +=== TEST 7: var combination (not exceed the burst) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key": "$remote_port $server_addr", + "key_type": "var_combination" + 
} + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: hit +--- request +GET /test_concurrency +--- response_body +200 +200 +200 +200 +200 +--- stream_enable + + + +=== TEST 9: bypass empty key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key": "$proxy_protocol_addr $proxy_protocol_port", + "key_type": "var_combination" + } + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: hit +--- request +GET /test_concurrency +--- response_body +200 +200 +200 +503 +503 +--- error_log +The value of the configured key is empty, use client IP instead +--- stream_enable diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-plugin/limit-conn2.t b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/limit-conn2.t new file mode 100644 index 0000000..9efb2b6 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/limit-conn2.t @@ -0,0 +1,134 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +$ENV{TEST_NGINX_REDIS_PORT} ||= 1985; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: redis +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +worker_connections(1024); +run_tests; + +__DATA__ + +=== TEST 1: create a stream router with limit-conn +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key": "$remote_port $server_addr", + "key_type": "var_combination" + } + }, + "upstream": { + "type": "none", + "nodes": { + "127.0.0.1:6379": 1 + } + }, + "protocol": { + "name": "redis" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: access the redis via proxy +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to 
connect: ", err) + return + end + + local res, err = red:hmset("animals", "dog", "bark", "cat", "meow") + if not res then + ngx.say("failed to set animals: ", err) + return + end + ngx.say("hmset animals: ", res) + + local res, err = red:hmget("animals", "dog", "cat") + if not res then + ngx.say("failed to get animals: ", err) + return + end + ngx.say("hmget animals: ", res) + + ok, err = red:close() + if not ok then + ngx.say("failed to close: ", err) + return + end + } + } +--- response_body +hmset animals: OK +hmget animals: barkmeow +--- no_error_log +attempt to perform arithmetic on field 'request_time' +--- stream_conf_enable diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-plugin/mqtt-proxy.t b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/mqtt-proxy.t new file mode 100644 index 0000000..65d48ed --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/mqtt-proxy.t @@ -0,0 +1,395 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: set route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: invalid header +--- stream_request eval +mmm +--- error_log +Received unexpected MQTT packet type+flags + + + +=== TEST 3: hit route +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- stream_response +hello world + + + +=== TEST 4: set route (wrong server port) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 2000, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4, + "upstream": { + "host": "127.0.0.1", + "port": 1995 + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: failed to match route +--- stream_request eval +"\x10\x0f" +--- stream_response +receive stream response error: connection reset by peer +--- error_log +receive stream response error: connection reset by peer +--- error_log +match(): not hit any route + + + +=== TEST 6: set route with host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + 
local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "localhost", + "port": 1995, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit route +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- stream_response +hello world + + + +=== TEST 8: set route with upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": [{ + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + }] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: hit route +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- stream_response +hello world +--- grep_error_log eval +qr/mqtt client id: \w+/ +--- grep_error_log_out +mqtt client id: foo + + + +=== TEST 10: hit route with empty client id +--- stream_request eval +"\x10\x0c\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x00" +--- stream_response +hello world +--- grep_error_log eval +qr/mqtt client id: \w+/ +--- grep_error_log_out + + + +=== TEST 11: MQTT 5 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + 
"server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 5 + } + }, + "upstream": { + "type": "roundrobin", + "nodes": [{ + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + }] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: hit route with empty property +--- stream_request eval +"\x10\x0d\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x00" +--- stream_response +hello world +--- grep_error_log eval +qr/mqtt client id: \w+/ +--- grep_error_log_out + + + +=== TEST 13: hit route with property +--- stream_request eval +"\x10\x1b\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x05\x11\x00\x00\x0e\x10\x00\x09\x63\x6c\x69\x6e\x74\x2d\x31\x31\x31" +--- stream_response +hello world +--- grep_error_log eval +qr/mqtt client id: \S+/ +--- grep_error_log_out +mqtt client id: clint-111 + + + +=== TEST 14: balance with mqtt_client_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 5 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "0.0.0.0", + "port": 1995, + "weight": 1 + }, + { + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 15: hit route with empty id +--- stream_request eval +"\x10\x0d\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x00" +--- stream_response +hello world +--- grep_error_log eval +qr/(mqtt client id: \w+|proxy request to \S+)/ +--- grep_error_log_out +proxy request to 127.0.0.1:1995 + + + +=== TEST 16: hit route with different client id, part 1 +--- 
stream_request eval +"\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x66" +--- stream_response +hello world +--- grep_error_log eval +qr/(mqtt client id: \w+|proxy request to \S+)/ +--- grep_error_log_out +mqtt client id: f +proxy request to 0.0.0.0:1995 + + + +=== TEST 17: hit route with different client id, part 2 +--- stream_request eval +"\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x67" +--- stream_response +hello world +--- grep_error_log eval +qr/(mqtt client id: \w+|proxy request to \S+)/ +--- grep_error_log_out +mqtt client id: g +proxy request to 127.0.0.1:1995 diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-plugin/mqtt-proxy2.t b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/mqtt-proxy2.t new file mode 100644 index 0000000..3521187 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/mqtt-proxy2.t @@ -0,0 +1,184 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: set route with invalid host +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "loc", + "port": 1995, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit route +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- error_log +failed to parse domain: loc, error: +--- timeout: 10 + + + +=== TEST 3: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "0.0.0.0", + "port": 1995, + "weight": 1 + }, + { + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + } + ] + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: balance with mqtt_client_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 5 + } + }, + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit route 
with empty id +--- stream_request eval +"\x10\x0d\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x00" +--- stream_response +hello world +--- grep_error_log eval +qr/(mqtt client id: \w+|proxy request to \S+)/ +--- grep_error_log_out +proxy request to 127.0.0.1:1995 + + + +=== TEST 6: hit route with different client id, part 1 +--- stream_request eval +"\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x66" +--- stream_response +hello world +--- grep_error_log eval +qr/(mqtt client id: \w+|proxy request to \S+)/ +--- grep_error_log_out +mqtt client id: f +proxy request to 0.0.0.0:1995 + + + +=== TEST 7: hit route with different client id, part 2 +--- stream_request eval +"\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x67" +--- stream_response +hello world +--- grep_error_log eval +qr/(mqtt client id: \w+|proxy request to \S+)/ +--- grep_error_log_out +mqtt client id: g +proxy request to 127.0.0.1:1995 diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-plugin/plugin.t b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/plugin.t new file mode 100644 index 0000000..3e611d2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/plugin.t @@ -0,0 +1,78 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + $block->set_value("no_error_log", "[error]"); + + $block; +}); + +no_long_string(); +no_shuffle(); +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: ensure all plugins have exposed their name +--- stream_enable +--- stream_server_config + content_by_lua_block { + local lfs = require("lfs") + for file_name in lfs.dir(ngx.config.prefix() .. "/../../apisix/stream/plugins/") do + if string.match(file_name, ".lua$") then + local expected = file_name:sub(1, #file_name - 4) + local plugin = require("apisix.stream.plugins." .. expected) + if plugin.name ~= expected then + ngx.say("expected ", expected, " got ", plugin.name) + return + end + end + end + ngx.say('ok') + } +--- stream_response +ok + + + +=== TEST 2: ensure all plugins have unique priority +--- stream_enable +--- stream_server_config + content_by_lua_block { + local lfs = require("lfs") + local pri_name = {} + for file_name in lfs.dir(ngx.config.prefix() .. "/../../apisix/stream/plugins/") do + if string.match(file_name, ".lua$") then + local name = file_name:sub(1, #file_name - 4) + local plugin = require("apisix.stream.plugins." .. name) + if pri_name[plugin.priority] then + ngx.say(name, " has same priority with ", pri_name[plugin.priority]) + return + end + pri_name[plugin.priority] = plugin.name + end + end + ngx.say('ok') + } +--- stream_response +ok diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-plugin/prometheus.t b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/prometheus.t new file mode 100644 index 0000000..7c37128 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/prometheus.t @@ -0,0 +1,158 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $extra_yaml_config = <<_EOC_; +stream_plugins: + - mqtt-proxy + - prometheus +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/routes/metrics", + data = [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + }, + { + url = "/apisix/admin/stream_routes/mqtt", + data = [[{ + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + }, + "prometheus": {} + }, + "upstream": { + "type": "roundrobin", + 
"nodes": [{ + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + }] + } + }]] + } + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + if code > 300 then + ngx.say(body) + return + end + end + } + } +--- response_body + + + +=== TEST 2: hit +--- stream_request eval +"\x10\x0f\x00\x04\x4d\x51\x54\x54\x04\x02\x00\x3c\x00\x03\x66\x6f\x6f" +--- stream_response +hello world + + + +=== TEST 3: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_stream_connection_total\{route="mqtt"\} 1/ + + + +=== TEST 4: hit, error +--- stream_request eval +mmm +--- error_log +Received unexpected MQTT packet type+flags + + + +=== TEST 5: fetch the prometheus metric data +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_stream_connection_total\{route="mqtt"\} 2/ + + + +=== TEST 6: contains metrics from stub_status +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_nginx_http_current_connections\{state="active"\} 1/ + + + +=== TEST 7: contains basic metrics +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_node_info\{hostname="[^"]+"\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/stream-plugin/syslog.t b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/syslog.t new file mode 100644 index 0000000..0485b08 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/stream-plugin/syslog.t @@ -0,0 +1,416 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen 8125 udp; + content_by_lua_block { + require("lib.mock_layer4").dogstatsd() + } + } +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + +}); + +run_tests; + +__DATA__ + +=== TEST 1: custom log format not set +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- ensure the format is not set + t('/apisix/admin/plugin_metadata/syslog', + ngx.HTTP_DELETE + ) + + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 8125, + "sock_type": "udp", + "batch_max_size": 1, + "flush_limit":1 + } + }, + "upstream_id": "1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: hit +--- stream_request eval +mmm +--- stream_response +hello world +--- error_log +syslog's log_format is not set + + + +=== TEST 3: set custom log format +--- config + 
location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/syslog', + ngx.HTTP_PUT, + [[{ + "log_format": { + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit +--- stream_request eval +mmm +--- stream_response +hello world +--- wait: 0.5 +--- error_log eval +qr/message received:.*\"client_ip\\"\:\\"127.0.0.1\\"/ + + + +=== TEST 5: flush manually +--- config + location /t { + content_by_lua_block { + local plugin = require("apisix.stream.plugins.syslog") + local logger_socket = require("resty.logger.socket") + local logger, err = logger_socket:new({ + host = "127.0.0.1", + port = 5044, + flush_limit = 100, + }) + + local bytes, err = logger:log("abc") + if err then + ngx.log(ngx.ERR, err) + end + + local bytes, err = logger:log("efg") + if err then + ngx.log(ngx.ERR, err) + end + + local ok, err = plugin.flush_syslog(logger) + if not ok then + ngx.say("failed to flush syslog: ", err) + return + end + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done + + + +=== TEST 6: small flush_limit, instant flush +--- stream_conf_enable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044, + "flush_limit" : 1, + "inactive_timeout": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + -- wait etcd sync + ngx.sleep(0.5) + + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + assert(sock:send("mmm")) + local data = 
assert(sock:receive("*a")) + ngx.print(data) + + -- wait flush log + ngx.sleep(2.5) + } + } +--- request +GET /t +--- response_body +passed +hello world +--- timeout: 5 +--- error_log +try to lock with key stream/route#1 +unlock with key stream/route#1 + + + +=== TEST 7: check plugin configuration updating +--- stream_conf_enable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body1 = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5044, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.status = code + ngx.say("fail") + return + end + + assert(sock:send("mmm")) + local body2 = assert(sock:receive("*a")) + + local code, body3 = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "host" : "127.0.0.1", + "port" : 5045, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.status = code + ngx.say("fail") + return + end + + assert(sock:send("mmm")) + local body4 = assert(sock:receive("*a")) + + ngx.print(body1) + ngx.print(body2) + ngx.print(body3) + ngx.print(body4) + } + } +--- request +GET /t +--- wait: 0.5 +--- response_body +passedhello world +passedhello world +--- grep_error_log eval +qr/sending a batch logs to 127.0.0.1:(\d+)/ +--- grep_error_log_out +sending a batch logs to 127.0.0.1:5044 +sending a batch logs to 127.0.0.1:5045 + + + +=== TEST 8: log format in plugin +--- config + location 
/t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "batch_max_size": 1, + "flush_limit": 1, + "log_format": { + "vip": "$remote_addr" + }, + "host" : "127.0.0.1", + "port" : 5050 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1995": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: access +--- stream_extra_init_by_lua + local syslog = require("apisix.plugins.syslog.init") + local json = require("apisix.core.json") + local log = require("apisix.core.log") + local old_f = syslog.push_entry + syslog.push_entry = function(conf, ctx, entry) + assert(entry.vip == "127.0.0.1") + log.info("push_entry is called with data: ", json.encode(entry)) + return old_f(conf, ctx, entry) + end +--- stream_request +mmm +--- stream_response +hello world +--- wait: 0.5 +--- no_error_log +[error] +--- error_log +push_entry is called with data diff --git a/CloudronPackages/APISIX/apisix-source/t/tars/conf/tars.sql b/CloudronPackages/APISIX/apisix-source/t/tars/conf/tars.sql new file mode 100644 index 0000000..166bfb9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/tars/conf/tars.sql @@ -0,0 +1,539 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +-- MySQL dump 10.13 Distrib 5.6.26, for Linux (x86_64) +-- +-- Host: 172.25.0.2 Database: db_tars +-- ------------------------------------------------------ +-- Server version 5.6.51 + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +-- +-- Current Database: `db_tars` +-- + +/*!40000 DROP DATABASE IF EXISTS `db_tars`*/; + +CREATE DATABASE /*!32312 IF NOT EXISTS*/ `db_tars` /*!40100 DEFAULT CHARACTER SET latin1 */; + +USE `db_tars`; + +-- +-- Table structure for table `t_adapter_conf` +-- + +DROP TABLE IF EXISTS `t_adapter_conf`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_adapter_conf` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `application` varchar(50) DEFAULT '', + `server_name` varchar(128) DEFAULT '', + `node_name` varchar(50) DEFAULT '', + `adapter_name` varchar(100) DEFAULT '', + `registry_timestamp` datetime(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), + `thread_num` int(11) DEFAULT 
'1', + `endpoint` varchar(128) DEFAULT '', + `max_connections` int(11) DEFAULT '1000', + `allow_ip` varchar(255) NOT NULL DEFAULT '', + `servant` varchar(128) DEFAULT '', + `queuecap` int(11) DEFAULT NULL, + `queuetimeout` int(11) DEFAULT NULL, + `posttime` datetime DEFAULT NULL, + `lastuser` varchar(30) DEFAULT NULL, + `protocol` varchar(64) DEFAULT 'tars', + `handlegroup` varchar(64) DEFAULT '', + PRIMARY KEY (`id`), + UNIQUE KEY `application` (`application`,`server_name`,`node_name`,`adapter_name`), + KEY `adapter_conf_endpoint_index` (`endpoint`), + KEY `index_regtime_1` (`registry_timestamp`), + KEY `index_regtime` (`registry_timestamp`) +) ENGINE=InnoDB AUTO_INCREMENT=72 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_ats_cases` +-- + +DROP TABLE IF EXISTS `t_ats_cases`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_ats_cases` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `casename` varchar(20) DEFAULT NULL, + `retvalue` text, + `paramvalue` text, + `interfaceid` int(11) DEFAULT NULL, + `posttime` datetime DEFAULT NULL, + `lastuser` varchar(30) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_ats_interfaces` +-- + +DROP TABLE IF EXISTS `t_ats_interfaces`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_ats_interfaces` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `objname` varchar(150) DEFAULT NULL, + `funcname` varchar(150) DEFAULT NULL, + `retype` text, + `paramtype` text, + `outparamtype` text, + `interfaceid` int(11) DEFAULT NULL, + `postime` datetime DEFAULT NULL, + `lastuser` varchar(30) DEFAULT NULL, + `request_charset` varchar(16) NOT NULL, + `response_charset` varchar(16) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY 
`objname` (`objname`,`funcname`), + UNIQUE KEY `objname_idx` (`objname`,`funcname`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_config_files` +-- + +DROP TABLE IF EXISTS `t_config_files`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_config_files` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `server_name` varchar(128) DEFAULT '', + `set_name` varchar(16) NOT NULL DEFAULT '', + `set_area` varchar(16) NOT NULL DEFAULT '', + `set_group` varchar(16) NOT NULL DEFAULT '', + `host` varchar(20) NOT NULL DEFAULT '', + `filename` varchar(128) DEFAULT NULL, + `config` longtext, + `posttime` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `lastuser` varchar(50) DEFAULT NULL, + `level` int(11) DEFAULT '2', + `config_flag` int(10) NOT NULL DEFAULT '0', + PRIMARY KEY (`id`), + UNIQUE KEY `application` (`server_name`,`filename`,`host`,`level`,`set_name`,`set_area`,`set_group`) +) ENGINE=InnoDB AUTO_INCREMENT=19 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_config_history_files` +-- + +DROP TABLE IF EXISTS `t_config_history_files`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_config_history_files` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `configid` int(11) DEFAULT NULL, + `reason` varchar(128) DEFAULT '', + `reason_select` varchar(20) NOT NULL DEFAULT '', + `content` longtext, + `posttime` datetime DEFAULT NULL, + `lastuser` varchar(50) DEFAULT NULL, + PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=39 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_config_references` +-- + +DROP TABLE IF EXISTS `t_config_references`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET 
character_set_client = utf8 */; +CREATE TABLE `t_config_references` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `config_id` int(11) DEFAULT NULL, + `reference_id` int(11) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `config_id` (`config_id`,`reference_id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_group_priority` +-- + +DROP TABLE IF EXISTS `t_group_priority`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_group_priority` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `name` varchar(128) DEFAULT '', + `group_list` text, + `list_order` int(11) DEFAULT '0', + `station` varchar(128) NOT NULL DEFAULT '', + PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_machine_tars_info` +-- + +DROP TABLE IF EXISTS `t_machine_tars_info`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_machine_tars_info` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `application` varchar(100) NOT NULL DEFAULT '', + `server_name` varchar(100) NOT NULL DEFAULT '', + `app_server_name` varchar(50) NOT NULL DEFAULT '', + `node_name` varchar(50) NOT NULL DEFAULT '', + `location` varchar(255) NOT NULL DEFAULT '', + `machine_type` varchar(50) NOT NULL DEFAULT '', + `update_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `update_person` varchar(64) NOT NULL DEFAULT '', + PRIMARY KEY (`application`,`server_name`,`node_name`), + UNIQUE KEY `id` (`id`), + UNIQUE KEY `tmachine_key` (`application`,`node_name`,`server_name`), + KEY `tmachine_i_2` (`node_name`,`server_name`), + KEY `tmachine_idx` (`node_name`,`server_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_node_info` 
+-- + +DROP TABLE IF EXISTS `t_node_info`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_node_info` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `node_name` varchar(128) DEFAULT '', + `node_obj` varchar(128) DEFAULT '', + `endpoint_ip` varchar(16) DEFAULT '', + `endpoint_port` int(11) DEFAULT '0', + `data_dir` varchar(128) DEFAULT '', + `load_avg1` float DEFAULT '0', + `load_avg5` float DEFAULT '0', + `load_avg15` float DEFAULT '0', + `last_reg_time` datetime DEFAULT '1970-01-01 00:08:00', + `last_heartbeat` datetime DEFAULT '1970-01-01 00:08:00', + `setting_state` enum('active','inactive') DEFAULT 'inactive', + `present_state` enum('active','inactive') DEFAULT 'inactive', + `tars_version` varchar(128) NOT NULL DEFAULT '', + `template_name` varchar(128) NOT NULL DEFAULT '', + `modify_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `group_id` int(11) DEFAULT '-1', + `label` text, + PRIMARY KEY (`id`), + UNIQUE KEY `node_name` (`node_name`), + KEY `indx_node_info_1` (`last_heartbeat`), + KEY `indx_node_info` (`last_heartbeat`) +) ENGINE=InnoDB AUTO_INCREMENT=14 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_profile_template` +-- + +DROP TABLE IF EXISTS `t_profile_template`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_profile_template` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `template_name` varchar(128) DEFAULT '', + `parents_name` varchar(128) DEFAULT '', + `profile` text NOT NULL, + `posttime` datetime DEFAULT NULL, + `lastuser` varchar(30) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `template_name` (`template_name`) +) ENGINE=InnoDB AUTO_INCREMENT=21 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_registry_info` +-- + +DROP TABLE IF EXISTS `t_registry_info`; 
+/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_registry_info` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `locator_id` varchar(128) NOT NULL DEFAULT '', + `servant` varchar(128) NOT NULL DEFAULT '', + `endpoint` varchar(128) NOT NULL DEFAULT '', + `last_heartbeat` datetime DEFAULT '1970-01-01 00:08:00', + `present_state` enum('active','inactive') DEFAULT 'inactive', + `tars_version` varchar(128) NOT NULL DEFAULT '', + `modify_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `enable_group` char(1) DEFAULT 'N', + PRIMARY KEY (`id`), + UNIQUE KEY `locator_id` (`locator_id`,`servant`) +) ENGINE=InnoDB AUTO_INCREMENT=4576264 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_server_conf` +-- + +DROP TABLE IF EXISTS `t_server_conf`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_server_conf` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `application` varchar(128) DEFAULT '', + `server_name` varchar(128) DEFAULT '', + `node_group` varchar(50) NOT NULL DEFAULT '', + `node_name` varchar(50) NOT NULL DEFAULT '', + `registry_timestamp` datetime(3) NOT NULL DEFAULT CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3), + `base_path` varchar(128) DEFAULT '', + `exe_path` varchar(128) NOT NULL DEFAULT '', + `template_name` varchar(128) NOT NULL DEFAULT '', + `bak_flag` int(11) NOT NULL DEFAULT '0', + `setting_state` enum('active','inactive') NOT NULL DEFAULT 'inactive', + `present_state` enum('active','inactive','activating','deactivating','destroyed') NOT NULL DEFAULT 'inactive', + `process_id` int(11) NOT NULL DEFAULT '0', + `patch_version` varchar(128) NOT NULL DEFAULT '', + `patch_time` datetime NOT NULL DEFAULT '2021-12-22 10:35:56', + `patch_user` varchar(128) NOT NULL DEFAULT '', + `tars_version` varchar(128) NOT NULL DEFAULT '', + `posttime` datetime 
DEFAULT NULL, + `lastuser` varchar(30) DEFAULT NULL, + `server_type` enum('tars_cpp','not_tars','tars_java','tars_nodejs','tars_php','tars_go') DEFAULT NULL, + `start_script_path` varchar(128) DEFAULT NULL, + `stop_script_path` varchar(128) DEFAULT NULL, + `monitor_script_path` varchar(128) DEFAULT NULL, + `enable_group` char(1) DEFAULT 'N', + `enable_set` char(1) NOT NULL DEFAULT 'N', + `set_name` varchar(16) DEFAULT NULL, + `set_area` varchar(16) DEFAULT NULL, + `set_group` varchar(64) DEFAULT NULL, + `ip_group_name` varchar(64) DEFAULT NULL, + `profile` text, + `config_center_port` int(11) NOT NULL DEFAULT '0', + `async_thread_num` int(11) DEFAULT '3', + `server_important_type` enum('0','1','2','3','4','5') DEFAULT '0', + `remote_log_reserve_time` varchar(32) NOT NULL DEFAULT '65', + `remote_log_compress_time` varchar(32) NOT NULL DEFAULT '2', + `remote_log_type` int(1) NOT NULL DEFAULT '0', + `flow_state` enum('active','inactive') NOT NULL DEFAULT 'active', + PRIMARY KEY (`id`), + UNIQUE KEY `application` (`application`,`server_name`,`node_name`), + KEY `node_name` (`node_name`), + KEY `index_i_3` (`setting_state`,`server_type`,`application`,`server_name`,`node_name`), + KEY `index_regtime` (`registry_timestamp`), + KEY `index_i` (`setting_state`,`server_type`,`application`,`server_name`,`node_name`) +) ENGINE=InnoDB AUTO_INCREMENT=63 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_server_group_relation` +-- + +DROP TABLE IF EXISTS `t_server_group_relation`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_server_group_relation` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `application` varchar(90) NOT NULL DEFAULT '', + `server_group` varchar(50) DEFAULT '', + `server_name` varchar(50) DEFAULT '', + `create_time` datetime DEFAULT NULL, + `creator` varchar(30) DEFAULT '', + PRIMARY KEY (`id`), + KEY `f_unique` 
(`application`,`server_group`,`server_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_server_group_rule` +-- + +DROP TABLE IF EXISTS `t_server_group_rule`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_server_group_rule` ( + `group_id` int(10) unsigned NOT NULL AUTO_INCREMENT, + `ip_order` enum('allow_denny','denny_allow') NOT NULL DEFAULT 'denny_allow', + `allow_ip_rule` text, + `denny_ip_rule` text, + `lastuser` varchar(50) DEFAULT NULL, + `modify_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + `group_name` varchar(128) DEFAULT '', + `group_name_cn` varchar(128) DEFAULT '', + PRIMARY KEY (`group_id`), + UNIQUE KEY `group_name_index` (`group_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_server_notifys` +-- + +DROP TABLE IF EXISTS `t_server_notifys`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_server_notifys` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `application` varchar(128) DEFAULT '', + `server_name` varchar(128) DEFAULT NULL, + `container_name` varchar(128) DEFAULT '', + `node_name` varchar(128) NOT NULL DEFAULT '', + `set_name` varchar(16) DEFAULT NULL, + `set_area` varchar(16) DEFAULT NULL, + `set_group` varchar(16) DEFAULT NULL, + `server_id` varchar(100) DEFAULT NULL, + `thread_id` varchar(20) DEFAULT NULL, + `command` varchar(50) DEFAULT NULL, + `result` text, + `notifytime` datetime DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `index_name` (`server_name`), + KEY `servernoticetime_i_1` (`notifytime`), + KEY `indx_1_server_id` (`server_id`), + KEY `query_index` (`application`,`server_name`,`node_name`,`set_name`,`set_area`,`set_group`), + KEY `servernoticetime_i` (`notifytime`), + KEY `indx_server_id` 
(`server_id`) +) ENGINE=InnoDB AUTO_INCREMENT=21962 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_server_patchs` +-- + +DROP TABLE IF EXISTS `t_server_patchs`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_server_patchs` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `server` varchar(50) DEFAULT NULL, + `version` varchar(1000) DEFAULT '', + `tgz` varchar(255) DEFAULT NULL, + `update_text` varchar(255) DEFAULT NULL, + `reason_select` varchar(255) DEFAULT NULL, + `document_complate` varchar(30) DEFAULT NULL, + `is_server_group` int(2) NOT NULL DEFAULT '0', + `publish` int(3) DEFAULT NULL, + `publish_time` datetime DEFAULT NULL, + `publish_user` varchar(30) DEFAULT NULL, + `upload_time` datetime DEFAULT NULL, + `upload_user` varchar(30) DEFAULT NULL, + `posttime` datetime DEFAULT NULL, + `lastuser` varchar(30) DEFAULT NULL, + `is_release_version` enum('true','false') DEFAULT 'true', + `package_type` int(4) DEFAULT '0', + `group_id` varchar(64) NOT NULL DEFAULT '', + `default_version` int(4) DEFAULT '0', + `md5` varchar(40) DEFAULT NULL, + `svn_version` varchar(50) DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `server_patchs_server_index` (`server`), + KEY `index_patchs_i1` (`server`), + KEY `index_i_2` (`tgz`(50)), + KEY `index_i` (`tgz`) +) ENGINE=InnoDB AUTO_INCREMENT=170 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_task` +-- + +DROP TABLE IF EXISTS `t_task`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_task` ( + `id` int(11) unsigned NOT NULL AUTO_INCREMENT, + `task_no` varchar(40) DEFAULT NULL, + `serial` int(1) DEFAULT NULL, + `user_name` varchar(20) DEFAULT NULL, + `create_time` datetime DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `f_task` (`task_no`), + CONSTRAINT 
`t_task_ibfk_1` FOREIGN KEY (`task_no`) REFERENCES `t_task_item` (`task_no`) ON DELETE SET NULL ON UPDATE CASCADE +) ENGINE=InnoDB AUTO_INCREMENT=119 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_task_item` +-- + +DROP TABLE IF EXISTS `t_task_item`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_task_item` ( + `id` int(11) unsigned NOT NULL AUTO_INCREMENT, + `task_no` varchar(40) DEFAULT NULL, + `item_no` varchar(40) DEFAULT NULL, + `application` varchar(30) DEFAULT NULL, + `server_name` varchar(50) DEFAULT NULL, + `node_name` varchar(20) DEFAULT NULL, + `command` varchar(20) DEFAULT NULL, + `parameters` text, + `start_time` datetime DEFAULT NULL, + `end_time` datetime DEFAULT NULL, + `status` int(11) DEFAULT NULL, + `set_name` varchar(20) DEFAULT NULL, + `log` text, + PRIMARY KEY (`id`), + UNIQUE KEY `f_uniq` (`item_no`,`task_no`), + KEY `f_task_no` (`task_no`), + KEY `f_index` (`application`,`server_name`,`command`) +) ENGINE=InnoDB AUTO_INCREMENT=120 DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `t_web_release_conf` +-- + +DROP TABLE IF EXISTS `t_web_release_conf`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t_web_release_conf` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `server` varchar(100) NOT NULL DEFAULT '', + `path` varchar(200) NOT NULL DEFAULT '', + `server_dir` varchar(200) NOT NULL DEFAULT '', + `is_server_group` int(2) NOT NULL DEFAULT '0', + `enable_batch` int(2) NOT NULL DEFAULT '0', + `user` varchar(200) NOT NULL DEFAULT '*', + `posttime` datetime DEFAULT NULL, + `lastuser` varchar(60) DEFAULT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `server` (`server`,`is_server_group`), + KEY `web_release_conf_server_index` (`server`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; 
+/*!40101 SET character_set_client = @saved_cs_client */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +-- Dump completed diff --git a/CloudronPackages/APISIX/apisix-source/t/tars/discovery/stream/tars.t b/CloudronPackages/APISIX/apisix-source/t/tars/discovery/stream/tars.t new file mode 100644 index 0000000..b674970 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/tars/discovery/stream/tars.t @@ -0,0 +1,212 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +plan('no_plan'); + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); +workers(4); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + tars: + db_conf: + host: 127.0.0.1 + port: 3306 + database: db_tars + user: root + password: tars2022 + full_fetch_interval: 3 + incremental_fetch_interval: 1 +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + my $apisix_yaml = $block->apisix_yaml // <<_EOC_; +routes: [] +#END +_EOC_ + + $block->set_value("apisix_yaml", $apisix_yaml); + + my $extra_init_by_lua_start = <<_EOC_; + -- reduce incremental_fetch_interval,full_fetch_interval + local schema = require("apisix.discovery.tars.schema") + schema.properties.incremental_fetch_interval.minimum=1 + schema.properties.incremental_fetch_interval.default=1 + schema.properties.full_fetch_interval.minimum = 3 + schema.properties.full_fetch_interval.default = 3 +_EOC_ + + $block->set_value("extra_init_by_lua_start", $extra_init_by_lua_start); + $block->set_value("stream_extra_init_by_lua_start", $extra_init_by_lua_start); + + my $config = $block->config // <<_EOC_; + + location /sql { + content_by_lua_block { + local mysql = require("resty.mysql") + local core = require("apisix.core") + local ipairs = ipairs + + ngx.req.read_body() + local sql = ngx.req.get_body_data() + core.log.info("get sql ", sql) + + local db_conf= { + host="127.0.0.1", + port=3306, + database="db_tars", + user="root", + password="tars2022", + } + + local db_cli, err = mysql:new() + if not db_cli then + core.log.error("failed to instantiate mysql: ", err) + return + end + db_cli:set_timeout(3000) + + local ok, err, errcode, sqlstate = db_cli:connect(db_conf) + if not ok then + 
core.log.error("failed to connect mysql: ", err, ", ", errcode, ", ", sqlstate) + return + end + + local res, err, errcode, sqlstate = db_cli:query(sql) + if not res then + ngx.say("bad result: ", err, ": ", errcode, ": ", sqlstate, ".") + return + end + ngx.say("DONE") + } + } +_EOC_ + + $block->set_value("config", $config); + + my $stream_config = $block->stream_config // <<_EOC_; + server { + listen 8125; + content_by_lua_block { + local core = require("apisix.core") + local d = require("apisix.discovery.tars") + + ngx.sleep(2) + + local sock = ngx.req.socket() + local request_body = sock:receive() + + core.log.info("get body ", request_body) + + local response_body = "{" + local queries = core.json.decode(request_body) + for _,query in ipairs(queries) do + local nodes = d.nodes(query) + if nodes==nil or #nodes==0 then + response_body=response_body.." "..0 + else + response_body=response_body.." "..#nodes + end + end + ngx.say(response_body.." }") + } + } + +_EOC_ + + $block->set_value("extra_stream_config", $stream_config); + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: create initial server and servant +--- timeout: 3 +--- request eval +[ +"POST /sql +truncate table t_server_conf", + +"POST /sql +truncate table t_adapter_conf", + +"POST /sql +insert into t_server_conf(application, server_name, node_name, registry_timestamp, + template_name, setting_state, present_state, server_type) +values ('A', 'AServer', '172.16.1.1', now(), 'taf-cpp', 'active', 'active', 'tars_cpp'), + ('B', 'BServer', '172.16.2.1', now(), 'taf-cpp', 'active', 'active', 'tars_cpp'), + ('C', 'CServer', '172.16.3.1', now(), 'taf-cpp', 'active', 'active', 'tars_cpp')", + +"POST /sql +insert into t_adapter_conf(application, server_name, node_name, adapter_name, endpoint, servant) +values ('A', 'AServer', '172.16.1.1', 'A.AServer.FirstObjAdapter', + 'tcp -h 172.16.1.1 -p 10001 -e 0 -t 6000', 'A.AServer.FirstObj'), + ('B', 'BServer', '172.16.2.1', 'B.BServer.FirstObjAdapter', + 'tcp -p 10001 
-h 172.16.2.1 -e 0 -t 6000', 'B.BServer.FirstObj'), + ('C', 'CServer', '172.16.3.1', 'C.CServer.FirstObjAdapter', + 'tcp -e 0 -h 172.16.3.1 -t 6000 -p 10001 ', 'C.CServer.FirstObj')", + +] +--- response_body eval +[ + "DONE\n", + "DONE\n", + "DONE\n", + "DONE\n", +] + + + +=== TEST 2: get count after create servant +--- apisix_yaml +stream_routes: + - + id: 1 + server_port: 1985 + upstream_id: 1 + +upstreams: + - nodes: + "127.0.0.1:8125": 1 + type: roundrobin + id: 1 + +#END +--- stream_request +["A.AServer.FirstObj","B.BServer.FirstObj", "C.CServer.FirstObj"] +--- stream_response eval +qr{ 1 1 1 } diff --git a/CloudronPackages/APISIX/apisix-source/t/tars/discovery/tars.t b/CloudronPackages/APISIX/apisix-source/t/tars/discovery/tars.t new file mode 100644 index 0000000..206e4cc --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/tars/discovery/tars.t @@ -0,0 +1,391 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); +workers(4); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 + enable_admin: false +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + tars: + db_conf: + host: 127.0.0.1 + port: 3306 + database: db_tars + user: root + password: tars2022 + full_fetch_interval: 3 + incremental_fetch_interval: 1 +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + my $apisix_yaml = $block->apisix_yaml // <<_EOC_; +routes: [] +#END +_EOC_ + + $block->set_value("apisix_yaml", $apisix_yaml); + + my $extra_init_by_lua_start = <<_EOC_; + -- reduce incremental_fetch_interval,full_fetch_interval + local schema = require("apisix.discovery.tars.schema") + schema.properties.incremental_fetch_interval.minimum=1 + schema.properties.incremental_fetch_interval.default=1 + schema.properties.full_fetch_interval.minimum = 3 + schema.properties.full_fetch_interval.default = 3 +_EOC_ + + $block->set_value("extra_init_by_lua_start", $extra_init_by_lua_start); + + my $config = $block->config // <<_EOC_; + location /count { + content_by_lua_block { + local core = require("apisix.core") + local d = require("apisix.discovery.tars") + + ngx.sleep(2) + + ngx.req.read_body() + local request_body = ngx.req.get_body_data() + local queries = core.json.decode(request_body) + local response_body = "{" + for _,query in ipairs(queries) do + local nodes = d.nodes(query) + if nodes==nil or #nodes==0 then + response_body=response_body.." "..0 + else + response_body=response_body.." "..#nodes + end + end + ngx.say(response_body.." 
}") + } + } + + location /nodes { + content_by_lua_block { + local core = require("apisix.core") + local d = require("apisix.discovery.tars") + + ngx.sleep(2) + + ngx.req.read_body() + local servant = ngx.req.get_body_data() + local response="" + local nodes = d.nodes(servant) + response="{" + for _,node in ipairs(nodes or {}) do + response=response..node.host..":"..node.port.."," + end + response=response.."}" + ngx.say(response) + } + } + + location /sql { + content_by_lua_block { + local mysql = require("resty.mysql") + local core = require("apisix.core") + local ipairs = ipairs + + ngx.req.read_body() + local sql = ngx.req.get_body_data() + core.log.info("get sql ", sql) + + local db_conf= { + host="127.0.0.1", + port=3306, + database="db_tars", + user="root", + password="tars2022", + } + + local db_cli, err = mysql:new() + if not db_cli then + core.log.error("failed to instantiate mysql: ", err) + return + end + db_cli:set_timeout(3000) + + local ok, err, errcode, sqlstate = db_cli:connect(db_conf) + if not ok then + core.log.error("failed to connect mysql: ", err, ", ", errcode, ", ", sqlstate) + return + end + + local res, err, errcode, sqlstate = db_cli:query(sql) + if not res then + ngx.say("bad result: ", err, ": ", errcode, ": ", sqlstate, ".") + return + end + ngx.say("DONE") + } + } +_EOC_ + + $block->set_value("config", $config); + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: create initial server and servant +--- timeout: 3 +--- request eval +[ +"POST /sql +truncate table t_server_conf", + +"POST /sql +truncate table t_adapter_conf", + +"POST /sql +insert into t_server_conf(application, server_name, node_name, registry_timestamp, + template_name, setting_state, present_state, server_type) +values ('A', 'AServer', '172.16.1.1', now(), 'taf-cpp', 'active', 'active', 'tars_cpp'), + ('B', 'BServer', '172.16.2.1', now(), 'taf-cpp', 'active', 'active', 'tars_cpp'), + ('C', 'CServer', '172.16.3.1', now(), 'taf-cpp', 'active', 'active', 'tars_cpp')", + 
+"POST /sql +insert into t_adapter_conf(application, server_name, node_name, adapter_name, endpoint, servant) +values ('A', 'AServer', '172.16.1.1', 'A.AServer.FirstObjAdapter', + 'tcp -h 172.16.1.1 -p 10001 -e 0 -t 6000', 'A.AServer.FirstObj'), + ('B', 'BServer', '172.16.2.1', 'B.BServer.FirstObjAdapter', + 'tcp -p 10001 -h 172.16.2.1 -e 0 -t 6000', 'B.BServer.FirstObj'), + ('C', 'CServer', '172.16.3.1', 'C.CServer.FirstObjAdapter', + 'tcp -e 0 -h 172.16.3.1 -t 6000 -p 10001 ', 'C.CServer.FirstObj')", + +"GET /count +[\"A.AServer.FirstObj\",\"B.BServer.FirstObj\", \"C.CServer.FirstObj\"]", + +] +--- response_body eval +[ + "DONE\n", + "DONE\n", + "DONE\n", + "DONE\n", + "{ 1 1 1 }\n", +] + + + +=== TEST 2: add servers on different nodes +--- timeout: 3 +--- request eval +[ +"POST /sql +insert into t_server_conf(application, server_name, node_name, registry_timestamp, + template_name, setting_state, present_state, server_type) +values ('A', 'AServer', '172.16.1.2', now(), 'taf-cpp', 'active', 'active', 'tars_cpp'), + ('B', 'BServer', '172.16.2.2', now(), 'taf-cpp', 'active', 'active', 'tars_cpp'), + ('C', 'CServer', '172.16.3.2', now(), 'taf-cpp', 'active', 'active', 'tars_cpp')", + +"GET /count +[\"A.AServer.FirstObj\",\"B.BServer.FirstObj\", \"C.CServer.FirstObj\"]", + +] +--- response_body eval +[ + "DONE\n", + "{ 1 1 1 }\n", +] + + + +=== TEST 3: add servant +--- timeout: 3 +--- request eval +[ +"POST /sql +insert into t_adapter_conf(application, server_name, node_name, adapter_name, endpoint, servant) +values ('A', 'AServer', '172.16.1.2', 'A.AServer.FirstObjAdapter', + 'tcp -h 172.16.1.2 -p 10001 -e 0 -t 6000', 'A.AServer.FirstObj'), + ('A', 'AServer', '172.16.1.2', 'A.AServer.SecondObjAdapter', + 'tcp -p 10002 -h 172.16.1.2 -e 0 -t 6000', 'A.AServer.SecondObj')", + +"GET /count +[\"A.AServer.FirstObj\", \"A.AServer.SecondObj\", \"B.BServer.FirstObj\", \"C.CServer.FirstObj\"]", + +] +--- response_body eval +[ + "DONE\n", + "{ 2 1 1 1 }\n", +] + + + +=== TEST 
4: update servant, update setting_state +--- timeout: 3 +--- request eval +[ +"POST /sql +update t_server_conf set setting_state='inactive' +where application = 'A' and server_name = 'AServer' and node_name = '172.16.1.2'", + +"GET /count +[\"A.AServer.FirstObj\", \"A.AServer.SecondObj\", \"B.BServer.FirstObj\", \"C.CServer.FirstObj\"]", + +] +--- response_body eval +[ + "DONE\n", + "{ 1 0 1 1 }\n", +] + + + +=== TEST 5: update server setting_state +--- timeout: 3 +--- request eval +[ +"POST /sql +update t_server_conf set setting_state='active', present_state='inactive' +where application = 'A' and server_name = 'AServer' and node_name = '172.16.1.2'", + +"GET /count +[\"A.AServer.FirstObj\", \"A.AServer.SecondObj\", \"B.BServer.FirstObj\", \"C.CServer.FirstObj\"]", + +] +--- response_body eval +[ + "DONE\n", + "{ 1 0 1 1 }\n", +] + + + +=== TEST 6: update server present_state +--- timeout: 3 +--- request eval +[ +"POST /sql +update t_server_conf set setting_state='active', present_state='active' +where application = 'A' and server_name = 'AServer' and node_name = '172.16.1.2'", + +"GET /count +[\"A.AServer.FirstObj\", \"A.AServer.SecondObj\", \"B.BServer.FirstObj\", \"C.CServer.FirstObj\"]", + +] +--- response_body eval +[ + "DONE\n", + "{ 2 1 1 1 }\n", +] + + + +=== TEST 7: update servant endpoint +--- timeout: 3 +--- request eval +[ +"GET /nodes +A.AServer.SecondObj", + +"POST /sql +update t_adapter_conf set endpoint='tcp -h 172.16.1.2 -p 10003 -e 0 -t 3000' +where application = 'A' and server_name = 'AServer' +and node_name = '172.16.1.2' and servant='A.AServer.SecondObj'", + +"GET /nodes +A.AServer.SecondObj", + +] +--- response_body eval +[ + "{172.16.1.2:10002,}\n", + "DONE\n", + "{172.16.1.2:10003,}\n", +] + + + +=== TEST 8: delete servant +--- request eval +[ +"POST /sql +delete from t_adapter_conf where application = 'A' and server_name = 'AServer' +and node_name = '172.16.1.2' and servant = 'A.AServer.SecondObj'", + +] +--- response_body eval +[ + 
"DONE\n", +] + + + +=== TEST 9: count after delete servant +--- timeout: 4 +--- wait: 3 +--- request eval +[ +"GET /count +[\"A.AServer.FirstObj\", \"A.AServer.SecondObj\", \"B.BServer.FirstObj\", \"C.CServer.FirstObj\"]", + +] +--- response_body eval +[ + "{ 2 0 1 1 }\n", +] + + + +=== TEST 10: delete server +--- request eval +[ +"POST /sql +delete from t_server_conf +where application = 'A' and server_name = 'AServer' and node_name = '172.16.1.1'", + +] +--- response_body eval +[ + "DONE\n", +] + + + +=== TEST 11: count after delete +--- timeout: 4 +--- wait: 3 +--- request eval +[ +"GET /count +[\"A.AServer.FirstObj\", \"A.AServer.SecondObj\", \"B.BServer.FirstObj\", \"C.CServer.FirstObj\"]", + +] +--- response_body eval +[ + "{ 1 0 1 1 }\n", +] diff --git a/CloudronPackages/APISIX/apisix-source/t/ts/admin_api.ts b/CloudronPackages/APISIX/apisix-source/t/ts/admin_api.ts new file mode 100644 index 0000000..6cc3813 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/ts/admin_api.ts @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import axios, { + type AxiosRequestConfig, + type Method, + type RawAxiosRequestHeaders, +} from 'axios'; + +export const request = async ( + url: string, + method: Method = 'GET', + body?: object, + headers?: RawAxiosRequestHeaders, + config?: AxiosRequestConfig, +) => { + return axios.request({ + method, + // TODO: use 9180 for admin api + baseURL: 'http://127.0.0.1:1984', + url, + data: body, + headers: { + 'X-API-KEY': 'edd1c9f034335f136f87ad84b625c8f1', + ...headers, + }, + ...config, + }); +}; diff --git a/CloudronPackages/APISIX/apisix-source/t/ts/utils.ts b/CloudronPackages/APISIX/apisix-source/t/ts/utils.ts new file mode 100644 index 0000000..9112158 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/ts/utils.ts @@ -0,0 +1,18 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +export const wait = (ms: number) => + new Promise((resolve) => setTimeout(resolve, ms)); diff --git a/CloudronPackages/APISIX/apisix-source/t/tsconfig.esm.json b/CloudronPackages/APISIX/apisix-source/t/tsconfig.esm.json new file mode 100644 index 0000000..b8b237e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/tsconfig.esm.json @@ -0,0 +1,9 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "module": "ESNext", + "moduleResolution": "Bundler", + "isolatedModules": true + }, + "include": ["**/*.mts"] +} diff --git a/CloudronPackages/APISIX/apisix-source/t/tsconfig.json b/CloudronPackages/APISIX/apisix-source/t/tsconfig.json new file mode 100644 index 0000000..d733f24 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/tsconfig.json @@ -0,0 +1,11 @@ +{ + "compilerOptions": { + "target": "esnext", + "module": "commonjs", + "lib": ["esnext"], + "esModuleInterop": true, + "strict": true, + "noEmit": true + }, + "include": ["**/*.ts"] +} diff --git a/CloudronPackages/APISIX/apisix-source/t/utils/batch-processor.t b/CloudronPackages/APISIX/apisix-source/t/utils/batch-processor.t new file mode 100644 index 0000000..e1ce83b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/utils/batch-processor.t @@ -0,0 +1,483 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +run_tests; + +__DATA__ + +=== TEST 1: send invalid arguments for constructor +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local config = { + max_retry_count = 2, + batch_max_size = 1, + retry_delay = 0, + } + local func_to_send = function(elements) + return true + end + local log_buffer, err = Batch:new("", config) + + if log_buffer then + log_buffer:push({hello='world'}) + ngx.say("done") + end + + if not log_buffer then + ngx.say("failed") + end + + } + } +--- request +GET /t +--- response_body +failed +--- wait: 0.5 + + + +=== TEST 2: sanity +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local func_to_send = function(elements) + return true + end + + local config = { + max_retry_count = 2, + batch_max_size = 1, + retry_delay = 0, + } + + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({hello='world'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +Batch Processor[log buffer] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 3: batch processor timeout exceeded +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local config = { + max_retry_count = 2, + batch_max_size = 2, + retry_delay = 0, + inactive_timeout = 1 + } + local func_to_send = function(elements) + return true + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({hello='world'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +Batch Processor[log buffer] 
buffer duration exceeded, activating buffer flush +Batch Processor[log buffer] successfully processed the entries +--- wait: 3 + + + +=== TEST 4: batch processor batch max size exceeded +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local config = { + max_retry_count = 2, + batch_max_size = 2, + retry_delay = 0, + } + local func_to_send = function(elements) + return true + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({hello='world'}) + log_buffer:push({hello='world'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Batch Processor[log buffer] buffer duration exceeded, activating buffer flush +--- error_log +Batch Processor[log buffer] batch max size has exceeded +Batch Processor[log buffer] successfully processed the entries +--- wait: 1 + + + +=== TEST 5: first failed to process and second try success +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local core = require("apisix.core") + local retry = false + local config = { + max_retry_count = 2, + batch_max_size = 2, + retry_delay = 0, + } + local func_to_send = function(elements) + if not retry then + retry = true + return false + end + return true + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({hello='world'}) + log_buffer:push({hello='world'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +Batch Processor[log buffer] failed to process entries +Batch Processor[log buffer] successfully processed the entries +--- wait: 0.5 + + + +=== TEST 6: Exceeding max retry count +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local config = { + max_retry_count = 2, + batch_max_size = 2, + 
retry_delay = 0, + } + local func_to_send = function(elements) + return false + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({hello='world'}) + log_buffer:push({hello='world'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Batch Processor[log buffer] buffer duration exceeded, activating buffer flush +--- error_log +Batch Processor[log buffer] failed to process entries +Batch Processor[log buffer] exceeded the max_retry_count +--- wait: 0.5 + + + +=== TEST 7: two batches +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local core = require("apisix.core") + local count = 0 + local config = { + max_retry_count = 2, + batch_max_size = 2, + retry_delay = 0, + } + local func_to_send = function(elements) + count = count + 1 + core.log.info("batch[", count , "] sent") + return true + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({hello='world'}) + log_buffer:push({hello='world'}) + log_buffer:push({hello='world'}) + log_buffer:push({hello='world'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Batch Processor[log buffer] activating flush due to no activity +--- error_log +batch[1] sent +batch[2] sent +--- wait: 0.5 + + + +=== TEST 8: batch processor retry count 0 and fail processing +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local config = { + max_retry_count = 0, + batch_max_size = 2, + retry_delay = 0, + } + local func_to_send = function(elements) + return false + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({hello='world'}) + log_buffer:push({hello='world'}) + ngx.say("done") + } + } +--- request +GET /t 
+--- response_body +done +--- no_error_log +Batch Processor[log buffer] activating flush due to no activity +--- error_log +Batch Processor[log buffer] exceeded the max_retry_count +--- wait: 0.5 + + + +=== TEST 9: batch processor timeout exceeded +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local config = { + max_retry_count = 2, + batch_max_size = 2, + retry_delay = 0, + buffer_duration = 60, + inactive_timeout = 1, + } + local func_to_send = function(elements) + return true + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({hello='world'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +Batch Processor[log buffer] buffer duration exceeded, activating buffer flush +Batch Processor[log buffer] successfully processed the entries +--- wait: 3 + + + +=== TEST 10: json encode and log elements +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local core = require("apisix.core") + local config = { + max_retry_count = 2, + batch_max_size = 2, + retry_delay = 0, + } + local func_to_send = function(elements) + core.log.info(require("toolkit.json").encode(elements)) + return true + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({msg='1'}) + log_buffer:push({msg='2'}) + log_buffer:push({msg='3'}) + log_buffer:push({msg='4'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Batch Processor[log buffer] activating flush due to no activity +--- error_log +[{"msg":"1"},{"msg":"2"}] +[{"msg":"3"},{"msg":"4"}] +--- wait: 0.5 + + + +=== TEST 11: extend timer +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local core = require("apisix.core") + local 
config = { + max_retry_count = 1, + batch_max_size = 3, + retry_delay = 0, + inactive_timeout = 1 + } + local func_to_send = function(elements) + core.log.info(require("toolkit.json").encode(elements)) + return true + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({msg='1'}) + ngx.sleep(0.3) + log_buffer:push({msg='2'}) + log_buffer:push({msg='3'}) + log_buffer:push({msg='4'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- no_error_log +Batch Processor[log buffer] activating flush due to no activity +--- error_log +Batch Processor[log buffer] extending buffer timer +--- wait: 3 + + + +=== TEST 12: partially consumed entries +--- config + location /t { + content_by_lua_block { + local Batch = require("apisix.utils.batch-processor") + local core = require("apisix.core") + local config = { + max_retry_count = 1, + batch_max_size = 3, + retry_delay = 0, + inactive_timeout = 1 + } + local func_to_send = function(elements) + core.log.info(require("toolkit.json").encode(elements)) + return false, "error after consuming single entry", 2 + end + local log_buffer, err = Batch:new(func_to_send, config) + + if not log_buffer then + ngx.say(err) + end + + log_buffer:push({msg='1'}) + log_buffer:push({msg='2'}) + log_buffer:push({msg='3'}) + log_buffer:push({msg='4'}) + ngx.say("done") + } + } +--- request +GET /t +--- response_body +done +--- error_log +[{"msg":"1"},{"msg":"2"},{"msg":"3"}] +Batch Processor[log buffer] failed to process entries [2/3]: error after consuming single entry +[{"msg":"2"},{"msg":"3"}] +Batch Processor[log buffer] failed to process entries [1/2]: error after consuming single entry +[{"msg":"4"}] +--- wait: 2 diff --git a/CloudronPackages/APISIX/apisix-source/t/utils/rfc5424.t b/CloudronPackages/APISIX/apisix-source/t/utils/rfc5424.t new file mode 100644 index 0000000..06051e6 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/utils/rfc5424.t @@ -0,0 +1,83 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Compatibility testing +--- config + location /t { + content_by_lua_block { + local rfc5424 = require("apisix.utils.rfc5424") + local structured_data = { + {name = "project", value = "apisix.apache.org"}, + {name = "logstore", value = "apisix.apache.org"}, + {name = "access-key-id", value = "apisix.sls.logger"}, + {name = "access-key-secret", value = "BD274822-96AA-4DA6-90EC-15940FB24444"} + } + local data = rfc5424.encode("SYSLOG", "INFO", "localhost", "apisix", + 123456, "hello world", structured_data) + ngx.say(data) + } + } +--- response_body eval +qr/<46>1.*localhost apisix 123456 - \[logservice project=\"apisix\.apache\.org\" logstore=\"apisix\.apache\.org\" access-key-id=\"apisix\.sls\.logger\" access-key-secret=\"BD274822-96AA-4DA6-90EC-15940FB24444\"\] hello world/ + + + +=== TEST 2: No structured data test +--- config + location /t { + 
content_by_lua_block { + local rfc5424 = require("apisix.utils.rfc5424") + local data = rfc5424.encode("SYSLOG", "INFO", "localhost", "apisix", + 123456, "hello world") + ngx.say(data) + } + } +--- response_body eval +qr/<46>1.*localhost apisix 123456 - - hello world/ + + + +=== TEST 3: No host and appname test +--- config + location /t { + content_by_lua_block { + local rfc5424 = require("apisix.utils.rfc5424") + local data = rfc5424.encode("SYSLOG", "INFO", nil, nil, + 123456, "hello world") + ngx.say(data) + } + } +--- response_body eval +qr/<46>1.*- - 123456 - - hello world/ diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/fault-injection.t b/CloudronPackages/APISIX/apisix-source/t/wasm/fault-injection.t new file mode 100644 index 0000000..f690e9e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/fault-injection.t @@ -0,0 +1,280 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $extra_yaml_config = <<_EOC_; +wasm: + plugins: + - name: wasm_fault_injection + priority: 7997 + file: t/wasm/fault-injection/main.go.wasm +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: fault injection +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_fault_injection": { + "conf": "{\"http_status\":401, \"body\":\"HIT\n\"}" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- request +GET /hello +--- error_code: 401 +--- response_body +HIT + + + +=== TEST 3: fault injection, with 0 percentage +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_fault_injection": { + "conf": "{\"http_status\":401, \"percentage\":0}" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 5: fault injection without body +--- config + location 
/t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_fault_injection": { + "conf": "{\"http_status\":401}" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit +--- request +GET /hello +--- error_code: 401 +--- response_body_like eval +qr/401 Authorization Required<\/title>/ + + + +=== TEST 7: fault injection +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_fault_injection": { + "conf": { + "http_status": 401, + "body": "HIT\n" + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit +--- request +GET /hello +--- error_code: 401 +--- response_body +HIT + + + +=== TEST 9: fault injection, with 0 percentage +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_fault_injection": { + "conf": { + "http_status": 401, + "percentage": 0 + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- ret_code: 401 +--- response_body +passed + + + +=== TEST 10: hit +--- request +GET /hello +--- response_body +hello world diff --git 
a/CloudronPackages/APISIX/apisix-source/t/wasm/fault-injection/main.go b/CloudronPackages/APISIX/apisix-source/t/wasm/fault-injection/main.go new file mode 100644 index 0000000..20213aa --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/fault-injection/main.go @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "math/rand" + + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm" + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm/types" + + // tinygo doesn't support encoding/json, see https://github.com/tinygo-org/tinygo/issues/447 + "github.com/valyala/fastjson" +) + +func main() { + proxywasm.SetVMContext(&vmContext{}) +} + +type vmContext struct { + types.DefaultVMContext +} + +func (*vmContext) NewPluginContext(contextID uint32) types.PluginContext { + return &pluginContext{} +} + +type pluginContext struct { + types.DefaultPluginContext + Body []byte + HttpStatus uint32 + Percentage int +} + +func (ctx *pluginContext) OnPluginStart(pluginConfigurationSize int) types.OnPluginStartStatus { + data, err := proxywasm.GetPluginConfiguration() + if err != nil { + proxywasm.LogErrorf("error reading plugin configuration: %v", err) + return types.OnPluginStartStatusFailed + } + + var p fastjson.Parser + v, err := p.ParseBytes(data) + if err != nil { + proxywasm.LogErrorf("error decoding plugin configuration: %v", err) + return types.OnPluginStartStatusFailed + } + ctx.Body = v.GetStringBytes("body") + ctx.HttpStatus = uint32(v.GetUint("http_status")) + if v.Exists("percentage") { + ctx.Percentage = v.GetInt("percentage") + } else { + ctx.Percentage = 100 + } + + // schema check + if ctx.HttpStatus < 200 { + proxywasm.LogError("bad http_status") + return types.OnPluginStartStatusFailed + } + if ctx.Percentage < 0 || ctx.Percentage > 100 { + proxywasm.LogError("bad percentage") + return types.OnPluginStartStatusFailed + } + + return types.OnPluginStartStatusOK +} + +func (ctx *pluginContext) NewHttpContext(contextID uint32) types.HttpContext { + return &httpLifecycle{parent: ctx} +} + +type httpLifecycle struct { + types.DefaultHttpContext + parent *pluginContext +} + +func sampleHit(percentage int) bool { + return rand.Intn(100) < percentage +} + +func (ctx *httpLifecycle) OnHttpRequestHeaders(numHeaders int, endOfStream bool) types.Action { 
+ plugin := ctx.parent + if !sampleHit(plugin.Percentage) { + return types.ActionContinue + } + + err := proxywasm.SendHttpResponse(plugin.HttpStatus, nil, plugin.Body, -1) + if err != nil { + proxywasm.LogErrorf("failed to send local response: %v", err) + return types.ActionContinue + } + return types.ActionPause +} diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/forward-auth.go b/CloudronPackages/APISIX/apisix-source/t/wasm/forward-auth.go new file mode 100644 index 0000000..5b308d2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/forward-auth.go @@ -0,0 +1,219 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "net/url" + "strconv" + "strings" + + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm" + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm/types" + "github.com/valyala/fastjson" +) + +func main() { + proxywasm.SetVMContext(&vmContext{}) +} + +type vmContext struct { + types.DefaultVMContext +} + +func (*vmContext) NewPluginContext(contextID uint32) types.PluginContext { + return &pluginContext{ + contextID: contextID, + upstreamHeaders: map[string]struct{}{}, + clientHeaders: map[string]struct{}{}, + requestHeaders: map[string]struct{}{}, + } +} + +type pluginContext struct { + types.DefaultPluginContext + contextID uint32 + + host string + path string + scheme string + upstreamHeaders map[string]struct{} + clientHeaders map[string]struct{} + requestHeaders map[string]struct{} + timeout uint32 +} + +func (ctx *pluginContext) OnPluginStart(pluginConfigurationSize int) types.OnPluginStartStatus { + data, err := proxywasm.GetPluginConfiguration() + if err != nil { + proxywasm.LogErrorf("error reading plugin configuration: %v", err) + return types.OnPluginStartStatusFailed + } + + var p fastjson.Parser + v, err := p.ParseBytes(data) + if err != nil { + proxywasm.LogErrorf("error decoding plugin configuration: %v", err) + return types.OnPluginStartStatusFailed + } + + ctx.timeout = uint32(v.GetUint("timeout")) + if ctx.timeout == 0 { + ctx.timeout = 3000 + } + + // schema check + if ctx.timeout < 1 || ctx.timeout > 60000 { + proxywasm.LogError("bad timeout") + return types.OnPluginStartStatusFailed + } + + s := string(v.GetStringBytes("uri")) + if s == "" { + proxywasm.LogError("bad uri") + return types.OnPluginStartStatusFailed + } + + uri, err := url.Parse(s) + if err != nil { + proxywasm.LogErrorf("bad uri: %v", err) + return types.OnPluginStartStatusFailed + } + + ctx.host = uri.Host + ctx.path = uri.Path + ctx.scheme = uri.Scheme + + arr := v.GetArray("upstream_headers") + for _, a := range arr { + 
ctx.upstreamHeaders[strings.ToLower(string(a.GetStringBytes()))] = struct{}{} + } + + arr = v.GetArray("request_headers") + for _, a := range arr { + ctx.requestHeaders[string(a.GetStringBytes())] = struct{}{} + } + + arr = v.GetArray("client_headers") + for _, a := range arr { + ctx.clientHeaders[strings.ToLower(string(a.GetStringBytes()))] = struct{}{} + } + + return types.OnPluginStartStatusOK +} + +func (pluginCtx *pluginContext) NewHttpContext(contextID uint32) types.HttpContext { + ctx := &httpContext{contextID: contextID, pluginCtx: pluginCtx} + return ctx +} + +type httpContext struct { + types.DefaultHttpContext + contextID uint32 + pluginCtx *pluginContext +} + +func (ctx *httpContext) dispatchHttpCall(elem *fastjson.Value) { + method, _ := proxywasm.GetHttpRequestHeader(":method") + uri, _ := proxywasm.GetHttpRequestHeader(":path") + scheme, _ := proxywasm.GetHttpRequestHeader(":scheme") + host, _ := proxywasm.GetHttpRequestHeader("host") + addr, _ := proxywasm.GetProperty([]string{"remote_addr"}) + + pctx := ctx.pluginCtx + hs := [][2]string{} + hs = append(hs, [2]string{":scheme", pctx.scheme}) + hs = append(hs, [2]string{"host", pctx.host}) + hs = append(hs, [2]string{":path", pctx.path}) + hs = append(hs, [2]string{"X-Forwarded-Proto", scheme}) + hs = append(hs, [2]string{"X-Forwarded-Method", method}) + hs = append(hs, [2]string{"X-Forwarded-Host", host}) + hs = append(hs, [2]string{"X-Forwarded-Uri", uri}) + hs = append(hs, [2]string{"X-Forwarded-For", string(addr)}) + + for k := range pctx.requestHeaders { + h, err := proxywasm.GetHttpRequestHeader(k) + + if err != nil && err != types.ErrorStatusNotFound { + proxywasm.LogErrorf("httpcall failed: %v", err) + return + } + hs = append(hs, [2]string{k, h}) + } + + calloutID, err := proxywasm.DispatchHttpCall(pctx.host, hs, nil, nil, + pctx.timeout, ctx.httpCallback) + if err != nil { + proxywasm.LogErrorf("httpcall failed: %v", err) + return + } + proxywasm.LogInfof("httpcall calloutID %d, pluginCtxID 
%d", calloutID, ctx.pluginCtx.contextID) +} + +func (ctx *httpContext) OnHttpRequestHeaders(numHeaders int, endOfStream bool) types.Action { + data, err := proxywasm.GetPluginConfiguration() + if err != nil { + proxywasm.LogErrorf("error reading plugin configuration: %v", err) + return types.ActionContinue + } + + var p fastjson.Parser + v, err := p.ParseBytes(data) + if err != nil { + proxywasm.LogErrorf("error decoding plugin configuration: %v", err) + return types.ActionContinue + } + + ctx.dispatchHttpCall(v) + return types.ActionContinue +} + +func (ctx *httpContext) httpCallback(numHeaders int, bodySize int, numTrailers int) { + hs, err := proxywasm.GetHttpCallResponseHeaders() + if err != nil { + proxywasm.LogErrorf("callback err: %v", err) + return + } + + var status int + for _, h := range hs { + if h[0] == ":status" { + status, _ = strconv.Atoi(h[1]) + } + + if _, ok := ctx.pluginCtx.upstreamHeaders[h[0]]; ok { + err := proxywasm.ReplaceHttpRequestHeader(h[0], h[1]) + if err != nil { + proxywasm.LogErrorf("set header failed: %v", err) + } + } + } + + if status >= 300 { + chs := [][2]string{} + for _, h := range hs { + if _, ok := ctx.pluginCtx.clientHeaders[h[0]]; ok { + chs = append(chs, [2]string{h[0], h[1]}) + } + } + + if err := proxywasm.SendHttpResponse(403, chs, nil, -1); err != nil { + proxywasm.LogErrorf("send http failed: %v", err) + return + } + } +} diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/forward-auth.t b/CloudronPackages/APISIX/apisix-source/t/wasm/forward-auth.t new file mode 100644 index 0000000..50ad39b --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/forward-auth.t @@ -0,0 +1,253 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $extra_yaml_config = <<_EOC_; +wasm: + plugins: + - name: wasm-forward-auth + priority: 7997 + file: t/wasm/forward-auth.go.wasm +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: setup route with plugin +--- config + location /t { + content_by_lua_block { + local data = { + { + url = "/apisix/admin/upstreams/u1", + data = [[{ + "nodes": { + "127.0.0.1:1984": 1 + }, + "type": "roundrobin" + }]], + }, + { + url = "/apisix/admin/routes/auth", + data = [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [ + "return function(conf, ctx) + local core = require(\"apisix.core\"); + if core.request.header(ctx, \"Authorization\") == \"111\" then + core.response.exit(200); + end + end", + "return function(conf, ctx) + local core = require(\"apisix.core\"); + if core.request.header(ctx, \"Authorization\") == \"222\" then + 
core.response.set_header(\"X-User-ID\", \"i-am-an-user\"); + core.response.exit(200); + end + end",]] .. [[ + "return function(conf, ctx) + local core = require(\"apisix.core\"); + if core.request.header(ctx, \"Authorization\") == \"333\" then + core.response.set_header(\"X-User-ID\", \"i-am-an-user\"); + core.response.exit(401); + end + end", + "return function(conf, ctx) + local core = require(\"apisix.core\"); + if core.request.header(ctx, \"Authorization\") == \"444\" then + local auth_headers = { + 'X-Forwarded-Proto', + 'X-Forwarded-Method', + 'X-Forwarded-Host', + 'X-Forwarded-Uri', + 'X-Forwarded-For', + } + for _, k in ipairs(auth_headers) do + core.log.warn('get header ', string.lower(k), ': ', core.request.header(ctx, k)) + end + core.response.exit(403); + end + end" + ] + } + }, + "uri": "/auth" + }]], + }, + { + url = "/apisix/admin/routes/echo", + data = [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions": [ + "return function (conf, ctx) + local core = require(\"apisix.core\"); + core.response.exit(200, core.request.headers(ctx)); + end" + ] + } + }, + "uri": "/echo" + }]], + }, + { + url = "/apisix/admin/routes/1", + data = [[{ + "plugins": { + "wasm-forward-auth": { + "conf": "{ + \"uri\": \"http://127.0.0.1:1984/auth\", + \"request_headers\": [\"Authorization\"], + \"client_headers\": [\"X-User-ID\"], + \"upstream_headers\": [\"X-User-ID\"] + }" + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream_id": "u1", + "uri": "/hello" + }]], + }, + { + url = "/apisix/admin/routes/2", + data = [[{ + "plugins": { + "wasm-forward-auth": { + "conf": "{ + \"uri\": \"http://127.0.0.1:1984/auth\", + \"request_headers\": [\"Authorization\"] + }" + }, + "proxy-rewrite": { + "uri": "/echo" + } + }, + "upstream_id": "u1", + "uri": "/empty" + }]], + }, + } + + local t = require("lib.test_admin").test + + for _, data in ipairs(data) do + local code, body = t(data.url, ngx.HTTP_PUT, data.data) + ngx.say(body) + end + } + } 
+--- response_body eval +"passed\n" x 5 + + + +=== TEST 2: hit route (test request_headers) +--- request +GET /hello +--- more_headers +Authorization: 111 +--- response_body_like eval +qr/\"authorization\":\"111\"/ + + + +=== TEST 3: hit route (test upstream_headers) +--- request +GET /hello +--- more_headers +Authorization: 222 +--- response_body_like eval +qr/\"x-user-id\":\"i-am-an-user\"/ + + + +=== TEST 4: hit route (test client_headers) +--- request +GET /hello +--- more_headers +Authorization: 333 +--- error_code: 403 +--- response_headers +x-user-id: i-am-an-user + + + +=== TEST 5: hit route (check APISIX generated headers and ignore client headers) +--- request +GET /hello +--- more_headers +Authorization: 444 +X-Forwarded-Host: apisix.apache.org +--- error_code: 403 +--- grep_error_log eval +qr/get header \S+: \S+/ +--- grep_error_log_out +get header x-forwarded-proto: http, +get header x-forwarded-method: GET, +get header x-forwarded-host: localhost, +get header x-forwarded-uri: /hello, +get header x-forwarded-for: 127.0.0.1, + + + +=== TEST 6: hit route (not send upstream headers) +--- request +GET /empty +--- more_headers +Authorization: 222 +--- response_body_unlike eval +qr/\"x-user-id\":\"i-am-an-user\"/ + + + +=== TEST 7: hit route (not send client headers) +--- request +GET /empty +--- more_headers +Authorization: 333 +--- error_code: 403 +--- response_headers +!x-user-id diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/global-rule.t b/CloudronPackages/APISIX/apisix-source/t/wasm/global-rule.t new file mode 100644 index 0000000..f9fd322 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/global-rule.t @@ -0,0 +1,175 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $extra_yaml_config = <<_EOC_; +wasm: + plugins: + - name: wasm_log + priority: 7999 + file: t/wasm/log/main.go.wasm + - name: wasm_log2 + priority: 7998 + file: t/wasm/log/main.go.wasm +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "wasm_log": { + "conf": "blahblah" + }, + "wasm_log2": { + "conf": "zzz" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- request +GET 
/hello +--- grep_error_log eval +qr/run plugin ctx \d+ with conf \S+ in http ctx \d+/ +--- grep_error_log_out +run plugin ctx 1 with conf blahblah in http ctx 2 +run plugin ctx 1 with conf zzz in http ctx 2 + + + +=== TEST 3: global rule + route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log2": { + "conf": "www" + } + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit +--- request +GET /hello +--- grep_error_log eval +qr/run plugin ctx \d+ with conf \S+ in http ctx \d+/ +--- grep_error_log_out +run plugin ctx 1 with conf blahblah in http ctx 2 +run plugin ctx 1 with conf zzz in http ctx 2 +run plugin ctx 3 with conf www in http ctx 4 + + + +=== TEST 5: delete global rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/global_rules/1', ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + + ngx.say(body) + } + } +--- response_body +passed diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/go.mod b/CloudronPackages/APISIX/apisix-source/t/wasm/go.mod new file mode 100644 index 0000000..f82b7e9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/go.mod @@ -0,0 +1,10 @@ +module github.com/api7/wasm-nginx-module + +go 1.17 + +require ( + github.com/tetratelabs/proxy-wasm-go-sdk v0.16.0 + github.com/valyala/fastjson v1.6.3 +) + +//replace github.com/tetratelabs/proxy-wasm-go-sdk => ../proxy-wasm-go-sdk diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/go.sum b/CloudronPackages/APISIX/apisix-source/t/wasm/go.sum new file mode 100644 index 0000000..835b676 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/wasm/go.sum @@ -0,0 +1,16 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tetratelabs/proxy-wasm-go-sdk v0.16.0 h1:6xhDLV4DD2+q3Rs4CDh7cqo69rQ50XgCusv/58D44o4= +github.com/tetratelabs/proxy-wasm-go-sdk v0.16.0/go.mod h1:8CxNZJ+9yDEvNnAog384fC8j1tKNF0tTZevGjOuY9ds= +github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc= +github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/log/main.go b/CloudronPackages/APISIX/apisix-source/t/wasm/log/main.go new file mode 100644 index 0000000..2880b09 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/log/main.go @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm" + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm/types" +) + +func main() { + proxywasm.SetVMContext(&vmContext{}) +} + +type vmContext struct { + // Embed the default VM context here, + // so that we don't need to reimplement all the methods. + types.DefaultVMContext +} + +func (*vmContext) NewPluginContext(contextID uint32) types.PluginContext { + return &pluginContext{contextID: contextID} +} + +type pluginContext struct { + // Embed the default plugin context here, + // so that we don't need to reimplement all the methods. 
+ types.DefaultPluginContext + conf string + contextID uint32 +} + +func (ctx *pluginContext) OnPluginStart(pluginConfigurationSize int) types.OnPluginStartStatus { + data, err := proxywasm.GetPluginConfiguration() + if err != nil { + proxywasm.LogCriticalf("error reading plugin configuration: %v", err) + return types.OnPluginStartStatusFailed + } + + ctx.conf = string(data) + return types.OnPluginStartStatusOK +} + +func (ctx *pluginContext) OnPluginDone() bool { + proxywasm.LogInfo("do clean up...") + return true +} + +func (ctx *pluginContext) NewHttpContext(contextID uint32) types.HttpContext { + return &httpLifecycle{pluginCtxID: ctx.contextID, conf: ctx.conf, contextID: contextID} +} + +type httpLifecycle struct { + // Embed the default http context here, + // so that we don't need to reimplement all the methods. + types.DefaultHttpContext + pluginCtxID uint32 + contextID uint32 + conf string +} + +func (ctx *httpLifecycle) OnHttpRequestHeaders(numHeaders int, endOfStream bool) types.Action { + proxywasm.LogWarnf("run plugin ctx %d with conf %s in http ctx %d", + ctx.pluginCtxID, ctx.conf, ctx.contextID) + // TODO: support access/modify http request headers + return types.ActionContinue +} diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/request-body.t b/CloudronPackages/APISIX/apisix-source/t/wasm/request-body.t new file mode 100644 index 0000000..156df25 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/request-body.t @@ -0,0 +1,251 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $extra_yaml_config = <<_EOC_; +wasm: + plugins: + - name: wasm-request-body + priority: 7997 + file: t/wasm/request-body/main.go.wasm + - name: wasm-request-body2 + priority: 7996 + file: t/wasm/request-body/main.go.wasm +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm-request-body": { + "conf": "{\"processReqBody\":true, \"start\":1, \"size\":3}" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- request +POST /hello +hello +--- grep_error_log eval +qr/request get body: \w+/ +--- grep_error_log_out +request get body: ell + + + +=== TEST 3: no body +--- request +POST /hello +--- error_log +error status returned by host: not found + + + +=== TEST 4: do not process body +--- config + 
location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm-request-body": { + "conf": "{\"processReqBody\":false, \"start\":1, \"size\":3}" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: hit +--- request +POST /hello +hello +--- grep_error_log eval +qr/request get body: \w+/ +--- grep_error_log_out + + + +=== TEST 6: ensure the process body flag is plugin independent +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm-request-body": { + "conf": "{\"processReqBody\":true, \"start\":1, \"size\":3}" + }, + "wasm-request-body2": { + "conf": "{\"processReqBody\":false, \"start\":2, \"size\":3}" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit +--- request +POST /hello +hello +--- grep_error_log eval +qr/request get body: \w+/ +--- grep_error_log_out +request get body: ell + + + +=== TEST 8: invalid conf type no set conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm-request-body": { + "setting": {"processReqBody":true, "start":1, "size":3} + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + 
ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/property.*conf.*is required/ + + + +=== TEST 9: hit +--- request +POST /hello +hello +--- grep_error_log eval +qr/request get body: \w+/ +--- grep_error_log_out +request get body: ell diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/request-body/main.go b/CloudronPackages/APISIX/apisix-source/t/wasm/request-body/main.go new file mode 100644 index 0000000..7895fe9 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/request-body/main.go @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm" + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm/types" + "github.com/valyala/fastjson" +) + +func main() { + proxywasm.SetVMContext(&vmContext{}) +} + +type vmContext struct { + types.DefaultVMContext +} + +func (*vmContext) NewPluginContext(contextID uint32) types.PluginContext { + return &pluginContext{contextID: contextID} +} + +type pluginContext struct { + types.DefaultPluginContext + contextID uint32 + start int + size int + processReqBody bool +} + +func (ctx *pluginContext) OnPluginStart(pluginConfigurationSize int) types.OnPluginStartStatus { + data, err := proxywasm.GetPluginConfiguration() + if err != nil { + proxywasm.LogCriticalf("error reading plugin configuration: %v", err) + return types.OnPluginStartStatusFailed + } + + var conf *fastjson.Value + var p fastjson.Parser + conf, err = p.ParseBytes(data) + if err != nil { + proxywasm.LogErrorf("error decoding plugin configuration: %v", err) + return types.OnPluginStartStatusFailed + } + + ctx.start = conf.GetInt("start") + ctx.size = conf.GetInt("size") + ctx.processReqBody = conf.GetBool("processReqBody") + return types.OnPluginStartStatusOK +} + +func (ctx *pluginContext) NewHttpContext(contextID uint32) types.HttpContext { + return &httpContext{pluginCtx: ctx, contextID: contextID} +} + +type httpContext struct { + types.DefaultHttpContext + pluginCtx *pluginContext + contextID uint32 +} + +func (ctx *httpContext) OnHttpRequestHeaders(numHeaders int, endOfStream bool) types.Action { + if ctx.pluginCtx.processReqBody { + proxywasm.SetProperty([]string{"wasm_process_req_body"}, []byte("true")) + } + + return types.ActionContinue +} + +func (ctx *httpContext) OnHttpRequestBody(bodySize int, endOfStream bool) types.Action { + size := ctx.pluginCtx.size + if size == 0 { + size = bodySize + } + + body, err := proxywasm.GetHttpRequestBody(ctx.pluginCtx.start, size) + if err != nil { + proxywasm.LogErrorf("failed 
to get body: %v", err) + return types.ActionContinue + } + + proxywasm.LogWarnf("request get body: %v", string(body)) + return types.ActionContinue +} diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/response-rewrite.t b/CloudronPackages/APISIX/apisix-source/t/wasm/response-rewrite.t new file mode 100644 index 0000000..a1a5357 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/response-rewrite.t @@ -0,0 +1,188 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + my $extra_yaml_config = <<_EOC_; +wasm: + plugins: + - name: wasm-response-rewrite + priority: 7997 + file: t/wasm/response-rewrite/main.go.wasm + - name: wasm-response-rewrite2 + priority: 7996 + file: t/wasm/response-rewrite/main.go.wasm +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: response rewrite headers +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm-response-rewrite": { + "conf": "{\"headers\":[{\"name\":\"x-wasm\",\"value\":\"apisix\"}]}" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- request +GET /hello +--- response_headers +x-wasm: apisix + + + +=== TEST 3: log response body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm-response-rewrite": { + "conf": "{\"body\":\"a\"}" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: hit +--- request +GET /hello +--- grep_error_log eval 
+qr/get body .+/ +--- grep_error_log_out +get body [hello world + + + +=== TEST 5: ensure the process body flag is plugin independent +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm-response-rewrite": { + "conf": "{\"body\":\"a\"}" + }, + "wasm-response-rewrite2": { + "conf": "{\"headers\":[{\"name\":\"x-wasm\",\"value\":\"apisix\"}]}" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit +--- request +GET /hello +--- grep_error_log eval +qr/get body .+/ +--- grep_error_log_out +get body [hello world diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/response-rewrite/main.go b/CloudronPackages/APISIX/apisix-source/t/wasm/response-rewrite/main.go new file mode 100644 index 0000000..fb7b184 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/response-rewrite/main.go @@ -0,0 +1,114 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm" + "github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm/types" + + "github.com/valyala/fastjson" +) + +func main() { + proxywasm.SetVMContext(&vmContext{}) +} + +type vmContext struct { + types.DefaultVMContext +} + +func (*vmContext) NewPluginContext(contextID uint32) types.PluginContext { + return &pluginContext{} +} + +type header struct { + Name string + Value string +} + +type pluginContext struct { + types.DefaultPluginContext + Headers []header + Body []byte +} + +func (ctx *pluginContext) OnPluginStart(pluginConfigurationSize int) types.OnPluginStartStatus { + data, err := proxywasm.GetPluginConfiguration() + if err != nil { + proxywasm.LogErrorf("error reading plugin configuration: %v", err) + return types.OnPluginStartStatusFailed + } + + var p fastjson.Parser + v, err := p.ParseBytes(data) + if err != nil { + proxywasm.LogErrorf("error decoding plugin configuration: %v", err) + return types.OnPluginStartStatusFailed + } + headers := v.GetArray("headers") + ctx.Headers = make([]header, len(headers)) + for i, hdr := range headers { + ctx.Headers[i] = header{ + Name: string(hdr.GetStringBytes("name")), + Value: string(hdr.GetStringBytes("value")), + } + } + + body := v.GetStringBytes("body") + ctx.Body = body + + return types.OnPluginStartStatusOK +} + +func (ctx *pluginContext) NewHttpContext(contextID uint32) types.HttpContext { + return &httpContext{parent: ctx} +} + +type httpContext struct { + types.DefaultHttpContext + parent *pluginContext +} + +func (ctx *httpContext) OnHttpResponseHeaders(numHeaders int, endOfStream bool) types.Action { + plugin := ctx.parent + for _, hdr := range plugin.Headers { + proxywasm.ReplaceHttpResponseHeader(hdr.Name, hdr.Value) + } + + if len(plugin.Body) > 0 { + proxywasm.SetProperty([]string{"wasm_process_resp_body"}, []byte("true")) + } + + return types.ActionContinue +} + +func (ctx *httpContext) OnHttpResponseBody(bodySize int, 
endOfStream bool) types.Action { + plugin := ctx.parent + + if len(plugin.Body) > 0 && !endOfStream { + // TODO support changing body + body, err := proxywasm.GetHttpResponseBody(0, bodySize) + if err != nil { + proxywasm.LogErrorf("failed to get body: %v", err) + return types.ActionContinue + } + proxywasm.LogWarnf("get body [%s]", string(body)) + } + + return types.ActionContinue +} diff --git a/CloudronPackages/APISIX/apisix-source/t/wasm/route.t b/CloudronPackages/APISIX/apisix-source/t/wasm/route.t new file mode 100644 index 0000000..717e54e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/wasm/route.t @@ -0,0 +1,464 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + if (!defined $block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +wasm: + plugins: + - name: wasm_log + priority: 7999 + file: t/wasm/log/main.go.wasm + - name: wasm_log2 + priority: 7998 + file: t/wasm/log/main.go.wasm +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: scheme check with empty json body +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- error_log eval +qr/invalid request body/ + + + +=== TEST 2: scheme check with conf type number +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {"conf": 123} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- error_log eval +qr/invalid request body/ + + + +=== TEST 3: scheme check with conf json type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {"conf": {}}} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/value should match only one schema, but matches none/ + + + +=== TEST 4: scheme check with conf json type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {"conf": ""}} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/value should match only one schema, but matches none/ + + + +=== TEST 5: sanity +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": { + "conf": "blahblah" + }, + "wasm_log2": { + "conf": "zzz" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit +--- request +GET /hello +--- grep_error_log eval +qr/run plugin ctx \d+ with conf \S+ in http ctx \d+/ +--- grep_error_log_out +run plugin ctx 1 with conf blahblah in http ctx 2 +run plugin ctx 1 with conf zzz in http ctx 2 + + + +=== TEST 7: run wasm plugin in rewrite phase (prior to the one run in access phase) +--- extra_yaml_config +wasm: + plugins: + - name: wasm_log + priority: 7999 + file: t/wasm/log/main.go.wasm + - name: wasm_log2 
+ priority: 7998 + file: t/wasm/log/main.go.wasm + http_request_phase: rewrite +--- request +GET /hello +--- grep_error_log eval +qr/run plugin ctx \d+ with conf \S+ in http ctx \d+/ +--- grep_error_log_out +run plugin ctx 1 with conf zzz in http ctx 2 +run plugin ctx 1 with conf blahblah in http ctx 2 + + + +=== TEST 8: plugin from service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": { + "id": "log", + "conf": "blahblah" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "service_id": "1", + "hosts": ["foo.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "service_id": "1", + "hosts": ["bar.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + + for i = 1, 4 do + local host = "foo.com" + if i % 2 == 0 then + host = "bar.com" + end + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {host = host}}) + if not res then + ngx.say(err) + return + end + end + } + } +--- grep_error_log eval +qr/run plugin ctx \d+ with conf \S+ in http ctx \d+/ +--- grep_error_log_out +run plugin ctx 1 with conf blahblah in http ctx 2 +run plugin ctx 3 with conf blahblah in http ctx 4 +run plugin ctx 1 with conf blahblah in http ctx 2 +run plugin ctx 3 with conf blahblah in http ctx 4 + + + +=== TEST 10: plugin from plugin_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "wasm_log": { + "id": "log", + "conf": "blahblah" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello", + "plugin_config_id": "1", + "hosts": ["foo.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/hello", + "plugin_config_id": "1", + "hosts": ["bar.com"] + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + + for i = 1, 4 do + local host = "foo.com" + if i % 2 == 0 then + host = "bar.com" + end + local httpc = http.new() + local res, err = httpc:request_uri(uri, {headers = {host = host}}) + if not res then + ngx.say(err) + return + end + end + } + } +--- grep_error_log eval +qr/run plugin ctx \d+ with conf \S+ in http ctx \d+/ +--- grep_error_log_out +run plugin ctx 1 with conf blahblah in http ctx 2 +run plugin ctx 3 with conf blahblah in http ctx 4 +run plugin ctx 1 with conf blahblah in http ctx 2 +run plugin ctx 3 with conf blahblah in http ctx 4 diff --git a/CloudronPackages/APISIX/apisix-source/t/xds-library/config_xds.t b/CloudronPackages/APISIX/apisix-source/t/xds-library/config_xds.t new file mode 100644 index 0000000..98b4070 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xds-library/config_xds.t @@ -0,0 +1,129 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +use Cwd qw(cwd); +my $apisix_home = $ENV{APISIX_HOME} || cwd(); + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + my $lua_deps_path = $block->lua_deps_path // <<_EOC_; + lua_package_path "$apisix_home/?.lua;$apisix_home/?/init.lua;$apisix_home/deps/share/lua/5.1/?/init.lua;$apisix_home/deps/share/lua/5.1/?.lua;$apisix_home/apisix/?.lua;$apisix_home/t/?.lua;;"; + lua_package_cpath "$apisix_home/?.so;$apisix_home/t/xds-library/?.so;$apisix_home/deps/lib/lua/5.1/?.so;$apisix_home/deps/lib64/lua/5.1/?.so;;"; +_EOC_ + + $block->set_value("lua_deps_path", $lua_deps_path); + + my $extra_init_by_lua = <<_EOC_; + -- + local config_xds = require("apisix.core.config_xds") + + local inject = function(mod, name) + local old_f = mod[name] + mod[name] = function (...) 
+ ngx.log(ngx.WARN, "config_xds run ", name) + return { true } + end + end + + inject(config_xds, "new") + +_EOC_ + + $block->set_value("extra_init_by_lua", $extra_init_by_lua); + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: xds +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: load xDS library successfully +--- config + location /t { + content_by_lua_block { + ngx.say("ok") + } + } +--- no_error_log eval +qr/can not load xDS library/ + + + +=== TEST 2: read data form shdict that wirted by xDS library +--- config + location /t { + content_by_lua_block { + -- wait for xds library sync data + ngx.sleep(1.5) + local core = require("apisix.core") + local value = ngx.shared["xds-config"]:get("/routes/1") + local route_conf, err = core.json.decode(value) + local json_encode = require("toolkit.json").encode + ngx.say(json_encode(route_conf.uri)) + } + } +--- response_body +"/hello" + + + +=== TEST 3: read conf version +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local version + for i = 1, 5 do + version = ngx.shared["xds-config-version"]:get("version") + if version then + ngx.say(version) + break + end + -- wait for xds library sync data + ngx.sleep(1.5) + end + } + } +--- response_body eval +qr/^\d{13}$/ diff --git a/CloudronPackages/APISIX/apisix-source/t/xds-library/config_xds_2.t b/CloudronPackages/APISIX/apisix-source/t/xds-library/config_xds_2.t new file mode 100644 index 0000000..bfd9fd0 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xds-library/config_xds_2.t @@ -0,0 +1,239 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +use Cwd qw(cwd); +my $apisix_home = $ENV{APISIX_HOME} || cwd(); + +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + my $lua_deps_path = $block->lua_deps_path // <<_EOC_; + lua_package_path "$apisix_home/?.lua;$apisix_home/?/init.lua;$apisix_home/deps/share/lua/5.1/?/init.lua;$apisix_home/deps/share/lua/5.1/?.lua;$apisix_home/apisix/?.lua;$apisix_home/t/?.lua;;"; + lua_package_cpath "$apisix_home/?.so;$apisix_home/t/xds-library/?.so;$apisix_home/deps/lib/lua/5.1/?.so;$apisix_home/deps/lib64/lua/5.1/?.so;;"; +_EOC_ + + $block->set_value("lua_deps_path", $lua_deps_path); + + if (!$block->yaml_config) { + my $yaml_config = <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: xds +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: proxy request using data written by xds(id = 1) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { method = "GET"}) + + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +hello world + + + +=== TEST 2: proxy request using data written by xds(id = 2, upstream_id = 1) +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello1" + local res, err = httpc:request_uri(uri, { method = "GET"}) + + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +hello1 world + + + +=== TEST 3: proxy request using data written by xds(id = 3, upstream_id = 2) +--- config + location /t { + content_by_lua_block { + ngx.sleep(1.5) + local core = require("apisix.core") + local value = ngx.shared["xds-config"]:flush_all() + ngx.sleep(1.5) + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { method = "GET"}) + + if not res then + ngx.say(err) + return + end + ngx.print(res.body) + } + } +--- response_body +hello world + + + +=== TEST 4: flush all keys in xds config +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + ngx.shared["xds-config"]:flush_all() + ngx.update_time() + ngx.shared["xds-config-version"]:set("version", ngx.now()) + ngx.sleep(1.5) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" + local res, err = httpc:request_uri(uri, { method = "GET"}) + + if not res then + ngx.say(err) + return + end + ngx.status = res.status + ngx.print(res.body) + } + } +--- error_code: 404 +--- response_body +{"error_msg":"404 Route Not Found"} + + + +=== TEST 5: bad format json +--- config + location /t { + content_by_lua_block { + local data = [[{ + upstream = { + type = "roundrobin" + nodes = { + ["127.0.0.1:1980"] = 1, + } + }, + uri = "/bad_json" + }]] + ngx.shared["xds-config"]:set("/routes/3", data) + ngx.update_time() + ngx.shared["xds-config-version"]:set("version", ngx.now()) + ngx.sleep(1.5) + + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/bad_json" + local res, err = httpc:request_uri(uri, { method = "GET"}) + + if not res then + ngx.say(err) + return + end + ngx.status = res.status + } + } +--- wait: 2 +--- error_code: 404 +--- error_log +decode the conf of [/routes/3] failed, err: Expected object key string but found invalid token + + + +=== TEST 6: schema check fail +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:65536"] = 1, + } + } + } + local data_str = core.json.encode(data) + ngx.shared["xds-config"]:set("/routes/3", data_str) + ngx.update_time() + ngx.shared["xds-config-version"]:set("version", ngx.now()) + ngx.sleep(1.5) + } + } +--- no_error_log +[alert] +-- wait: 2 +--- error_log +failed to check the conf of [/routes/3] err:allOf 1 failed: value should match only one schema, but matches none + + + +=== TEST 7: not table +--- config + location /t { + content_by_lua_block { + local data = "/not_table" + ngx.shared["xds-config"]:set("/routes/3", data) + ngx.update_time() + ngx.shared["xds-config-version"]:set("version", ngx.now()) + ngx.sleep(1.5) + } + } +--- no_error_log +[alert] +-- wait: 2 +--- error_log +invalid conf of [/routes/3], 
conf: nil, it should be an object diff --git a/CloudronPackages/APISIX/apisix-source/t/xds-library/export.go b/CloudronPackages/APISIX/apisix-source/t/xds-library/export.go new file mode 100644 index 0000000..afb4680 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xds-library/export.go @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import "C" +import "unsafe" + +//export initial +func initial(config_zone unsafe.Pointer, version_zone unsafe.Pointer) { + write_config(config_zone, version_zone) +} diff --git a/CloudronPackages/APISIX/apisix-source/t/xds-library/main.go b/CloudronPackages/APISIX/apisix-source/t/xds-library/main.go new file mode 100644 index 0000000..6ee3bb2 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xds-library/main.go @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+/*
+#cgo LDFLAGS: -shared -ldl
+#include "xds.h"
+#include <stdlib.h>
+
+extern void ngx_lua_ffi_shdict_store(void *zone, int op,
+    const unsigned char *key, size_t key_len,
+    int value_type,
+    const unsigned char *str_value_buf, size_t str_value_len,
+    double num_value, long exptime, int user_flags, char **errmsg,
+    int *forcible);
+*/
+import "C"
+
+import (
+	"strconv"
+	"time"
+	"unsafe"
+)
+
+func main() {
+}
+
+// write_config seeds the config shdict zone with the fixed routes/upstreams
+// used by the xDS tests, then starts the periodic version refresher on the
+// version zone.
+func write_config(config_zone unsafe.Pointer, version_zone unsafe.Pointer) {
+	route_key := "/routes/1"
+	// Plain raw string literals: the payloads contain no format verbs, so
+	// wrapping them in fmt.Sprintf added nothing and would mangle any
+	// future literal '%' in the JSON.
+	route_value := `{
+"status": 1,
+"update_time": 1647250524,
+"create_time": 1646972532,
+"uri": "/hello",
+"priority": 0,
+"id": "1",
+"upstream": {
+    "nodes": [
+        {
+            "port": 1980,
+            "priority": 0,
+            "host": "127.0.0.1",
+            "weight": 1
+        }
+    ],
+    "type": "roundrobin",
+    "hash_on": "vars",
+    "pass_host": "pass",
+    "scheme": "http"
+}
+}`
+
+	upstream_key := "/upstreams/1"
+	upstream_value := `{
+"id": "1",
+"nodes": {
+    "127.0.0.1:1980": 1
+},
+"type": "roundrobin"
+}`
+
+	r_u_key := "/routes/2"
+	r_u_value := `{
+"status": 1,
+"update_time": 1647250524,
+"create_time": 1646972532,
+"uri": "/hello1",
+"priority": 0,
+"id": "2",
+"upstream_id": "1"
+}`
+
+	write_shdict(route_key, route_value, config_zone)
+	write_shdict(upstream_key, upstream_value, config_zone)
+	write_shdict(r_u_key, r_u_value, config_zone)
+	update_conf_version(version_zone)
+}
+
+// get_version returns the current wall-clock time in milliseconds as a
+// decimal string, used as a monotonically-increasing config version stamp.
+func get_version() string {
+	return strconv.FormatInt(time.Now().UnixNano()/1e6, 10)
+}
+
+// update_conf_version writes a fresh version stamp immediately and then
+// refreshes it every 5 seconds for the lifetime of the process.
+// NOTE(review): the previous implementation selected on a
+// context.Background().Done() channel that can never fire, so the plain
+// ticker loop below is behaviorally identical and removes the dead
+// cancellation path.
+func update_conf_version(zone unsafe.Pointer) {
+	write_shdict("version", get_version(), zone)
+	go func() {
+		for range time.Tick(time.Second * 5) {
+			write_shdict("version", get_version(), zone)
+		}
+	}()
+}
+
+// write_shdict stores key/value as a string entry into the given
+// lua_shared_dict zone through the FFI entry point declared in xds.h.
+// op 0x0004 and value_type 4 mirror the shdict "add/string" constants
+// expected by the lua-nginx-module FFI implementation.
+func write_shdict(key string, value string, zone unsafe.Pointer) {
+	var keyCStr = C.CString(key)
+	defer C.free(unsafe.Pointer(keyCStr))
+	var keyLen = C.size_t(len(key))
+
+	var valueCStr = C.CString(value)
+	defer C.free(unsafe.Pointer(valueCStr))
+	var valueLen = C.size_t(len(value))
+
+	errMsgBuf := make([]*C.char, 1)
+	// Must be a C.int: the callee writes through an int*. The previous
+	// `var forcible = 0` was a Go int (64-bit on most platforms) aliased
+	// via unsafe.Pointer as *C.int — a type-punning bug.
+	var forcible C.int = 0
+
+	C.ngx_lua_ffi_shdict_store(zone, 0x0004,
+		(*C.uchar)(unsafe.Pointer(keyCStr)), keyLen,
+		4,
+		(*C.uchar)(unsafe.Pointer(valueCStr)), valueLen,
+		0, 0, 0,
+		(**C.char)(unsafe.Pointer(&errMsgBuf[0])),
+		&forcible,
+	)
+}
diff --git a/CloudronPackages/APISIX/apisix-source/t/xds-library/xds.h b/CloudronPackages/APISIX/apisix-source/t/xds-library/xds.h new file mode 100644 index 0000000..48a8735 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xds-library/xds.h @@ -0,0 +1,55 @@ +/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#ifndef XDS_H +#define XDS_H +#include <dlfcn.h> +#include <stdlib.h> + + +void ngx_lua_ffi_shdict_store(void *zone, int op, + const unsigned char *key, size_t key_len, + int value_type, + const unsigned char *str_value_buf, size_t str_value_len, + double num_value, long exptime, int user_flags, char **errmsg, + int *forcible) +{ + static void* dlhandle; + static void (*fp)(void *zone, int op, + const unsigned char *key, size_t key_len, + int value_type, + const unsigned char *str_value_buf, size_t str_value_len, + double num_value, long exptime, int user_flags, char **errmsg, + int *forcible); + + if (!dlhandle) { + dlhandle = dlopen(NULL, RTLD_NOW); + } + if (!dlhandle) { + return; + } + + fp = dlsym(dlhandle, "ngx_http_lua_ffi_shdict_store"); + if (!fp) { + fp = dlsym(dlhandle, "ngx_meta_lua_ffi_shdict_store"); + } + + fp(zone, op, key, key_len, value_type, str_value_buf, str_value_len, + num_value, exptime, user_flags, errmsg, forcible); +} + + +#endif // XDS_H diff --git a/CloudronPackages/APISIX/apisix-source/t/xrpc/apisix/stream/xrpc/protocols/pingpong/init.lua b/CloudronPackages/APISIX/apisix-source/t/xrpc/apisix/stream/xrpc/protocols/pingpong/init.lua new file mode 100644 index 0000000..3ea0c7e --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xrpc/apisix/stream/xrpc/protocols/pingpong/init.lua @@ -0,0 +1,287 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") +local sdk = require("apisix.stream.xrpc.sdk") +local xrpc_socket = require("resty.apisix.stream.xrpc.socket") +local bit = require("bit") +local lshift = bit.lshift +local ffi = require("ffi") +local ffi_str = ffi.string +local ipairs = ipairs +local math_random = math.random +local OK = ngx.OK +local DECLINED = ngx.DECLINED +local DONE = ngx.DONE +local str_byte = string.byte + + +core.ctx.register_var("rpc_len", function(ctx) + return ctx.len +end) + +local _M = {} +local router_version +local router +-- pingpong protocol is designed to use in the test of xRPC. +-- It contains two part: a fixed-length header & a body. 
+-- Header format: +-- "pp" (magic number) + 1 bytes req type + 2 bytes stream id + 1 reserved bytes +-- + 4 bytes body length + optional 4 bytes service name +local HDR_LEN = 10 +local TYPE_HEARTBEAT = 1 +local TYPE_UNARY = 2 +local TYPE_STREAM = 3 +local TYPE_UNARY_DYN_UP = 4 + + +function _M.init_worker() + core.log.info("call pingpong's init_worker") +end + + +function _M.init_downstream(session) + -- create the downstream + local sk = xrpc_socket.downstream.socket() + sk:settimeout(1000) -- the short timeout is just for test + return sk +end + + +local function read_data(sk, len, body) + local f = body and sk.drain or sk.read + local p, err = f(sk, len) + if not p then + if err ~= "closed" then + core.log.error("failed to read: ", err) + end + return nil + end + + return p +end + + +local function to_int32(p, idx) + return lshift(p[idx], 24) + lshift(p[idx + 1], 16) + lshift(p[idx + 2], 8) + p[idx + 3] +end + + +function _M.from_downstream(session, downstream) + -- read a request from downstream + -- return status and the new ctx + core.log.info("call pingpong's from_downstream") + + local p = read_data(downstream, HDR_LEN, false) + if p == nil then + return DECLINED + end + + local p_b = str_byte("p") + if p[0] ~= p_b or p[1] ~= p_b then + core.log.error("invalid magic number: ", ffi_str(p, 2)) + return DECLINED + end + + local typ = p[2] + if typ == TYPE_HEARTBEAT then + core.log.info("send heartbeat") + + -- need to reset read buf as we won't forward it + downstream:reset_read_buf() + downstream:send(ffi_str(p, HDR_LEN)) + return DONE + end + + local stream_id = p[3] * 256 + p[4] + local ctx = sdk.get_req_ctx(session, stream_id) + + local body_len = to_int32(p, 6) + core.log.info("read body len: ", body_len) + + if typ == TYPE_UNARY_DYN_UP then + local p = read_data(downstream, 4, false) + if p == nil then + return DECLINED + end + + local len = 4 + for i = 0, 3 do + if p[i] == 0 then + len = i + break + end + end + local service = ffi_str(p, len) + 
core.log.info("get service [", service, "]") + ctx.service = service + + local changed, raw_router, version = sdk.get_router(session, router_version) + if changed then + router_version = version + router = {} + + for _, r in ipairs(raw_router) do + local conf = r.protocol.conf + if conf and conf.service then + router[conf.service] = r + end + end + end + + local conf = router[ctx.service] + if conf then + local err = sdk.set_upstream(session, conf) + if err then + core.log.error("failed to set upstream: ", err) + return DECLINED + end + end + end + + local p = read_data(downstream, body_len, true) + if p == nil then + return DECLINED + end + + ctx.is_unary = typ == TYPE_UNARY or typ == TYPE_UNARY_DYN_UP + ctx.is_stream = typ == TYPE_STREAM + ctx.id = stream_id + ctx.len = HDR_LEN + body_len + if typ == TYPE_UNARY_DYN_UP then + ctx.len = ctx.len + 4 + end + + return OK, ctx +end + + +function _M.connect_upstream(session, ctx) + -- connect the upstream with upstream_conf + -- also do some handshake jobs + -- return status and the new upstream + core.log.info("call pingpong's connect_upstream") + + local conf = session.upstream_conf + local nodes = conf.nodes + if #nodes == 0 then + core.log.error("failed to connect: no nodes") + return DECLINED + end + local node = nodes[math_random(#nodes)] + + core.log.info("connect to ", node.host, ":", node.port) + + local sk = sdk.connect_upstream(node, conf) + if not sk then + return DECLINED + end + + return OK, sk +end + + +function _M.disconnect_upstream(session, upstream) + -- disconnect upstream created by connect_upstream + sdk.disconnect_upstream(upstream, session.upstream_conf) +end + + +function _M.to_upstream(session, ctx, downstream, upstream) + -- send the request read from downstream to the upstream + -- return whether the request is sent + core.log.info("call pingpong's to_upstream") + + local ok, err = upstream:move(downstream) + if not ok then + core.log.error("failed to send to upstream: ", err) + return 
DECLINED + end + + if ctx.is_unary then + local p = read_data(upstream, ctx.len, false) + if p == nil then + return DECLINED + end + + local ok, err = downstream:move(upstream) + if not ok then + core.log.error("failed to handle upstream: ", err) + return DECLINED + end + + return DONE + end + + return OK +end + + +function _M.from_upstream(session, downstream, upstream) + local p = read_data(upstream, HDR_LEN, false) + if p == nil then + return DECLINED + end + + local p_b = str_byte("p") + if p[0] ~= p_b or p[1] ~= p_b then + core.log.error("invalid magic number: ", ffi_str(p, 2)) + return DECLINED + end + + local typ = p[2] + if typ == TYPE_HEARTBEAT then + core.log.info("send heartbeat") + + -- need to reset read buf as we won't forward it + upstream:reset_read_buf() + upstream:send(ffi_str(p, HDR_LEN)) + return DONE + end + + local stream_id = p[3] * 256 + p[4] + local ctx = sdk.get_req_ctx(session, stream_id) + + local body_len = to_int32(p, 6) + if ctx.len then + if body_len ~= ctx.len - HDR_LEN then + core.log.error("upstream body len mismatch, expected: ", ctx.len - HDR_LEN, + ", actual: ", body_len) + return DECLINED + end + end + + local p = read_data(upstream, body_len, true) + if p == nil then + return DECLINED + end + + local ok, err = downstream:move(upstream) + if not ok then + core.log.error("failed to handle upstream: ", err) + return DECLINED + end + + return DONE, ctx +end + + +function _M.log(session, ctx) + core.log.info("call pingpong's log, ctx unfinished: ", ctx.unfinished == true) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/xrpc/apisix/stream/xrpc/protocols/pingpong/schema.lua b/CloudronPackages/APISIX/apisix-source/t/xrpc/apisix/stream/xrpc/protocols/pingpong/schema.lua new file mode 100644 index 0000000..28bdaef --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xrpc/apisix/stream/xrpc/protocols/pingpong/schema.lua @@ -0,0 +1,52 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or 
more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local core = require("apisix.core") + + +local schema = { + type = "object", + properties = { + service = { + type = "string" + }, + faults = { + type = "array", + minItems = 1, + items = { + type = "object", + properties = { + header_type = { type = "string" }, + delay = { + type = "number", + description = "additional delay in seconds", + } + }, + required = {"header_type"} + }, + }, + }, +} + +local _M = {} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +return _M diff --git a/CloudronPackages/APISIX/apisix-source/t/xrpc/dubbo.t b/CloudronPackages/APISIX/apisix-source/t/xrpc/dubbo.t new file mode 100644 index 0000000..290eadb --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xrpc/dubbo.t @@ -0,0 +1,168 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: dubbo +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $config = $block->config // <<_EOC_; + location /t { + content_by_lua_block { + ngx.req.read_body() + local sock = ngx.socket.tcp() + sock:settimeout(1000) + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end + + local bytes, err = sock:send(ngx.req.get_body_data()) + if not bytes then + ngx.log(ngx.ERR, "send stream request error: ", err) + return ngx.exit(503) + end + while true do + local data, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + ngx.print(data) + end + } + } +_EOC_ + + $block->set_value("config", $config); + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\nRPC is not finished"); + } + + $block; +}); + +worker_connections(1024); +run_tests; + +__DATA__ + +=== TEST 1: init +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = 
"dubbo" + }, + upstream = { + nodes = { + ["127.0.0.1:20880"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: use dubbo_backend_provider server. request=org.apache.dubbo.backend.DemoService,service_version:1.0.1#hello,response=dubbo success & 200 +--- request eval +"GET /t +\xda\xbb\xc2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xef\x05\x32\x2e\x30\x2e\x32\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x05\x31\x2e\x30\x2e\x30\x05\x68\x65\x6c\x6c\x6f\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x48\x04\x6e\x61\x6d\x65\x08\x7a\x68\x61\x6e\x67\x73\x61\x6e\x5a\x48\x04\x70\x61\x74\x68\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x12\x72\x65\x6d\x6f\x74\x65\x2e\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x0b\x73\x70\x2d\x63\x6f\x6e\x73\x75\x6d\x65\x72\x09\x69\x6e\x74\x65\x72\x66\x61\x63\x65\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x07\x76\x65\x72\x73\x69\x6f\x6e\x05\x31\x2e\x30\x2e\x30\x07\x74\x69\x6d\x65\x6f\x75\x74\x04\x31\x30\x30\x30\x5a" +--- response_body eval +"\xda\xbb\x02\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30\x94\x48\x04\x62\x6f\x64\x79\x0e\x64\x75\x62\x62\x6f\x20\x73\x75\x63\x63\x65\x73\x73\x0a\x06\x73\x74\x61\x74\x75\x73\x03\x32\x30\x30\x5a\x48\x05\x64\x75\x62\x62\x6f\x05\x32\x2e\x30\x2e\x32\x5a" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 3: heart beat. request=\xe2|11..,response=\x22|00... 
+--- request eval +"GET /t +\xda\xbb\xe2\x00\x00\x00\x00\x00\x00\x00\x00\x34\x00\x00\x00\x01\x4e" +--- response_body eval +"\xda\xbb\x22\x14\x00\x00\x00\x00\x00\x00\x00\x34\x00\x00\x00\x01\x4e" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 4: no response. Different from test2 \x82=10000010, the second bit=0 of the third byte means no need to return +--- request eval +"GET /t +\xda\xbb\x82\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xef\x05\x32\x2e\x30\x2e\x32\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x05\x31\x2e\x30\x2e\x30\x05\x68\x65\x6c\x6c\x6f\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x48\x04\x6e\x61\x6d\x65\x08\x7a\x68\x61\x6e\x67\x73\x61\x6e\x5a\x48\x04\x70\x61\x74\x68\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x12\x72\x65\x6d\x6f\x74\x65\x2e\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x0b\x73\x70\x2d\x63\x6f\x6e\x73\x75\x6d\x65\x72\x09\x69\x6e\x74\x65\x72\x66\x61\x63\x65\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x07\x76\x65\x72\x73\x69\x6f\x6e\x05\x31\x2e\x30\x2e\x30\x07\x74\x69\x6d\x65\x6f\x75\x74\x04\x31\x30\x30\x30\x5a" +--- response_body eval +"" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 5: failed response. 
request=org.apache.dubbo.backend.DemoService,service_version:1.0.1#fail,response=503 +--- request eval +"GET /t +\xda\xbb\xc2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xee\x05\x32\x2e\x30\x2e\x32\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x05\x31\x2e\x30\x2e\x30\x04\x66\x61\x69\x6c\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x48\x04\x6e\x61\x6d\x65\x08\x7a\x68\x61\x6e\x67\x73\x61\x6e\x5a\x48\x04\x70\x61\x74\x68\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x12\x72\x65\x6d\x6f\x74\x65\x2e\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x0b\x73\x70\x2d\x63\x6f\x6e\x73\x75\x6d\x65\x72\x09\x69\x6e\x74\x65\x72\x66\x61\x63\x65\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x07\x76\x65\x72\x73\x69\x6f\x6e\x05\x31\x2e\x30\x2e\x30\x07\x74\x69\x6d\x65\x6f\x75\x74\x04\x31\x30\x30\x30\x5a" +--- response_body eval +"\xda\xbb\x02\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x2d\x94\x48\x04\x62\x6f\x64\x79\x0b\x64\x75\x62\x62\x6f\x20\x66\x61\x69\x6c\x0a\x06\x73\x74\x61\x74\x75\x73\x03\x35\x30\x33\x5a\x48\x05\x64\x75\x62\x62\x6f\x05\x32\x2e\x30\x2e\x32\x5a" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 6: invalid magic(dabc<>dabb) for heart beat. 
+--- request eval +"GET /t +\xda\xbc\xe2\x00\x00\x00\x00\x00\x00\x00\x00\x34\x00\x00\x00\x01\x4e" +--- error_log +unknown magic number +--- stream_conf_enable diff --git a/CloudronPackages/APISIX/apisix-source/t/xrpc/pingpong.t b/CloudronPackages/APISIX/apisix-source/t/xrpc/pingpong.t new file mode 100644 index 0000000..65b5346 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xrpc/pingpong.t @@ -0,0 +1,781 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: pingpong + - name: redis +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $config = $block->config // <<_EOC_; + location /t { + content_by_lua_block { + ngx.req.read_body() + local sock = ngx.socket.tcp() + sock:settimeout(1000) + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end + + local bytes, err = sock:send(ngx.req.get_body_data()) + if not bytes then + ngx.log(ngx.ERR, "send stream request error: ", err) + return ngx.exit(503) + end + while true do + local data, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + ngx.print(data) + end + } + } +_EOC_ + + $block->set_value("config", $config); + + my $stream_upstream_code = $block->stream_upstream_code // <<_EOC_; + local sock = ngx.req.socket(true) + sock:settimeout(10) + while true do + local data = sock:receiveany(4096) + if not data then + return + end + sock:send(data) + end +_EOC_ + + $block->set_value("stream_upstream_code", $stream_upstream_code); + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\nRPC is not finished"); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: init +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } 
+ ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: too short +--- stream_request +mmm +--- error_log +call pingpong's init_worker +failed to read: timeout + + + +=== TEST 3: reply directly +--- request eval +"POST /t +pp\x01\x00\x00\x00\x00\x00\x00\x00" +--- response_body eval +"pp\x01\x00\x00\x00\x00\x00\x00\x00" +--- stream_conf_enable + + + +=== TEST 4: unary +--- request eval +"POST /t +" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" x 3 +--- response_body eval +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" x 3 +--- log_level: debug +--- no_error_log +stream lua tcp socket set keepalive +--- stream_conf_enable + + + +=== TEST 5: unary & heartbeat +--- request eval +"POST /t +" . +"pp\x01\x00\x00\x00\x00\x00\x00\x00" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- response_body eval +"pp\x01\x00\x00\x00\x00\x00\x00\x00" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable + + + +=== TEST 6: can't connect to upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.1:1979"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: hit +--- request eval +"POST /t +" . 
+"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" x 3 +--- error_log +failed to connect: connection refused +--- stream_conf_enable + + + +=== TEST 8: use short timeout to check upstream's bad response +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + timeout = { + connect = 0.01, + send = 0.009, + read = 0.008, + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 9: bad response +--- request eval +"POST /t +" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" x 1 +--- stream_conf_enable +--- stream_upstream_code + local sock = ngx.req.socket(true) + sock:settimeout(10) + while true do + local data = sock:receiveany(4096) + if not data then + return + end + sock:send(data:sub(5)) + end +--- error_log +failed to read: timeout +stream lua tcp socket connect timeout: 10 +lua tcp socket send timeout: 9 +stream lua tcp socket read timeout: 8 +--- log_level: debug + + + +=== TEST 10: reset +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 11: client stream, N:N +--- request eval +"POST /t +" . +"pp\x03\x00\x01\x00\x00\x00\x00\x03ABC" . 
+"pp\x03\x00\x02\x00\x00\x00\x00\x04ABCD" +--- stream_conf_enable +--- stream_upstream_code + local sock = ngx.req.socket(true) + sock:settimeout(10) + local data1 = sock:receive(13) + if not data1 then + return + end + local data2 = sock:receive(14) + if not data2 then + return + end + assert(sock:send(data2)) + assert(sock:send(data1)) +--- response_body eval +"pp\x03\x00\x02\x00\x00\x00\x00\x04ABCD" . +"pp\x03\x00\x01\x00\x00\x00\x00\x03ABC" + + + +=== TEST 12: client stream, bad response +--- request eval +"POST /t +" . +"pp\x03\x00\x01\x00\x00\x00\x00\x03ABC" . +"pp\x03\x00\x02\x00\x00\x00\x00\x04ABCD" +--- stream_conf_enable +--- stream_upstream_code + local sock = ngx.req.socket(true) + sock:settimeout(10) + local data1 = sock:receive(13) + if not data1 then + return + end + local data2 = sock:receive(14) + if not data2 then + return + end + assert(sock:send(data2)) + assert(sock:send(data1:sub(11))) +--- response_body eval +"pp\x03\x00\x02\x00\x00\x00\x00\x04ABCD" +--- error_log +RPC is not finished +call pingpong's log, ctx unfinished: true + + + +=== TEST 13: server stream, heartbeat +--- request eval +"POST /t +" . +"pp\x03\x00\x01\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable +--- stream_upstream_code + local sock = ngx.req.socket(true) + sock:settimeout(10) + local data1 = sock:receive(13) + if not data1 then + return + end + local hb = "pp\x01\x00\x00\x00\x00\x00\x00\x00" + assert(sock:send(hb)) + local data2 = sock:receive(10) + if not data2 then + return + end + assert(data2 == hb) + assert(sock:send(data1)) +--- response_body eval +"pp\x03\x00\x01\x00\x00\x00\x00\x03ABC" + + + +=== TEST 14: server stream +--- request eval +"POST /t +" . 
+"pp\x03\x00\x01\x00\x00\x00\x00\x01A" +--- stream_conf_enable +--- stream_upstream_code + local sock = ngx.req.socket(true) + sock:settimeout(10) + local data1 = sock:receive(11) + if not data1 then + return + end + assert(sock:send("pp\x03\x00\x03\x00\x00\x00\x00\x03ABC")) + assert(sock:send("pp\x03\x00\x02\x00\x00\x00\x00\x02AB")) + assert(sock:send(data1)) +--- response_body eval +"pp\x03\x00\x03\x00\x00\x00\x00\x03ABC" . +"pp\x03\x00\x02\x00\x00\x00\x00\x02AB" . +"pp\x03\x00\x01\x00\x00\x00\x00\x01A" +--- grep_error_log eval +qr/call pingpong's log, ctx unfinished: \w+/ +--- grep_error_log_out +call pingpong's log, ctx unfinished: false +call pingpong's log, ctx unfinished: false +call pingpong's log, ctx unfinished: false + + + +=== TEST 15: superior & subordinate +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.3:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/stream_routes/2', + ngx.HTTP_PUT, + { + protocol = { + superior_id = 1, + conf = { + service = "a" + }, + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/stream_routes/3', + ngx.HTTP_PUT, + { + protocol = { + superior_id = 1, + conf = { + service = "b" + }, + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.2:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + -- routes below should not be used to matched + local code, body = t('/apisix/admin/stream_routes/4', + ngx.HTTP_PUT, + { + protocol = { + superior_id = 10000, + conf = { + 
service = "b" + }, + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.2:1979"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/stream_routes/5', + ngx.HTTP_PUT, + { + protocol = { + name = "redis" + }, + upstream = { + nodes = { + ["127.0.0.1:6379"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit +--- request eval +"POST /t +" . +"pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC" . +"pp\x04\x00\x00\x00\x00\x00\x00\x04b\x00\x00\x00ABCD" . +"pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC" +--- response_body eval +"pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC" . +"pp\x04\x00\x00\x00\x00\x00\x00\x04b\x00\x00\x00ABCD" . +"pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC" +--- grep_error_log eval +qr/connect to \S+ while prereading client data/ +--- grep_error_log_out +connect to 127.0.0.1:1995 while prereading client data +connect to 127.0.0.2:1995 while prereading client data +--- stream_conf_enable + + + +=== TEST 17: hit (fallback to superior if not found) +--- request eval +"POST /t +" . +"pp\x04\x00\x00\x00\x00\x00\x00\x03abcdABC" . +"pp\x04\x00\x00\x00\x00\x00\x00\x04a\x00\x00\x00ABCD" . +"pp\x04\x00\x00\x00\x00\x00\x00\x03abcdABC" +--- response_body eval +"pp\x04\x00\x00\x00\x00\x00\x00\x03abcdABC" . +"pp\x04\x00\x00\x00\x00\x00\x00\x04a\x00\x00\x00ABCD" . 
+"pp\x04\x00\x00\x00\x00\x00\x00\x03abcdABC" +--- grep_error_log eval +qr/connect to \S+ while prereading client data/ +--- grep_error_log_out +connect to 127.0.0.3:1995 while prereading client data +connect to 127.0.0.1:1995 while prereading client data +--- stream_conf_enable + + + +=== TEST 18: cache router by version +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local sock = ngx.socket.tcp() + sock:settimeout(1000) + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end + + assert(sock:send("pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC")) + + ngx.sleep(0.1) + + local code, body = t('/apisix/admin/stream_routes/2', + ngx.HTTP_PUT, + { + protocol = { + superior_id = 1, + conf = { + service = "c" + }, + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.4:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + + local s = "pp\x04\x00\x00\x00\x00\x00\x00\x04a\x00\x00\x00ABCD" + assert(sock:send(s .. "pp\x04\x00\x00\x00\x00\x00\x00\x03c\x00\x00\x00ABC")) + + while true do + local data, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + ngx.print(data) + end + } + } +--- request +GET /t +--- response_body eval +"pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC" . +"pp\x04\x00\x00\x00\x00\x00\x00\x04a\x00\x00\x00ABCD" . 
+"pp\x04\x00\x00\x00\x00\x00\x00\x03c\x00\x00\x00ABC" +--- grep_error_log eval +qr/connect to \S+ while prereading client data/ +--- grep_error_log_out +connect to 127.0.0.1:1995 while prereading client data +connect to 127.0.0.3:1995 while prereading client data +connect to 127.0.0.4:1995 while prereading client data +--- stream_conf_enable + + + +=== TEST 19: use upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + { + nodes = { + ["127.0.0.3:1995"] = 1 + }, + type = "roundrobin" + } + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/stream_routes/2', + ngx.HTTP_PUT, + { + protocol = { + superior_id = 1, + conf = { + service = "a" + }, + name = "pingpong" + }, + upstream_id = 1 + } + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 20: hit +--- request eval +"POST /t +" . 
+"pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC" +--- response_body eval +"pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC" +--- grep_error_log eval +qr/connect to \S+ while prereading client data/ +--- grep_error_log_out +connect to 127.0.0.3:1995 while prereading client data +--- stream_conf_enable + + + +=== TEST 21: cache router by version, with upstream_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local sock = ngx.socket.tcp() + sock:settimeout(1000) + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end + + assert(sock:send("pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC")) + + ngx.sleep(0.1) + + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.sleep(0.1) + + local s = "pp\x04\x00\x00\x00\x00\x00\x00\x04a\x00\x00\x00ABCD" + assert(sock:send(s)) + + while true do + local data, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + ngx.print(data) + end + } + } +--- request +GET /t +--- response_body eval +"pp\x04\x00\x00\x00\x00\x00\x00\x03a\x00\x00\x00ABC" . +"pp\x04\x00\x00\x00\x00\x00\x00\x04a\x00\x00\x00ABCD" +--- grep_error_log eval +qr/connect to \S+ while prereading client data/ +--- grep_error_log_out +connect to 127.0.0.3:1995 while prereading client data +connect to 127.0.0.1:1995 while prereading client data +--- stream_conf_enable diff --git a/CloudronPackages/APISIX/apisix-source/t/xrpc/pingpong2.t b/CloudronPackages/APISIX/apisix-source/t/xrpc/pingpong2.t new file mode 100644 index 0000000..7365929 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xrpc/pingpong2.t @@ -0,0 +1,753 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: pingpong +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $config = $block->config // <<_EOC_; + location /t { + content_by_lua_block { + ngx.req.read_body() + local sock = ngx.socket.tcp() + sock:settimeout(1000) + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end + + local bytes, err = sock:send(ngx.req.get_body_data()) + if not bytes then + ngx.log(ngx.ERR, "send stream request error: ", err) + return ngx.exit(503) + end + while true do + local data, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + ngx.print(data) + end + } + } +_EOC_ + + $block->set_value("config", $config); + + my $stream_upstream_code = $block->stream_upstream_code // <<_EOC_; + local sock = ngx.req.socket(true) + sock:settimeout(10) + while true do + local data 
= sock:receiveany(4096) + if not data then + return + end + sock:send(data) + end +_EOC_ + + $block->set_value("stream_upstream_code", $stream_upstream_code); + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\nRPC is not finished"); + } + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen 8125 udp; + content_by_lua_block { + require("lib.mock_layer4").dogstatsd() + } + } +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: init +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong" + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: check the default timeout +--- request eval +"POST /t +" . 
+"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- response_body eval +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- error_log +stream lua tcp socket connect timeout: 60000 +lua tcp socket send timeout: 60000 +stream lua tcp socket read timeout: 60000 +--- log_level: debug +--- stream_conf_enable + + + +=== TEST 3: bad loggger filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {} + }, + conf = {} + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: failed to validate the 'filter' expression +--- request eval +"POST /t +" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable +--- error_log +failed to validate the 'filter' expression: rule too short + + + +=== TEST 5: set loggger filter(single rule) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {"rpc_len", ">", 10} + }, + conf = {} + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: log filter matched successful +--- request eval +"POST /t +" . 
+"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable +--- error_log +log filter: syslog filter result: true + + + +=== TEST 7: update loggger filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {"rpc_len", "<", 10} + }, + conf = {} + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 8: failed to match log filter +--- request eval +"POST /t +" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable +--- error_log +log filter: syslog filter result: false + + + +=== TEST 9: set loggger filter(multiple rules) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {"rpc_len", ">", 12}, + {"rpc_len", "<", 14} + }, + conf = {} + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 10: log filter matched successful +--- request eval +"POST /t +" . 
+"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable +--- error_log +log filter: syslog filter result: true + + + +=== TEST 11: update loggger filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {"rpc_len", "<", 10}, + {"rpc_len", ">", 12} + }, + conf = {} + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 12: failed to match log filter +--- request eval +"POST /t +" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable +--- error_log +log filter: syslog filter result: false + + + +=== TEST 13: set custom log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/syslog', + ngx.HTTP_PUT, + [[{ + "log_format": { + "client_ip": "$remote_addr" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 14: no loggger filter, defaulte executed logger plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + conf = { + host = "127.0.0.1", + port = 8125, + sock_type = "udp", + batch_max_size = 1, + flush_limit = 1 + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + 
+=== TEST 15: verify the data received by the log server +--- request eval +"POST /t +" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable +--- wait: 0.5 +--- error_log eval +qr/message received:.*\"client_ip\"\:\"127.0.0.1\"/ + + + +=== TEST 16: set loggger filter +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {"rpc_len", ">", 10} + }, + conf = { + host = "127.0.0.1", + port = 8125, + sock_type = "udp", + batch_max_size = 1, + flush_limit = 1 + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 17: verify the data received by the log server +--- request eval +"POST /t +" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable +--- wait: 0.5 +--- error_log eval +qr/message received:.*\"client_ip\"\:\"127.0.0.1\"/ + + + +=== TEST 18: small flush_limit, instant flush +--- stream_conf_enable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {"rpc_len", ">", 10} + }, + conf = { + host = "127.0.0.1", + port = 5044, + batch_max_size = 1, + flush_limit = 1 + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + + -- wait etcd sync + ngx.sleep(0.5) + + local sock = ngx.socket.tcp() + sock:settimeout(1000) + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end 
+ + assert(sock:send("pp\x02\x00\x00\x00\x00\x00\x00\x03ABC")) + + while true do + local data, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + ngx.print(data) + end + -- wait flush log + ngx.sleep(2.5) + } + } +--- request +GET /t +--- response_body eval +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- timeout: 5 +--- error_log +try to lock with key xrpc-pingpong-logger#table +unlock with key xrpc-pingpong-logger#table + + + +=== TEST 19: check plugin configuration updating +--- stream_conf_enable +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {"rpc_len", ">", 10} + }, + conf = { + host = "127.0.0.1", + port = 5044, + batch_max_size = 1 + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.status = code + ngx.say("fail") + return + end + assert(sock:send("pp\x02\x00\x00\x00\x00\x00\x00\x03ABC")) + local body1, err + while true do + body1, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + end + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {"rpc_len", ">", 10} + }, + conf = { + host = "127.0.0.1", + port = 5045, + batch_max_size = 1 + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + local sock = ngx.socket.tcp() + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.status = code + ngx.say("fail") + return + end 
+ assert(sock:send("pp\x02\x00\x00\x00\x00\x00\x00\x03ABC")) + local body2, err + while true do + body2, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + end + ngx.print(body1) + ngx.print(body2) + } + } +--- request +GET /t +--- wait: 0.5 +--- response_body eval +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- grep_error_log eval +qr/sending a batch logs to 127.0.0.1:(\d+)/ +--- grep_error_log_out +sending a batch logs to 127.0.0.1:5044 +sending a batch logs to 127.0.0.1:5045 diff --git a/CloudronPackages/APISIX/apisix-source/t/xrpc/pingpong3.t b/CloudronPackages/APISIX/apisix-source/t/xrpc/pingpong3.t new file mode 100644 index 0000000..da16e62 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xrpc/pingpong3.t @@ -0,0 +1,193 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: pingpong +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $config = $block->config // <<_EOC_; + location /t { + content_by_lua_block { + ngx.req.read_body() + local sock = ngx.socket.tcp() + sock:settimeout(1000) + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end + + local bytes, err = sock:send(ngx.req.get_body_data()) + if not bytes then + ngx.log(ngx.ERR, "send stream request error: ", err) + return ngx.exit(503) + end + while true do + local data, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + ngx.print(data) + end + } + } +_EOC_ + + $block->set_value("config", $config); + + my $stream_upstream_code = $block->stream_upstream_code // <<_EOC_; + local sock = ngx.req.socket(true) + sock:settimeout(10) + while true do + local data = sock:receiveany(4096) + if not data then + return + end + sock:send(data) + end +_EOC_ + + $block->set_value("stream_upstream_code", $stream_upstream_code); + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\nRPC is not finished"); + } + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen 8125 udp; + content_by_lua_block { + require("lib.mock_layer4").dogstatsd() + } + } +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + $block; +}); + +run_tests; + +__DATA__ + +=== TEST 1: set custom log format +--- config + location /t { + 
content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/syslog', + ngx.HTTP_PUT, + [[{ + "log_format": { + "rpc_time": "$rpc_time" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: use vae rpc_time +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "pingpong", + logger = { + { + name = "syslog", + filter = { + {"rpc_time", ">=", 0} + }, + conf = { + host = "127.0.0.1", + port = 8125, + sock_type = "udp", + batch_max_size = 1, + flush_limit = 1 + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:1995"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: verify the data received by the log server +--- request eval +"POST /t +" . +"pp\x02\x00\x00\x00\x00\x00\x00\x03ABC" +--- stream_conf_enable +--- wait: 0.5 +--- error_log eval +qr/message received:.*\"rpc_time\"\:(0.\d+|0)\}/ diff --git a/CloudronPackages/APISIX/apisix-source/t/xrpc/prometheus.t b/CloudronPackages/APISIX/apisix-source/t/xrpc/prometheus.t new file mode 100644 index 0000000..cc267ac --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xrpc/prometheus.t @@ -0,0 +1,273 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +$ENV{TEST_NGINX_REDIS_PORT} ||= 1985; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +stream_plugins: + - prometheus +xrpc: + protocols: + - name: redis +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\nRPC is not finished"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +worker_connections(1024); +run_tests; + +__DATA__ + +=== TEST 1: route with metrics +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + { + uri = "/apisix/prometheus/metrics", + plugins = { + ["public-api"] = {} + } + } + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "redis", + conf = { + faults = { + {delay = 0.08, commands = 
{"hmset"}}, + {delay = 0.3, commands = {"hmget"}}, + } + }, + metric = { + enable = true, + } + }, + upstream = { + nodes = { + ["127.0.0.1:6379"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local res, err = red:hmset("animals", "dog", "bark", "cat", "meow") + if not res then + ngx.say("failed to set animals: ", err) + return + end + + local res, err = red:hmget("animals", "dog", "cat") + if not res then + ngx.say("failed to get animals: ", err) + return + end + } + } +--- response_body +--- stream_conf_enable + + + +=== TEST 3: check metrics +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_redis_commands_latency_seconds_bucket\{route="1",command="hmget",le="0.5"\} 1/ and +qr/apisix_redis_commands_latency_seconds_bucket\{route="1",command="hmset",le="0.1"\} 1/ and +qr/apisix_redis_commands_total\{route="1",command="hmget"\} 1 +apisix_redis_commands_total\{route="1",command="hmset"\} 1/ + + + +=== TEST 4: ignore metric if prometheus is disabled +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local res, err = red:hmset("animals", "dog", "bark", "cat", "meow") + if not res then + ngx.say("failed to set animals: ", err) + return + end + } + } +--- response_body +--- extra_yaml_config +stream_plugins: + - ip-restriction +xrpc: + protocols: + - name: redis +--- stream_conf_enable + + + +=== TEST 5: check metrics +--- request +GET /apisix/prometheus/metrics 
+--- response_body eval +qr/apisix_redis_commands_total\{route="1",command="hmset"\} 1/ + + + +=== TEST 6: ignore metric if metric is disabled +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "redis", + conf = { + faults = { + {delay = 0.08, commands = {"hmset"}}, + {delay = 0.3, commands = {"hmget"}}, + } + }, + metric = { + enable = false + } + }, + upstream = { + nodes = { + ["127.0.0.1:6379"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local res, err = red:hmset("animals", "dog", "bark", "cat", "meow") + if not res then + ngx.say("failed to set animals: ", err) + return + end + } + } +--- response_body +--- stream_conf_enable + + + +=== TEST 8: check metrics +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_redis_commands_total\{route="1",command="hmset"\} 1/ diff --git a/CloudronPackages/APISIX/apisix-source/t/xrpc/redis.t b/CloudronPackages/APISIX/apisix-source/t/xrpc/redis.t new file mode 100644 index 0000000..afb2f40 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/t/xrpc/redis.t @@ -0,0 +1,783 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +$ENV{TEST_NGINX_REDIS_PORT} ||= 1985; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: redis +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\nRPC is not finished"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +worker_connections(1024); +run_tests; + +__DATA__ + +=== TEST 1: init +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "redis" + }, + upstream = { + nodes = { + ["127.0.0.1:6379"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: sanity +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local res, err = red:hmset("animals", 
"dog", "bark", "cat", "meow") + if not res then + ngx.say("failed to set animals: ", err) + return + end + ngx.say("hmset animals: ", res) + + local res, err = red:hmget("animals", "dog", "cat") + if not res then + ngx.say("failed to get animals: ", err) + return + end + + ngx.say("hmget animals: ", res) + + local res, err = red:hget("animals", "dog") + if not res then + ngx.say("failed to get animals: ", err) + return + end + + ngx.say("hget animals: ", res) + + local res, err = red:hget("animals", "not_found") + if not res then + ngx.say("failed to get animals: ", err) + return + end + + ngx.say("hget animals: ", res) + } + } +--- response_body +hmset animals: OK +hmget animals: barkmeow +hget animals: bark +hget animals: null +--- stream_conf_enable + + + +=== TEST 3: error +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local res, err = red:get("animals") + if not res then + ngx.say("failed to set animals: ", err) + end + + local res, err = red:hget("animals", "dog") + if not res then + ngx.say("failed to get animals: ", err) + return + end + + ngx.say("hget animals: ", res) + } + } +--- response_body +failed to set animals: WRONGTYPE Operation against a key holding the wrong kind of value +hget animals: bark +--- stream_conf_enable + + + +=== TEST 4: big value +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local res, err = red:set("big-key", ("\r\n"):rep(1024 * 1024 * 16)) + if not res then + ngx.say("failed to set: ", err) + return + end + + local res, err = red:get("big-key") + if not res then + ngx.say("failed to get: ", err) + return + end 
+ + ngx.print(res) + } + } +--- response_body eval +"\r\n" x 16777216 +--- stream_conf_enable + + + +=== TEST 5: pipeline +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local redis = require "resty.redis" + + local t = {} + for i = 1, 180 do + local th = assert(ngx.thread.spawn(function(i) + local red = redis:new() + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + red:init_pipeline() + + red:set("mark_" .. i, i) + red:get("mark_" .. i) + red:get("counter") + for j = 1, 4 do + red:incr("counter") + end + + local results, err = red:commit_pipeline() + if not results then + ngx.say("failed to commit: ", err) + return + end + + local begin = tonumber(results[3]) + for j = 1, 4 do + local incred = results[3 + j] + if incred ~= results[2 + j] + 1 then + ngx.log(ngx.ERR, cjson.encode(results)) + end + end + end, i)) + table.insert(t, th) + end + for i, th in ipairs(t) do + ngx.thread.wait(th) + end + } + } +--- response_body +--- stream_conf_enable + + + +=== TEST 6: delay +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "redis", + conf = { + faults = { + {delay = 0.01, key = "ignored", commands = {"Ping", "time"}} + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:6379"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: hit +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local start = ngx.now() + local res, err = red:ping() + if not res then + ngx.say(err) + return + end + 
local now = ngx.now() + -- use integer to bypass float point number precision problem + if math.ceil((now - start) * 1000) < 10 then + ngx.say(now, " ", start) + return + end + start = now + + local res, err = red:time() + if not res then + ngx.say(err) + return + end + local now = ngx.now() + if math.ceil((now - start) * 1000) < 10 then + ngx.say(now, " ", start) + return + end + start = now + + red:init_pipeline() + red:time() + red:time() + red:get("A") + + local results, err = red:commit_pipeline() + if not results then + ngx.say("failed to commit: ", err) + return + end + local now = ngx.now() + if math.ceil((now - start) * 1000) < 20 or math.ceil((now - start) * 1000) > 30 then + ngx.say(now, " ", start) + return + end + + ngx.say("ok") + } + } +--- response_body +ok +--- stream_conf_enable + + + +=== TEST 8: DFS match +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "redis", + conf = { + faults = { + {delay = 0.02, key = "a", commands = {"get"}}, + {delay = 0.01, commands = {"get", "set"}}, + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:6379"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local start = ngx.now() + local res, err = red:get("a") + if not res then + ngx.say(err) + return + end + local now = ngx.now() + if math.ceil((now - start) * 1000) < 20 then + ngx.say(now, " ", start) + return + end + start = now + + local res, err = red:set("a", "a") + if not res then + ngx.say(err) + return + end + local now = ngx.now() + if 
math.ceil((now - start) * 1000) < 10 then + ngx.say(now, " ", start) + return + end + start = now + + red:init_pipeline() + red:get("b") + red:set("A", "a") + + local results, err = red:commit_pipeline() + if not results then + ngx.say("failed to commit: ", err) + return + end + local now = ngx.now() + if math.ceil((now - start) * 1000) < 20 or math.ceil((now - start) * 1000) > 30 then + ngx.say(now, " ", start) + return + end + + ngx.say("ok") + } + } +--- response_body +ok +--- stream_conf_enable + + + +=== TEST 10: multi keys +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "redis", + conf = { + faults = { + {delay = 0.06, key = "b", commands = {"del"}}, + {delay = 0.04, key = "a", commands = {"mset"}}, + {delay = 0.02, key = "b", commands = {"mset"}}, + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:6379"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local start = ngx.now() + local res, err = red:mset("c", 1, "a", 2, "b", 3) + if not res then + ngx.say(err) + return + end + local now = ngx.now() + if math.ceil((now - start) * 1000) < 40 then + ngx.say("mset a ", now, " ", start) + return + end + start = now + + local res, err = red:mset("b", 2, "a", 3) + if not res then + ngx.say(err) + return + end + local now = ngx.now() + if math.ceil((now - start) * 1000) < 20 or math.ceil((now - start) * 1000) > 35 then + ngx.say("mset b ", now, " ", start) + return + end + start = now + + local res, err = red:mset("c", "a") + if not res then + 
ngx.say(err) + return + end + local now = ngx.now() + if math.ceil((now - start) * 1000) > 20 then + ngx.say("mset mismatch ", now, " ", start) + return + end + start = now + + local res, err = red:del("a", "b") + if not res then + ngx.say(err) + return + end + local now = ngx.now() + if math.ceil((now - start) * 1000) < 60 then + ngx.say("del b ", now, " ", start) + return + end + start = now + + ngx.say("ok") + } + } +--- response_body +ok +--- stream_conf_enable + + + +=== TEST 12: publish & subscribe +--- stream_extra_init_by_lua + local cjson = require "cjson" + local redis_proto = require("apisix.stream.xrpc.protocols.redis") + redis_proto.log = function(sess, ctx) + ngx.log(ngx.WARN, "log redis request ", cjson.encode(ctx.cmd_line)) + end + +--- config + location /t { + content_by_lua_block { + local cjson = require "cjson" + local redis = require "resty.redis" + + local red = redis:new() + local red2 = redis:new() + + red:set_timeout(1000) -- 1 sec + red2:set_timeout(1000) -- 1 sec + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("1: failed to connect: ", err) + return + end + + ok, err = red2:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("2: failed to connect: ", err) + return + end + + local res, err = red:subscribe("dog") + if not res then + ngx.say("1: failed to subscribe: ", err) + return + end + + ngx.say("1: subscribe dog: ", cjson.encode(res)) + + res, err = red:subscribe("cat") + if not res then + ngx.say("1: failed to subscribe: ", err) + return + end + + ngx.say("1: subscribe cat: ", cjson.encode(res)) + + res, err = red2:publish("dog", "Hello") + if not res then + ngx.say("2: failed to publish: ", err) + return + end + + ngx.say("2: publish: ", cjson.encode(res)) + + res, err = red:read_reply() + if not res then + ngx.say("1: failed to read reply: ", err) + else + ngx.say("1: receive: ", cjson.encode(res)) + end + + red:set_timeout(10) -- 10ms + res, err = red:read_reply() 
+ if not res then + ngx.say("1: failed to read reply: ", err) + else + ngx.say("1: receive: ", cjson.encode(res)) + end + red:set_timeout(1000) -- 1s + + res, err = red:unsubscribe() + if not res then + ngx.say("1: failed to unscribe: ", err) + else + ngx.say("1: unsubscribe: ", cjson.encode(res)) + end + + res, err = red:read_reply() + if not res then + ngx.say("1: failed to read reply: ", err) + else + ngx.say("1: receive: ", cjson.encode(res)) + end + + red:set_timeout(10) -- 10ms + res, err = red:read_reply() + if not res then + ngx.say("1: failed to read reply: ", err) + else + ngx.say("1: receive: ", cjson.encode(res)) + end + red:set_timeout(1000) -- 1s + + res, err = red:set("dog", 1) + if not res then + ngx.say("1: failed to set: ", err) + else + ngx.say("1: receive: ", cjson.encode(res)) + end + + red:close() + red2:close() + } + } +--- response_body_like chop +^1: subscribe dog: \["subscribe","dog",1\] +1: subscribe cat: \["subscribe","cat",2\] +2: publish: 1 +1: receive: \["message","dog","Hello"\] +1: failed to read reply: timeout +1: unsubscribe: \[\["unsubscribe","(?:dog|cat)",1\],\["unsubscribe","(?:dog|cat)",0\]\] +1: failed to read reply: not subscribed +1: failed to read reply: not subscribed +1: receive: "OK" +$ +--- stream_conf_enable +--- grep_error_log eval +qr/log redis request \[[^]]+\]/ +--- grep_error_log_out +log redis request ["subscribe","dog"] +log redis request ["subscribe","cat"] +log redis request ["publish","dog","Hello"] +log redis request ["unsubscribe"] +log redis request ["set","dog","1"] + + + +=== TEST 13: psubscribe & punsubscribe +--- stream_extra_init_by_lua + local cjson = require "cjson" + local redis_proto = require("apisix.stream.xrpc.protocols.redis") + redis_proto.log = function(sess, ctx) + ngx.log(ngx.WARN, "log redis request ", cjson.encode(ctx.cmd_line)) + end + +--- config + location /t { + content_by_lua_block { + local cjson = require "cjson" + local redis = require "resty.redis" + + local red = redis:new() + 
local red2 = redis:new() + + red:set_timeout(1000) -- 1 sec + red2:set_timeout(1000) -- 1 sec + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("1: failed to connect: ", err) + return + end + + ok, err = red2:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("2: failed to connect: ", err) + return + end + + local res, err = red:psubscribe("dog*", "cat*") + if not res then + ngx.say("1: failed to subscribe: ", err) + return + end + + ngx.say("1: psubscribe: ", cjson.encode(res)) + + res, err = red2:publish("dog1", "Hello") + if not res then + ngx.say("2: failed to publish: ", err) + return + end + + ngx.say("2: publish: ", cjson.encode(res)) + + res, err = red:read_reply() + if not res then + ngx.say("1: failed to read reply: ", err) + else + ngx.say("1: receive: ", cjson.encode(res)) + end + + res, err = red:punsubscribe("cat*", "dog*") + if not res then + ngx.say("1: failed to unscribe: ", err) + else + ngx.say("1: punsubscribe: ", cjson.encode(res)) + end + + res, err = red:set("dog", 1) + if not res then + ngx.say("1: failed to set: ", err) + else + ngx.say("1: receive: ", cjson.encode(res)) + end + + red:close() + red2:close() + } + } +--- response_body_like chop +^1: psubscribe: \[\["psubscribe","dog\*",1\],\["psubscribe","cat\*",2\]\] +2: publish: 1 +1: receive: \["pmessage","dog\*","dog1","Hello"\] +1: punsubscribe: \[\["punsubscribe","cat\*",1\],\["punsubscribe","dog\*",0\]\] +1: receive: "OK" +$ +--- stream_conf_enable +--- grep_error_log eval +qr/log redis request \[[^]]+\]/ +--- grep_error_log_out +log redis request ["psubscribe","dog*","cat*"] +log redis request ["publish","dog1","Hello"] +log redis request ["punsubscribe","cat*","dog*"] +log redis request ["set","dog","1"] diff --git a/CloudronPackages/APISIX/apisix-source/t/xrpc/redis2.t b/CloudronPackages/APISIX/apisix-source/t/xrpc/redis2.t new file mode 100644 index 0000000..076a406 --- /dev/null +++ 
b/CloudronPackages/APISIX/apisix-source/t/xrpc/redis2.t @@ -0,0 +1,202 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +log_level("warn"); + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +$ENV{TEST_NGINX_REDIS_PORT} ||= 1985; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: redis +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\nRPC is not finished"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + if (!defined $block->extra_stream_config) { + my $stream_config = <<_EOC_; + server { + listen 8125 udp; + content_by_lua_block { + require("lib.mock_layer4").dogstatsd() + } + } +_EOC_ + $block->set_value("extra_stream_config", $stream_config); + } + + $block; +}); + +worker_connections(1024); +run_tests; + +__DATA__ + +=== TEST 1: set 
custom log format +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/syslog', + ngx.HTTP_PUT, + [[{ + "log_format": { + "rpc_time": "$rpc_time", + "redis_cmd_line": "$redis_cmd_line" + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: use register vars(redis_cmd_line and rpc_time) in logger +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "redis", + conf = { + faults = { + {delay = 0.01, commands = {"hmset", "hmget", "ping"}}, + } + }, + logger = { + { + name = "syslog", + filter = { + {"rpc_time", ">=", 0.001}, + }, + conf = { + host = "127.0.0.1", + port = 8125, + sock_type = "udp", + batch_max_size = 1, + flush_limit = 1 + } + } + } + }, + upstream = { + nodes = { + ["127.0.0.1:6379"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: verify the data received by the log server +--- stream_conf_enable +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local res, err = red:hmset("animals", "dog", "bark", "cat", "meow") + if not res then + ngx.say("failed to set animals: ", err) + return + end + ngx.say("hmset animals: ", res) + + local res, err = red:hmget("animals", "dog", "cat") + if not res then + ngx.say("failed to get animals: ", err) + return + end + + ngx.say("hmget animals: ", res) + + -- test for only one command in cmd_line + local res, err = red:ping() + if not res then + 
ngx.say(err) + return + end + + ngx.say("ping: ", string.lower(res)) + } + } +--- response_body +hmset animals: OK +hmget animals: barkmeow +ping: pong +--- wait: 1 +--- grep_error_log eval +qr/message received:.*\"redis_cmd_line\":[^}|^,]+/ +--- grep_error_log_out eval +qr{message received:.*\"redis_cmd_line\":\"hmset animals dog bark cat meow\"(?s).* +message received:.*\"redis_cmd_line\":\"hmget animals dog cat\"(?s).* +message received:.*\"redis_cmd_line\":\"ping\"} diff --git a/CloudronPackages/APISIX/apisix-source/utils/check-category.py b/CloudronPackages/APISIX/apisix-source/utils/check-category.py new file mode 100755 index 0000000..6de86a7 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/utils/check-category.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# coding: utf-8 +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import json +import os +import sys +from os import path + +EXT = ".md" + +try: + unicode # Python 2 +except NameError: + unicode = str # Python 3 + +def collect_fn(entries, topic): + if "id" in topic: + fn = topic["id"] + entries.append(fn) + elif "items" in topic: + for item in topic["items"]: + if isinstance(item, unicode): + entries.append(item) + else: + collect_fn(entries, item) + +def check_category(root): + index = root + "config.json" + with open(index) as f: + entries = [] + + data = json.load(f) + for topic in data["sidebar"]: + collect_fn(entries, topic) + for e in entries: + fn = root + e + EXT + if not path.exists(fn): + print("Entry %s in the sidebar can't be found. Please remove it from %s." + % (fn, index)) + return False + + ignore_list = ["examples/plugins-hmac-auth-generate-signature", "config", "README"] + entries.extend(ignore_list) + existed_files = [] + for parent, dirs, files in os.walk(root): + for fn in files: + existed_files.append(path.join(parent[len(root):], path.splitext(fn)[0])) + for fn in existed_files: + if fn not in entries: + print("File %s%s%s is not indexed. Please add it to %s." % (root, fn, EXT, index)) + return False + return True + +roots = ["docs/en/latest/", "docs/zh/latest/"] +for r in roots: + if not check_category(r): + sys.exit(-1) diff --git a/CloudronPackages/APISIX/apisix-source/utils/check-lua-code-style.sh b/CloudronPackages/APISIX/apisix-source/utils/check-lua-code-style.sh new file mode 100755 index 0000000..5296e75 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/utils/check-lua-code-style.sh @@ -0,0 +1,32 @@ +#!/bin/sh + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +set -ex + +luacheck -q apisix t/lib + +find apisix -name '*.lua' ! -wholename 'apisix/cli/ngx_tpl.lua' ! -wholename 'apisix/cli/config.lua' -exec ./utils/lj-releng {} + > \ + /tmp/check.log 2>&1 || (cat /tmp/check.log && exit 1) + +grep -E "ERROR.*.lua:" /tmp/check.log > /tmp/error.log || true +if [ -s /tmp/error.log ]; then + echo "=====bad style=====" + cat /tmp/check.log + exit 1 +fi diff --git a/CloudronPackages/APISIX/apisix-source/utils/check-merge-conflict.sh b/CloudronPackages/APISIX/apisix-source/utils/check-merge-conflict.sh new file mode 100755 index 0000000..4516368 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/utils/check-merge-conflict.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -euo pipefail + +grep "^<<<<<<< HEAD" $(git grep --cached -l '' | xargs) && exit 1 +grep "^>>>>>>> master" $(git grep --cached -l '' | xargs) && exit 1 +exit 0 diff --git a/CloudronPackages/APISIX/apisix-source/utils/check-plugins-code.sh b/CloudronPackages/APISIX/apisix-source/utils/check-plugins-code.sh new file mode 100755 index 0000000..dde36bf --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/utils/check-plugins-code.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +RED="\033[1;31m"; +NC="\033[0m"; # No Color +hit=0 + +checkfunc () { + funccontent=$1 + file=$2 + [[ $funccontent =~ "core.response.exit" ]] && echo -e ${RED}${file}${NC} && echo " can't exit in rewrite or access phase!" && ((hit++)) + [[ $funccontent =~ "ngx.exit" ]] && echo -e ${RED}${file}${NC} && echo " can't exit in rewrite or access phase!" && ((hit++)) + [[ $funccontent =~ "ngx.redirect" ]] && echo -e ${RED}${file}${NC} && echo " can't call ngx.redirect in rewrite or access phase!" 
&& ((hit++)) +} + + +filtercode () { + content=$1 + file=$2 + + rcontent=${content##*_M.rewrite} + rewritefunc=${rcontent%%function*} + checkfunc "$rewritefunc" "$file" + + rcontent=${content##*_M.access} + accessfunc=${rcontent%%function*} + checkfunc "$accessfunc" "$file" +} + + +for file in apisix/plugins/*.lua +do + if test -f $file + then + content=$(cat $file) + filtercode "$content" "$file" + fi +done +for file in apisix/stream/plugins/*.lua +do + if test -f $file + then + content=$(cat $file) + filtercode "$content" "$file" + fi +done + +if (($hit>0)) +then + exit 1 +fi + +# test case for check +content=$(cat t/fake-plugin-exit.lua) +filtercode "$content" > test.log 2>&1 || (cat test.log && exit 1) + +echo "All passed." diff --git a/CloudronPackages/APISIX/apisix-source/utils/check-test-code-style.sh b/CloudronPackages/APISIX/apisix-source/utils/check-test-code-style.sh new file mode 100755 index 0000000..92ddb76 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/utils/check-test-code-style.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +set -x -euo pipefail + +find t -name '*.t' -exec grep -E "\-\-\-\s+(SKIP|ONLY|LAST|FIRST)$" {} + > /tmp/error.log || true +if [ -s /tmp/error.log ]; then + echo "Forbidden directives to found. Bypass test cases without reason are not allowed." + cat /tmp/error.log + exit 1 +fi + +find t -name '*.t' -exec ./utils/reindex {} + > \ + /tmp/check.log 2>&1 || (cat /tmp/check.log && exit 1) + +grep "done." /tmp/check.log > /tmp/error.log || true +if [ -s /tmp/error.log ]; then + echo "=====bad style=====" + cat /tmp/error.log + echo "you need to run 'reindex' to fix them. Read CONTRIBUTING.md for more details." + exit 1 +fi diff --git a/CloudronPackages/APISIX/apisix-source/utils/check-version.sh b/CloudronPackages/APISIX/apisix-source/utils/check-version.sh new file mode 100755 index 0000000..e78857d --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/utils/check-version.sh @@ -0,0 +1,82 @@ +#!/bin/sh + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +ver=$1 + +red='\e[0;41m' +RED='\e[1;31m' +green='\e[0;32m' +GREEN='\e[1;32m' +NC='\e[0m' + +# doc: apisix $ver +matched=`grep "apisix.[0-9][0-9.]*" -r doc/` +expected=`grep "apisix.$ver" -r doc/` + +if [ "$matched" = "$expected" ]; then + echo -e "${green}passed: (doc) apisix $ver ${NC}" +else + echo -e "${RED}failed: (doc) apisix $ver ${NC}" 1>&2 + echo + echo "-----maybe wrong version-----" + echo "$matched" + exit 1 +fi + +# doc: version $ver +matched=`grep "version [0-9][0-9.]*" -r doc/` +expected=`grep -F "version $ver" -r doc/` + +if [ "$matched" = "$expected" ]; then + echo -e "${green}passed: (doc) version $ver ${NC}" +else + echo -e "${RED}failed: (doc) version $ver ${NC}" 1>&2 + echo + echo "-----maybe wrong version-----" + echo "$matched" + exit 1 +fi + +# lua: VERSION = $ver +matched=`grep "VERSION = \"[0-9][0-9.]*\"" -r apisix/` +expected=`grep -F "VERSION = \"$ver\"" -r apisix/` + +if [ "$matched" = "$expected" ]; then + echo -e "${green}passed: (lua) VERSION = $ver ${NC}" +else + echo -e "${RED}failed: (lua) VERSION = \"$ver\" ${NC}" 1>&2 + echo + echo "-----maybe wrong version-----" + echo "$matched" + exit 1 +fi + + +# rockspec +matched=`ls -l | grep "$ver" ` + +if [ -z "$matched" ]; then + echo -e "${RED}failed: (rockspec) VERSION = $ver \"$ver\" ${NC}" 1>&2 + echo + echo "-----please check rockspec file for VERSION \"$ver\"-----" + echo "$matched" + exit 1 +else + echo -e "${green}passed: (rockspec) VERSION = $ver ${NC}" +fi diff --git a/CloudronPackages/APISIX/apisix-source/utils/fix-zh-doc-segment.py b/CloudronPackages/APISIX/apisix-source/utils/fix-zh-doc-segment.py new file mode 100755 index 0000000..1d76f2a --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/utils/fix-zh-doc-segment.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +# coding: utf-8 +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +from os import path +from zhon.hanzi import punctuation # sudo pip3 install zhon + + +def need_fold(pre, cur): + pre = pre.rstrip("\r\n") + if len(pre) == 0 or len(cur) == 0: + return False + if ord(pre[-1]) < 128 or ord(cur[0]) < 128: + return False + # the prev line ends with Chinese and the curr line starts with Chinese + if pre.startswith(":::note"): + # ignore special mark + return False + if pre[-1] in punctuation: + # skip punctuation + return False + return True + +def check_segment(root): + for parent, dirs, files in os.walk(root): + for fn in files: + fn = path.join(parent, fn) + with open(fn) as f: + lines = f.readlines() + new_lines = [lines[0]] + skip = False + for i in range(1, len(lines)): + if lines[i-1].startswith('```'): + skip = not skip + if not skip and need_fold(lines[i-1], lines[i]): + new_lines[-1] = new_lines[-1].rstrip("\r\n") + lines[i] + else: + new_lines.append(lines[i]) + if len(new_lines) != len(lines): + print("find broken newline in file: %s" % fn) + with open(fn, "w") as f: + f.writelines(new_lines) + + +roots = ["docs/zh/latest/"] +for r in roots: + check_segment(r) diff --git a/CloudronPackages/APISIX/apisix-source/utils/gen-vote-contents.sh b/CloudronPackages/APISIX/apisix-source/utils/gen-vote-contents.sh new file mode 100755 index 
0000000..d644dfd --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/utils/gen-vote-contents.sh @@ -0,0 +1,93 @@ +#!/bin/sh + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +VERSION=$1 + +SUBSTRING1=$(echo $VERSION| cut -d'.' -f 1) +SUBSTRING2=$(echo $VERSION| cut -d'.' -f 2) +BLOB_VERSION=$SUBSTRING1.$SUBSTRING2 +CHANGELOG_HASH=$(printf $VERSION | sed 's/\.//g') + +RELEASE_NOTE_PR="https://github.com/apache/apisix/blob/release/$BLOB_VERSION/CHANGELOG.md#$CHANGELOG_HASH" +COMMIT_ID=$(git rev-parse --short HEAD) + +vote_contents=$(cat <<EOF +Hello, Community, + +This is a call for the vote to release Apache APISIX version + +Release notes: + +$RELEASE_NOTE_PR + +The release candidates: + +https://dist.apache.org/repos/dist/dev/apisix/$VERSION/ + +Release Commit ID: + +https://github.com/apache/apisix/commit/$COMMIT_ID + +Keys to verify the Release Candidate: + +https://dist.apache.org/repos/dist/dev/apisix/KEYS + +Steps to validating the release: + +1. Download the release + +wget https://dist.apache.org/repos/dist/dev/apisix/$VERSION/apache-apisix-$VERSION-src.tgz + +2. 
Checksums and signatures + +wget https://dist.apache.org/repos/dist/dev/apisix/KEYS + +wget https://dist.apache.org/repos/dist/dev/apisix/$VERSION/apache-apisix-$VERSION-src.tgz.asc + +wget https://dist.apache.org/repos/dist/dev/apisix/$VERSION/apache-apisix-$VERSION-src.tgz.sha512 + +gpg --import KEYS + +shasum -c apache-apisix-$VERSION-src.tgz.sha512 + +gpg --verify apache-apisix-$VERSION-src.tgz.asc apache-apisix-$VERSION-src.tgz + +3. Unzip and Check files + +tar zxvf apache-apisix-$VERSION-src.tgz + +4. Build Apache APISIX: + +https://github.com/apache/apisix/blob/release/$BLOB_VERSION/docs/en/latest/building-apisix.md#building-apisix-from-source + +The vote will be open for at least 72 hours or until necessary number of +votes are reached. + +Please vote accordingly: + +[ ] +1 approve +[ ] +0 no opinion +[ ] -1 disapprove with the reason +EOF +) + +if [ ! -d release ];then + mkdir release +fi + +printf "$vote_contents" > ./release/apache-apisix-$VERSION-vote-contents.txt diff --git a/CloudronPackages/APISIX/apisix-source/utils/install-dependencies.sh b/CloudronPackages/APISIX/apisix-source/utils/install-dependencies.sh new file mode 100755 index 0000000..b084a35 --- /dev/null +++ b/CloudronPackages/APISIX/apisix-source/utils/install-dependencies.sh @@ -0,0 +1,174 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Installs the build/runtime dependencies for Apache APISIX on the detected
# Linux distribution, then installs the apisix-runtime and LuaRocks.
# -e: abort on first error; -x: trace commands for CI debuggability.
set -ex

# Detect an installed AUR helper (Arch Linux only) and export it via
# AUR_HELPER; exits 255 when none is found.
function detect_aur_helper() {
    if [[ $(command -v yay) ]]; then
        AUR_HELPER=yay
    elif [[ $(command -v pacaur) ]]; then
        AUR_HELPER=pacaur
    else
        echo No available AUR helpers found. Please specify your AUR helper by AUR_HELPER.
        exit 255
    fi
}

# Arch Linux path: install OpenResty from the AUR and OpenSSL from pacman,
# then symlink system OpenSSL headers/libs into the layout OpenResty expects.
function install_dependencies_with_aur() {
    detect_aur_helper
    $AUR_HELPER -S openresty --noconfirm
    sudo pacman -S openssl --noconfirm

    export OPENRESTY_PREFIX=/opt/openresty

    # OpenResty builds look for openssl under its own prefix; point that
    # location at the system-wide headers and libraries.
    sudo mkdir $OPENRESTY_PREFIX/openssl
    sudo ln -s /usr/include $OPENRESTY_PREFIX/openssl/include
    sudo ln -s /usr/lib $OPENRESTY_PREFIX/openssl/lib
}

# Install dependencies on centos and fedora
# $1 is the distro slug ("centos", "rhel", "fedora") used to pick the
# matching openresty.org yum repository.
function install_dependencies_with_yum() {
    sudo yum install -y yum-utils
    sudo yum-config-manager --add-repo "https://openresty.org/package/${1}/openresty.repo"
    if [[ "${1}" == "centos" ]]; then
        # CentOS needs devtoolset-9 for a modern-enough GCC; scl_source is
        # not `set -eu`-clean, so relax and restore the shell options around it.
        sudo yum -y install centos-release-scl
        sudo yum -y install devtoolset-9 patch wget
        set +eu
        source scl_source enable devtoolset-9
        set -eu
    fi
    sudo yum install -y \
        gcc gcc-c++ curl wget unzip xz gnupg perl-ExtUtils-Embed cpanminus patch libyaml-devel \
        perl perl-devel pcre pcre-devel openldap-devel \
        openresty-zlib-devel openresty-pcre-devel
}

# Install dependencies on ubuntu and debian
# $1 is the distro slug ("ubuntu" or "debian") used to pick the APT repo line.
function install_dependencies_with_apt() {
    # add OpenResty source
    sudo apt-get update
    sudo apt-get -y install software-properties-common wget lsb-release gnupg patch
    wget -qO - https://openresty.org/package/pubkey.gpg | sudo apt-key add -
    # openresty.org publishes arm64 packages under an "arm64/" sub-path.
    arch=$(uname -m | tr '[:upper:]' '[:lower:]')
    arch_path=""
    if [[ $arch == "arm64" ]] || [[ $arch == "aarch64" ]]; then
        arch_path="arm64/"
    fi
    # NOTE(review): the component differs upstream — "main" on Ubuntu,
    # "openresty" on Debian.
    if [[ "${1}" == "ubuntu" ]]; then
        sudo add-apt-repository -y "deb http://openresty.org/package/${arch_path}ubuntu $(lsb_release -sc) main"
    elif [[ "${1}" == "debian" ]]; then
        sudo add-apt-repository -y "deb http://openresty.org/package/${arch_path}debian $(lsb_release -sc) openresty"
    fi
    sudo apt-get update

    # install some compilation tools
    sudo apt-get install -y curl make gcc g++ cpanminus libpcre3 libpcre3-dev libyaml-dev unzip openresty-zlib-dev openresty-pcre-dev
}

# Identify the different distributions and call the corresponding function
# Checks /etc/issue first, then /etc/*-release, and dispatches to the
# matching installer; finishes by installing the apisix runtime.
function multi_distro_installation() {
    if grep -Eqi "CentOS" /etc/issue || grep -Eq "CentOS" /etc/*-release; then
        install_dependencies_with_yum "centos"
    elif grep -Eqi -e "Red Hat" -e "rhel" /etc/*-release; then
        install_dependencies_with_yum "rhel"
    elif grep -Eqi "Fedora" /etc/issue || grep -Eq "Fedora" /etc/*-release; then
        install_dependencies_with_yum "fedora"
    elif grep -Eqi "Debian" /etc/issue || grep -Eq "Debian" /etc/*-release; then
        install_dependencies_with_apt "debian"
    elif grep -Eqi "Ubuntu" /etc/issue || grep -Eq "Ubuntu" /etc/*-release; then
        install_dependencies_with_apt "ubuntu"
    elif grep -Eqi "Arch" /etc/issue || grep -Eqi "EndeavourOS" /etc/issue || grep -Eq "Arch" /etc/*-release; then
        install_dependencies_with_aur
    else
        echo "Non-supported distribution, APISIX is only supported on Linux-based systems"
        exit 1
    fi
    install_apisix_runtime
}

# Reverse of multi_distro_installation: removes the openresty zlib/pcre
# development packages on the detected distribution.
function multi_distro_uninstallation() {
    if grep -Eqi "CentOS" /etc/issue || grep -Eq "CentOS" /etc/*-release; then
        sudo yum autoremove -y openresty-zlib-devel openresty-pcre-devel
    elif grep -Eqi -e "Red Hat" -e "rhel" /etc/*-release; then
        sudo yum autoremove -y openresty-zlib-devel openresty-pcre-devel
    elif grep -Eqi "Fedora" /etc/issue || grep -Eq "Fedora" /etc/*-release; then
        sudo yum autoremove -y openresty-zlib-devel openresty-pcre-devel
    elif grep -Eqi "Debian" /etc/issue || grep -Eq "Debian" /etc/*-release; then
        sudo apt-get autoremove -y openresty-zlib-dev openresty-pcre-dev
    elif grep -Eqi "Ubuntu" /etc/issue || grep -Eq "Ubuntu" /etc/*-release; then
        sudo apt-get autoremove -y openresty-zlib-dev openresty-pcre-dev
    else
        echo "Non-supported distribution, APISIX is only supported on Linux-based systems"
        exit 1
    fi
}

# Builds and installs apisix-runtime (the patched OpenResty bundle) via the
# api7/apisix-build-tools helper script.  Requires APISIX_RUNTIME to be set
# (the ":?" expansion aborts with an error when it is unset or empty).
function install_apisix_runtime() {
    export runtime_version=${APISIX_RUNTIME:?}
    wget "https://raw.githubusercontent.com/api7/apisix-build-tools/apisix-runtime/${APISIX_RUNTIME}/build-apisix-runtime.sh"
    chmod +x build-apisix-runtime.sh
    ./build-apisix-runtime.sh latest
    rm build-apisix-runtime.sh
}

# Install LuaRocks
# Prefers the local helper script (repo root or utils/); falls back to
# streaming the script from the apisix master branch.
function install_luarocks() {
    if [ -f "./utils/linux-install-luarocks.sh" ]; then
        ./utils/linux-install-luarocks.sh
    elif [ -f "./linux-install-luarocks.sh" ]; then
        ./linux-install-luarocks.sh
    else
        # NOTE(review): curl | bash with no checksum — trusted source, but
        # unpinned; consider pinning a commit.
        echo "Installing luarocks from remote master branch"
        curl https://raw.githubusercontent.com/apache/apisix/master/utils/linux-install-luarocks.sh -sL | bash -
    fi
}

# Entry
# No arguments: full install (Linux only).  "install_luarocks": LuaRocks
# only.  "uninstall": remove the openresty dev packages.
function main() {
    OS_NAME=$(uname -s | tr '[:upper:]' '[:lower:]')
    if [[ "$#" == 0 ]]; then
        if [[ "${OS_NAME}" == "linux" ]]; then
            multi_distro_installation
            install_luarocks
            return
        else
            echo "Non-supported distribution, APISIX is only supported on Linux-based systems"
            exit 1
        fi
    fi

    case_opt=$1
    case "${case_opt}" in
        "install_luarocks")
            install_luarocks
            ;;
        "uninstall")
            if [[ "${OS_NAME}" == "linux" ]]; then
                multi_distro_uninstallation
            else
                echo "Non-supported distribution, APISIX is only supported on Linux-based systems"
            fi
            ;;
        *)
            echo "Unsupported method: ${case_opt}"
            ;;
    esac
}

main "$@"
diff --git a/CloudronPackages/APISIX/apisix-source/utils/linux-install-luarocks.sh b/CloudronPackages/APISIX/apisix-source/utils/linux-install-luarocks.sh
new file mode 100755
index 0000000..88623a8
---
/dev/null
+++ b/CloudronPackages/APISIX/apisix-source/utils/linux-install-luarocks.sh
@@ -0,0 +1,68 @@
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Downloads, builds and installs LuaRocks from source, configured against
# the LuaJIT shipped with OpenResty when the installed OpenResty is new
# enough.  -e: abort on error; -x: trace commands.
set -ex

# you might need sudo to run this script
# Default the OpenResty location when the caller did not override it.
if [ -z ${OPENRESTY_PREFIX} ]; then
    OPENRESTY_PREFIX="/usr/local/openresty"
fi

# Fetch and unpack the pinned LuaRocks release tarball.
LUAROCKS_VER=3.12.0
wget -q https://github.com/luarocks/luarocks/archive/v"$LUAROCKS_VER".tar.gz
tar -xf v"$LUAROCKS_VER".tar.gz
rm -f v"$LUAROCKS_VER".tar.gz
cd luarocks-"$LUAROCKS_VER" || exit

# Parse "nginx version: openresty/1.21.4.1" into major*100+minor (e.g. 121)
# so versions can be compared numerically.
OR_BIN="$OPENRESTY_PREFIX/bin/openresty"
OR_VER=$($OR_BIN -v 2>&1 | awk -F '/' '{print $2}' | awk -F '.' '{print $1 * 100 + $2}')
if [[ -e $OR_BIN && "$OR_VER" -ge 119 ]]; then
    # OpenResty >= 1.19 bundles a LuaJIT that LuaRocks can build against.
    WITH_LUA_OPT="--with-lua=${OPENRESTY_PREFIX}/luajit"
else
    # For old version OpenResty, we still need to install LuaRocks with Lua
    WITH_LUA_OPT=
fi

# Each step logs to build.log and dumps it on failure so CI output stays
# readable; the subshell's `exit 1` fails the step via `set -e`.
./configure $WITH_LUA_OPT \
    > build.log 2>&1 || (cat build.log && exit 1)

make build > build.log 2>&1 || (cat build.log && exit 1)
sudo make install > build.log 2>&1 || (cat build.log && exit 1)
cd ..
|| exit
# Remove the extracted LuaRocks build tree now that it is installed.
rm -rf luarocks-"$LUAROCKS_VER"

# -p: create only when missing (replaces `mkdir ~/.luarocks || true`, which
# silently ignored every mkdir error, not just "already exists").
mkdir -p ~/.luarocks

# For old version OpenResty, we still need to install LuaRocks with Lua
# Prefer the newest OpenSSL build bundled with OpenResty, if present.
OPENSSL_PREFIX=${OPENRESTY_PREFIX}/openssl
if [ -d "${OPENRESTY_PREFIX}/openssl3" ]; then
    OPENSSL_PREFIX=${OPENRESTY_PREFIX}/openssl3
elif [ -d "${OPENRESTY_PREFIX}/openssl111" ]; then
    OPENSSL_PREFIX=${OPENRESTY_PREFIX}/openssl111
fi

[ ! -d "${OPENSSL_PREFIX}" ] && echo "Warning: the path ${OPENSSL_PREFIX} is not found."

# `luarocks` was installed into /usr/local/bin; make sure it is reachable.
FOUND_PATH=$(echo "${PATH}" | grep -oP '(?<=:|)/usr/local/bin(?=:|)') || true
if [[ "${FOUND_PATH}" == "" ]]; then
    echo "Warning: the path /usr/local/bin is not included in the system default PATH variable."
    export PATH=$PATH:/usr/local/bin
fi

# Point LuaRocks at the OpenResty OpenSSL and the system libyaml so rocks
# such as lyaml and luasec compile.
luarocks config variables.OPENSSL_LIBDIR "${OPENSSL_PREFIX}/lib"
luarocks config variables.OPENSSL_INCDIR "${OPENSSL_PREFIX}/include"
luarocks config variables.YAML_DIR /usr
diff --git a/CloudronPackages/APISIX/start.sh b/CloudronPackages/APISIX/start.sh
new file mode 100644
index 0000000..2899aa8
--- /dev/null
+++ b/CloudronPackages/APISIX/start.sh
@@ -0,0 +1,28 @@
#!/usr/bin/env bash

# Cloudron entrypoint: render APISIX's config.yaml against the platform's
# etcd addon, initialize APISIX, then exec OpenResty in the foreground.
set -euo pipefail

# Set APISIX prefix (":=" also assigns the default back into APISIX_PREFIX).
PREFIX=${APISIX_PREFIX:=/usr/local/apisix}

# Cloudron's etcd addon exports CLOUDRON_ETCD_*; fall back to the
# APISIX_ETCD_* variables declared in CloudronManifest.json.  Under
# `set -u` the original unguarded expansions aborted when either group
# was unset.
ETCD_HOST=${CLOUDRON_ETCD_HOST:-${APISIX_ETCD_HOST:-127.0.0.1}}
ETCD_PORT=${CLOUDRON_ETCD_PORT:-${APISIX_ETCD_PORT:-2379}}

# Generate APISIX configuration (config.yaml) to connect to etcd.
# Fix: `etcd` is a top-level key in APISIX's config.yaml, not a child of
# `apisix:` — the original nesting made APISIX ignore the etcd settings.
# NOTE(review): APISIX 3.x moved this under `deployment: etcd:`; confirm
# against the APISIX version bundled in apisix-source.
cat <<EOF > "${PREFIX}/conf/config.yaml"
etcd:
  host:
    - "http://${ETCD_HOST}:${ETCD_PORT}"
  prefix: "/apisix"
  timeout: 30

# Other APISIX configuration can go here if needed

EOF

# Initialize APISIX (renders nginx.conf from config.yaml).
/usr/bin/apisix init

# Initialize etcd connection for APISIX (seeds required keys).
/usr/bin/apisix init_etcd

# Start OpenResty (APISIX server) as PID 1 so it receives docker's signals
# (the Dockerfile sets STOPSIGNAL SIGQUIT for graceful shutdown).
exec /usr/local/openresty/bin/openresty -p "${PREFIX}" -g 'daemon off;'